gpt-2 on left context
This commit is contained in:
commit
6a0b3d8a28
8
.gitignore
vendored
Normal file
8
.gitignore
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
|
||||
*~
|
||||
*.swp
|
||||
*.bak
|
||||
*.pyc
|
||||
*.o
|
||||
.DS_Store
|
||||
.token
|
21
README.md
Normal file
21
README.md
Normal file
@ -0,0 +1,21 @@
|
||||
|
||||
wmt-2020-pl-en
|
||||
==========================================
|
||||
|
||||
Translate from Polish to English.
|
||||
|
||||
This is a challenge created from http://www.statmt.org/wmt20/translation-task.html . Train set is created from europarl wmt pl-en training data. Dev and test set are created from wmt pl-en development data.
|
||||
|
||||
Directory structure
|
||||
-------------------
|
||||
|
||||
* `README.md` — this file
|
||||
* `config.txt` — configuration file
|
||||
* `train/` — directory with training data
|
||||
* `train/train.tsv` — sample parallel corpus (Polish text in the first column, English text in the second one)
|
||||
* `dev-0/` — directory with dev (test) data
|
||||
* `dev-0/in.tsv` — Polish input text for the dev set
|
||||
* `dev-0/expected.tsv` — English reference translation for the dev set
|
||||
* `test-A` — directory with test data
|
||||
* `test-A/in.tsv` — Polish input data for the test set
|
||||
* `test-A/expected.tsv` — English reference translation for the test set
|
1
config.txt
Normal file
1
config.txt
Normal file
@ -0,0 +1 @@
|
||||
--metric BLEU --precision 4 --tokenizer 13a -%
|
10623
dev-0/out.tsv
Normal file
10623
dev-0/out.tsv
Normal file
File diff suppressed because it is too large
Load Diff
113
run-transf-dec.ipynb
Normal file
113
run-transf-dec.ipynb
Normal file
@ -0,0 +1,113 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
import numpy as np
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Prefer the GPU when one is present; otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

# Last expression of the cell: show the environment in the notebook output.
torch.__version__, device
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
# Load the pretrained GPT-2 medium checkpoint: tokenizer and LM-head model.
MODEL_NAME = "gpt2-medium"

tokenizer = GPT2Tokenizer.from_pretrained(MODEL_NAME)
model = GPT2LMHeadModel.from_pretrained(MODEL_NAME)

# Move the model's parameters onto the selected device (GPU/CPU);
# the cell's last expression also displays the module in the notebook.
model.to(device)
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
import lzma


def file_iterator(file_path):
    """Yield text lines from *file_path*, one at a time.

    Transparently handles xz-compressed files (``.xz`` suffix) by
    streaming through :func:`lzma.open` and decoding each line as UTF-8;
    plain files are opened as UTF-8 text directly.

    Args:
        file_path: path to a plain-text or ``.xz``-compressed file.

    Yields:
        str: each line of the file, trailing newline included.
    """
    if file_path.endswith(".xz"):
        with lzma.open(file_path, mode="r") as fp:
            # Iterate the file object directly instead of readlines():
            # the corpus may be large and readlines() would load it all
            # into memory at once.
            for line in fp:
                yield line.decode("utf-8")
    else:
        with open(file_path, "r", encoding="utf-8") as fp:
            for line in fp:
                yield line


def clear_line(line):
    """Normalize a raw corpus line for the tokenizer.

    Lowercases the text, strips the trailing newline character(s), and
    removes literal ``\\n`` escape sequences embedded in the text.
    """
    return line.lower().strip("\n").replace("\\n", "")
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
# Number of most-probable next tokens written out per input line.
K = 20

for folder in ("dev-0", "test-A"):
    print("Working on file from folder:", folder)
    data_iterator = file_iterator(f"{folder}/in.tsv.xz")
    with open(f"{folder}/out-tr-dec.tsv", "w", encoding="utf-8") as fp:
        for line in data_iterator:
            # Column 7 (index 6) of the challenge TSV holds the left context.
            left_context = clear_line(line.split("\t")[6])
            inputs = tokenizer.encode(left_context, return_tensors="pt").to(device)
            # Inference only: no_grad() prevents autograd from retaining the
            # computation graph for every line, which would steadily exhaust
            # (GPU) memory over thousands of iterations.
            with torch.no_grad():
                preds = model(inputs)
            # Softmax over the vocabulary logits at the last position gives
            # the next-token probability distribution.
            probability_distances = torch.softmax(preds[0][0][-1], dim=0)
            top_k = probability_distances.topk(K)
            results = [
                f"{tokenizer.decode([idx])}:{value}"
                for value, idx in zip(top_k.values, top_k.indices)
            ]
            # The leftover probability mass goes into the unnamed ":p" bucket
            # expected by the challenge's output format.
            line_to_write = " ".join(results) + f" :{1 - torch.sum(top_k.values)}\n"
            fp.write(line_to_write)
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "mj_venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
7509
test-A/out.tsv
Normal file
7509
test-A/out.tsv
Normal file
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue
Block a user