#!/usr/bin/env python
# coding: utf-8
# In[1]:
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel, AutoConfig
# In[2]:
import lzma
def read_xz_file(fname):
    """Read an xz-compressed text file and return its stripped lines."""
    with lzma.open(fname, mode='rt', encoding='utf-8') as f:
        return [line.strip() for line in f]


def read_file(fname):
    """Read a plain text file and return its stripped lines."""
    with open(fname, mode='rt', encoding='utf-8') as f:
        return [line.strip() for line in f]


def get_contexts(input_text):
    # The TSV fields contain literal '\n' sequences; replace them with spaces
    # before splitting on tabs.
    all_fields = input_text.replace(r'\n', ' ').split('\t')
    return {'left': all_fields[6], 'right': all_fields[7]}
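# Quick illustration (hypothetical field values; the challenge's in.tsv keeps
# the left context in column 7 and the right context in column 8, 1-based):
# >>> get_contexts('f1\tf2\tf3\tf4\tf5\tf6\tleft words\tright words')
# {'left': 'left words', 'right': 'right words'}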
# GPT-2's native '<|endoftext|>' is reused as the BOS marker; EOS is a new
# custom token added to the vocabulary below.
bos = '<|endoftext|>'
eos = '<|EOS|>'
def compose_sentences(raw_input, labels):
    """Build training sentences: each gap word paired with its left and right context."""
    result = []
    for line, label in zip(raw_input, labels):
        context = get_contexts(line)
        result.append(f'{bos} {context["left"]} {label} {eos}')
        result.append(f'{bos} {label} {context["right"]} {eos}')
    return result
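# A sketch of what compose_sentences produces: two training strings per input
# line, each wrapping the gap word ('word' here is a hypothetical label) in
# BOS/EOS markers together with one of the two contexts:
# '<|endoftext|> ...left context... word <|EOS|>'
# '<|endoftext|> word ...right context... <|EOS|>'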
# In[3]:
pad = '<|pad|>'
special_tokens_dict = {'eos_token': eos, 'bos_token': bos, 'pad_token': pad}
tokenizer = GPT2Tokenizer.from_pretrained('distilgpt2')
num_add_tokens = tokenizer.add_special_tokens(special_tokens_dict)
config = AutoConfig.from_pretrained(
    'distilgpt2',
    bos_token_id=tokenizer.bos_token_id,
    eos_token_id=tokenizer.eos_token_id,
    pad_token_id=tokenizer.pad_token_id,
    output_hidden_states=False,
    return_dict_in_generate=True,
)
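# Minimal sanity check: the special tokens should now be registered on the
# tokenizer ('<|endoftext|>' already exists in GPT-2's vocabulary, so only the
# EOS and PAD tokens actually enlarge it).
assert tokenizer.eos_token == eos and tokenizer.pad_token == pad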
# In[4]:
model = GPT2LMHeadModel.from_pretrained('distilgpt2', config=config)
# Grow the embedding matrix to cover the newly added special tokens.
model.resize_token_embeddings(len(tokenizer))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
# In[5]:
dev_input_raw = read_xz_file('challenging-america-word-gap-prediction/dev-0/in.tsv.xz')
dev_input_contexts = [get_contexts(input_text) for input_text in dev_input_raw]
test_input_raw = read_xz_file('challenging-america-word-gap-prediction/test-A/in.tsv.xz')
test_input_contexts = [get_contexts(input_text) for input_text in test_input_raw]
# In[6]:
from tqdm import tqdm

tokenizer.truncation_side = 'left'
# These tokens show up in almost every prediction, yet none of them is an
# actual English word, so they are filtered out below.
blacklist = ['ia', 'ix', 'io', 'ik']
def predict_words(dataset):
    preds = []
    for entry in tqdm(dataset):
        # Feed the token-reversed right context and sample exactly one
        # continuation token; its score distribution ranks gap-word candidates.
        text = entry['right']
        src = tokenizer.encode(text, return_tensors="pt", truncation=True).to(device)
        output = model.generate(torch.flip(src, dims=(1,)),
                                max_length=len(src[0]) + 1,
                                do_sample=True, top_k=0, temperature=0.8,
                                num_return_sequences=1, no_repeat_ngram_size=2,
                                output_scores=True)
        # Scores of the first (and only) generated step, as probabilities.
        probs, idxs = torch.softmax(output.scores[0][-1], dim=0).topk(30)
        current_output = ''
        accumulated_probability = 0
        for prob, token_id in zip(probs, idxs):
            token = tokenizer.decode(token_id, skip_special_tokens=True).split(' ')[-1]
            # Keep only alphanumeric candidates that are not blacklisted.
            if not token.isalnum() or token in blacklist:
                continue
            prob_value = prob.item()
            accumulated_probability += prob_value
            current_output += f'{token.strip()}:{prob_value} '
        # The probability mass of skipped and unlisted tokens goes to the
        # trailing catch-all entry.
        current_output += f':{1 - accumulated_probability}'
        preds.append(current_output)
    return preds
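# Each prediction line follows the challenge's output format: space-separated
# 'word:probability' pairs followed by a ':rest' catch-all for the remaining
# probability mass, e.g. (hypothetical values):
# 'the:0.31 of:0.12 his:0.05 :0.52'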
# In[7]:
dev_preds = predict_words(dev_input_contexts)
with open('challenging-america-word-gap-prediction/dev-0/out.tsv', 'w') as f:
f.writelines(line + '\n' for line in dev_preds)
# In[8]:
test_preds = predict_words(test_input_contexts)
with open('challenging-america-word-gap-prediction/test-A/out.tsv', 'w') as f:
f.writelines(line + '\n' for line in test_preds)