import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import lzma

# import os
# os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"
torch.cuda.empty_cache()


top = 50
model_name = "gpt2"
device = torch.device('cuda')
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model.to(device)
model.eval()

for folder_name in ['dev-0', 'test-A']:
    linecount = 10519 if folder_name == 'dev-0' else 7414
    processed_lines = 0
    f = lzma.open(f'{folder_name}/in.tsv.xz', mode='rt', encoding='utf-8')
    with open(f'{folder_name}/out-top={top}.tsv', 'w', encoding='utf-8') as file:
        for line in f:
            separated = line.split('\t')
            # field 7 of the TSV is the left context; literal "\n" markers become spaces
            prefix = separated[6].replace(r'\n', ' ')

            inputs = tokenizer.encode(prefix, return_tensors="pt").to(device)
            with torch.no_grad():
                output = model(inputs)
            # next-token distribution over the vocabulary (logits at the last position)
            probs = torch.softmax(output[0][0][-1], dim=0)

            result = ''
            total = 0.0
            values, indices = probs.topk(top)
            for val, idx in zip(values, indices):
                token = tokenizer.decode([idx])
                total += val.item()
                result += f'{token.strip()}:{val.item()} '
            # remaining probability mass goes to the final, token-less entry
            result += f':{1 - total}'

            file.write(result + '\n')
            print(f'\r{folder_name} : {(processed_lines/linecount)*100:.2f}%', end='')
            processed_lines += 1
            # print(processed_lines)
    f.close()
    print()