Homework
- Clone the repository https://git.wmi.amu.edu.pl/kubapok/en-ner-conll-2003.
- Build a sequence-labelling model based on the neural network described below (you may, but do not have to, base it on this notebook).
- The network is a GRU (with any parameters) + CRF in PyTorch, using the CRF module from the previous classes.
- Write predictions to dev-0/out.tsv and test-A/out.tsv.
- The F-score, checked with the geval tool (see the previous assignment), should be at least 0.65.
- Put the predictions and the generating scripts (as plain text files, not a notebook) in the repo, and post a link to your repo in MS Teams.
- Deadline: 22.06. Worth 60 points; the best result earns 100 points.
import numpy as np
import torch
from torchtext.vocab import Vocab
from collections import Counter
from tqdm.notebook import tqdm
import lzma
import itertools
from torchcrf import CRF
def read_data(filename):
    # each line holds "<tag sequence>\t<token sequence>"; drop the trailing empty line
    all_data = lzma.open(filename).read().decode('UTF-8').split('\n')
    return [line.split('\t') for line in all_data][:-1]
def data_process(dt):
return [torch.tensor([vocab['<bos>']] + [vocab[token] for token in document] + [vocab['<eos>']], dtype = torch.long) for document in dt]
def labels_process(dt):
    # pad label sequences at the <bos>/<eos> positions with the 'O' tag
    # (ner_tags_set is defined below, before this function is first called)
    o_idx = ner_tags_set.index('O')
    return [torch.tensor([o_idx] + document + [o_idx], dtype=torch.long) for document in dt]
def build_vocab(dataset):
    # legacy torchtext API (< 0.10): Vocab takes a Counter and exposes .itos/.stoi
    counter = Counter()
    for document in dataset:
        counter.update(document)
    return Vocab(counter, specials=['<unk>', '<pad>', '<bos>', '<eos>'])
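build_vocab relies on the legacy torchtext API (roughly < 0.10), where Vocab accepts a Counter and exposes vocab.itos. On newer torchtext that constructor is gone; a minimal equivalent sketch, assuming torchtext >= 0.12 (build_vocab_new is a hypothetical name):
# hypothetical drop-in for newer torchtext; note len(v) replaces len(v.itos)
from torchtext.vocab import build_vocab_from_iterator

def build_vocab_new(dataset):
    v = build_vocab_from_iterator(dataset, specials=['<unk>', '<pad>', '<bos>', '<eos>'])
    v.set_default_index(v['<unk>'])  # map unseen tokens to <unk>
    return v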
train_data = read_data('train/train.tsv.xz')
tokens, ner_tags = [], []
for i in train_data:
ner_tags.append(i[0].split())
tokens.append(i[1].split())
vocab = build_vocab(tokens)
train_tokens_ids = data_process(tokens)
ner_tags_set = list(set(itertools.chain(*ner_tags)))
ner_tags_set.sort()
print(ner_tags_set)
train_labels = labels_process([[ner_tags_set.index(token) for token in doc] for doc in ner_tags])
['B-LOC', 'B-MISC', 'B-ORG', 'B-PER', 'I-LOC', 'I-MISC', 'I-ORG', 'I-PER', 'O']
num_tags = max([max(x) for x in train_labels]) + 1
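As a sanity check (assuming every tag type occurs in the training data), num_tags should match the sorted tag list:
assert num_tags == len(ner_tags_set) == 9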
class GRU(torch.nn.Module):
    def __init__(self):
        super(GRU, self).__init__()
        self.emb = torch.nn.Embedding(len(vocab.itos), 100)
        self.dropout = torch.nn.Dropout(0.2)
        # 2-layer bidirectional GRU, so the output is 2 * hidden_size wide
        self.rec = torch.nn.GRU(100, 256, 2, batch_first=True, bidirectional=True)
        self.fc1 = torch.nn.Linear(2 * 256, num_tags)

    def forward(self, x):
        emb = torch.relu(self.emb(x))
        emb = self.dropout(emb)
        gru_output, h_n = self.rec(emb)
        out_weights = self.fc1(gru_output)  # per-token emission scores for the CRF
        return out_weights
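A quick shape check (illustrative only, random token ids): one sentence of five ids should yield one row of num_tags emission scores per position.
example_batch = torch.randint(0, len(vocab.itos), (1, 5))
print(GRU()(example_batch).shape)  # expected: torch.Size([1, 5, 9])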
def get_scores(y_true, y_pred):
    # token-level precision/recall/F1; the 'O' tag (index 8 here) counts as "no entity"
    o_idx = ner_tags_set.index('O')
    acc_score = 0
    tp = 0
    selected_items = 0
    relevant_items = 0
    for p, t in zip(y_pred, y_true):
        if p == t:
            acc_score += 1
        if p != o_idx and p == t:
            tp += 1
        if p != o_idx:
            selected_items += 1
        if t != o_idx:
            relevant_items += 1
    if selected_items == 0:
        precision = 1.0
    else:
        precision = tp / selected_items
    if relevant_items == 0:
        recall = 1.0
    else:
        recall = tp / relevant_items
    if precision + recall == 0.0:
        f1 = 0.0
    else:
        f1 = 2 * precision * recall / (precision + recall)
    return precision, recall, f1
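A hand-computed toy check of the metric, with 'O' at index 8 as printed above: one entity token matched, one spurious entity, one missed entity.
# tp = 1, selected = 2, relevant = 2 -> precision = recall = f1 = 0.5
print(get_scores([8, 3, 8, 0], [8, 3, 2, 8]))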
def eval_model(dataset_tokens, dataset_labels, model):
    Y_true = []
    Y_pred = []
    model.eval()
    crf.eval()
    with torch.no_grad():
        for i in tqdm(range(len(dataset_labels))):
            batch_tokens = dataset_tokens[i].unsqueeze(0)
            tags = list(dataset_labels[i].numpy())
            # torchcrf expects emissions of shape (seq_len, batch_size, num_tags)
            emissions = model(batch_tokens).squeeze(0).unsqueeze(1)
            Y_pred += crf.decode(emissions)[0]
            Y_true += tags
    return get_scores(Y_true, Y_pred)
gru = GRU()
crf = CRF(num_tags)
params = list(gru.parameters()) + list(crf.parameters())
# no separate criterion: the negated CRF log-likelihood serves as the training loss
optimizer = torch.optim.Adam(params)
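For reference, torchcrf uses a time-major layout by default: emissions are (seq_len, batch_size, num_tags) and tags are (seq_len, batch_size); forward returns the log-likelihood (negated below to act as a loss) and decode returns one best tag path per batch element. A minimal sketch with random numbers:
demo_emissions = torch.randn(7, 1, num_tags)    # (seq_len, batch, num_tags)
demo_tags = torch.randint(0, num_tags, (7, 1))  # (seq_len, batch)
demo_nll = -crf(demo_emissions, demo_tags)      # scalar negative log-likelihood
demo_paths = crf.decode(demo_emissions)         # e.g. [[3, 8, 8, 0, 1, 8, 8]]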
NUM_EPOCHS = 2
for epoch in range(NUM_EPOCHS):
    gru.train()
    crf.train()
    for i in tqdm(range(len(train_labels))):
        batch_tokens = train_tokens_ids[i].unsqueeze(0)        # (1, seq_len)
        tags = train_labels[i].unsqueeze(1)                    # (seq_len, 1)
        emissions = gru(batch_tokens).squeeze(0).unsqueeze(1)  # (seq_len, 1, num_tags)
        optimizer.zero_grad()
        loss = -crf(emissions, tags)  # CRF returns log-likelihood; negate for a loss
        loss.backward()
        optimizer.step()
    gru.eval()
    crf.eval()
print(eval_model(train_tokens_ids, train_labels, gru))
dev-0
def predict_labels(dataset_tokens, model):
    # decode with the CRF (Viterbi) rather than per-token argmax,
    # so predictions respect the learned transition scores
    result = []
    model.eval()
    crf.eval()
    with torch.no_grad():
        for i in tqdm(range(len(dataset_tokens))):
            batch_tokens = dataset_tokens[i].unsqueeze(0)
            emissions = model(batch_tokens).squeeze(0).unsqueeze(1)
            result.append(crf.decode(emissions)[0])
    return result
with open('dev-0/in.tsv', "r", encoding="utf-8") as f:
dev_0_data = [line.rstrip() for line in f]
dev_0_data = [i.split() for i in dev_0_data]
dev_0_tokens_ids = data_process(dev_0_data)
with open('dev-0/expected.tsv', "r", encoding="utf-8") as f:
dev_0_labels = [line.rstrip() for line in f]
dev_0_labels = [i.split() for i in dev_0_labels]
dev_0_labels = labels_process([[ner_tags_set.index(token) for token in doc] for doc in dev_0_labels])
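Since the dev-0 gold labels are loaded anyway, the same token-level diagnostic can be run on the dev set before writing predictions:
# optional sanity check against dev-0 gold labels
print(eval_model(dev_0_tokens_ids, dev_0_labels, gru))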
tmp = predict_labels(dev_0_tokens_ids, gru)
r = [[ner_tags_set[i] for i in tmp2[1:-1]] for tmp2 in tmp]  # drop the <bos>/<eos> positions
# repair invalid BIO sequences: an entity must start with B-, and I- may only
# continue a same-type entity (geval scores entity spans, so this matters)
for doc in r:
    if doc[0] != 'O':
        doc[0] = 'B' + doc[0][1:]
    for i in range(len(doc) - 1):
        if doc[i] == 'O':
            if doc[i + 1] != 'O':
                doc[i + 1] = 'B' + doc[i + 1][1:]
        elif doc[i + 1] != 'O':
            if doc[i][1:] == doc[i + 1][1:]:
                doc[i + 1] = 'I' + doc[i + 1][1:]
            else:
                doc[i + 1] = 'B' + doc[i + 1][1:]
with open("dev-0/out.tsv", "w") as f:  # "w" rather than "a" so re-runs do not append duplicates
    for doc in r:
        f.write(' '.join(doc) + '\n')
test-A
with open('test-A/in.tsv', "r", encoding="utf-8") as f:
test_A_data = [line.rstrip() for line in f]
test_A_data = [i.split() for i in test_A_data]
test_A_tokens_ids = data_process(test_A_data)
tmp = predict_labels(test_A_tokens_ids, gru)
r = [[ner_tags_set[i] for i in tmp2[1:-1]] for tmp2 in tmp]  # drop the <bos>/<eos> positions
# repair invalid BIO sequences, as above
for doc in r:
    if doc[0] != 'O':
        doc[0] = 'B' + doc[0][1:]
    for i in range(len(doc) - 1):
        if doc[i] == 'O':
            if doc[i + 1] != 'O':
                doc[i + 1] = 'B' + doc[i + 1][1:]
        elif doc[i + 1] != 'O':
            if doc[i][1:] == doc[i + 1][1:]:
                doc[i + 1] = 'I' + doc[i + 1][1:]
            else:
                doc[i + 1] = 'B' + doc[i + 1][1:]
with open("test-A/out.tsv", "w") as f:  # "w" rather than "a" so re-runs do not append duplicates
    for doc in r:
        f.write(' '.join(doc) + '\n')