# BiGRU + CRF sequence tagger (notebook export; original file ~14 KiB).
from os import sep
from nltk import word_tokenize
import pandas as pd
import torch
from torchcrf import CRF
import gensim
from torch._C import device
from tqdm import tqdm
from torchtext.vocab import Vocab
from collections import Counter, OrderedDict
from torch.utils.data import DataLoader
import numpy as np
from sklearn.metrics import accuracy_score, f1_score, classification_report
import csv
import pickle
import lzma
import re
import itertools
def build_vocab(dataset):
    """Build a torchtext Vocab over every token occurring in *dataset*.

    The four special symbols are reserved ahead of the real tokens, so
    genuine token/label ids start at index 4.
    """
    token_counts = Counter()
    for doc in dataset:
        token_counts.update(doc)
    return Vocab(token_counts, specials=['<unk>', '<pad>', '<bos>', '<eos>'])
def data_process(dt, vocab):
    """Encode each tokenized document as a 1-D LongTensor of vocabulary ids."""
    encoded = []
    for document in dt:
        ids = [vocab[token] for token in document]
        encoded.append(torch.tensor(ids, dtype=torch.long))
    return encoded
def get_scores(y_true, y_pred):
    """Token-level precision/recall/F1, treating label id 0 as the negative class.

    Fix: the original also maintained `acc_score` and `fp` locals that were
    never returned or used (`fp` was never even incremented); they are removed.

    Args:
        y_true: iterable of gold label ids (ints; 0 = "O"/unknown).
        y_pred: iterable of predicted label ids, parallel to y_true.

    Returns:
        (precision, recall, f1) as floats. Precision (resp. recall) defaults
        to 1.0 when nothing was selected (resp. nothing is relevant), so empty
        input yields (1.0, 1.0, 1.0).
    """
    tp = 0
    selected_items = 0   # predictions carrying a positive (non-O) label
    relevant_items = 0   # gold positions carrying a positive (non-O) label
    for p, t in zip(y_pred, y_true):
        if p > 0 and p == t:
            tp += 1
        if p > 0:
            selected_items += 1
        if t > 0:
            relevant_items += 1
    precision = 1.0 if selected_items == 0 else tp / selected_items
    recall = 1.0 if relevant_items == 0 else tp / relevant_items
    if precision + recall == 0.0:
        f1 = 0.0
    else:
        f1 = 2 * precision * recall / (precision + recall)
    return precision, recall, f1
def process_output(lines):
    """Repair IOB tag sequences so every entity span starts with 'B-'.

    An 'I-X' that opens a line or follows 'O' is promoted to 'B-X'; an 'I-'
    tag that follows another entity tag is forced to continue that entity's
    type (its own type is overwritten by the previous tag's type).

    Fix: removed the unused local `x` and replaced `== None` with `is None`.

    Args:
        lines: list of label sequences (lists of tag strings).

    Returns:
        List of space-joined repaired label strings, one per input line.
    """
    result = []
    for line in lines:
        last_label = None
        new_line = []
        for label in line:
            if label != "O" and label[0:2] == "I-":
                if last_label is None or last_label == "O":
                    # an entity cannot start with I-: promote to B-
                    label = label.replace('I-', 'B-')
                else:
                    # continue the entity type opened by the previous tag
                    label = "I-" + last_label[2:]
            last_label = label
            new_line.append(label)
        result.append(" ".join(new_line))
    return result
class GRU(torch.nn.Module):
    """Bidirectional 2-layer GRU tagger producing per-token emission scores.

    Generalization: the originally hard-coded sizes are now defaulted
    constructor parameters, so `GRU()` behaves exactly as before while other
    configurations (and testing without the `vocab_x` global) become possible.

    Args:
        vocab_size: number of embedding rows; defaults to len(vocab_x.itos)
            (the module-level token vocabulary, as in the original).
        emb_dim: embedding dimension (default 100).
        hidden_size: GRU hidden size per direction (default 256).
        num_layers: number of stacked GRU layers (default 2).
        num_tags: size of the emission/output layer (default 9).
        dropout: dropout probability applied to the embeddings (default 0.2).
    """

    def __init__(self, vocab_size=None, emb_dim=100, hidden_size=256,
                 num_layers=2, num_tags=9, dropout=0.2):
        super(GRU, self).__init__()
        if vocab_size is None:
            # backward-compatible default: size of the module-level vocab
            vocab_size = len(vocab_x.itos)
        self.emb = torch.nn.Embedding(vocab_size, emb_dim)
        self.dropout = torch.nn.Dropout(dropout)
        self.rec = torch.nn.GRU(emb_dim, hidden_size, num_layers,
                                batch_first=True, bidirectional=True)
        # bidirectional -> features are 2 * hidden_size per token
        self.fc1 = torch.nn.Linear(2 * hidden_size, num_tags)

    def forward(self, x):
        """Map (batch, seq) token ids to (batch, seq, num_tags) emission scores."""
        emb = torch.relu(self.emb(x))
        emb = self.dropout(emb)
        gru_output, h_n = self.rec(emb)
        return self.fc1(gru_output)
def dev_eval(model, crf, dev_tokens, dev_labels_tokens, vocab):
    """Evaluate on the dev set, collecting gold ids and CRF-decoded predictions.

    Bug fixes vs. the original:
      * `crf.decode` was given `torch.argmax` class indices (a 1-D tensor),
        but torchcrf's decode requires 3-D emission scores of shape
        (seq_len, batch, num_tags); it now receives the raw emissions,
        matching the layout used in the training loop.
      * The collected `Y_true`/`Y_pred` were never returned; they are now
        (the original implicitly returned None, so callers ignoring the
        return value are unaffected).

    Args:
        model: emission model mapping (1, seq) ids -> (1, seq, num_tags).
        crf: torchcrf.CRF instance (sequence-first layout).
        dev_tokens: list of 1-D LongTensors of token ids.
        dev_labels_tokens: list of 1-D LongTensors of gold label ids.
        vocab: unused; kept for interface compatibility.

    Returns:
        (Y_true, Y_pred): flat list of gold ids, and one decoded id list
        per sentence.
    """
    Y_true = []
    Y_pred = []
    model.eval()
    crf.eval()
    for i in tqdm(range(len(dev_labels_tokens))):
        batch_tokens = dev_tokens[i].unsqueeze(0)
        Y_true += list(dev_labels_tokens[i].numpy())
        # (1, seq, tags) -> (seq, 1, tags): torchcrf's default seq-first layout
        emissions = model(batch_tokens).squeeze(0).unsqueeze(1)
        Y_pred += [crf.decode(emissions)[0]]
    return Y_true, Y_pred
# --- Load train/dev/test TSVs; each row: tab-separated label line / document ---
train = pd.read_csv('train/train.tsv', sep='\t',
                    names=['labels', 'document'])
Y_train = [y.split(sep=" ") for y in train['labels'].values]    # gold tag sequences
X_train = [x.split(sep=" ") for x in train['document'].values]  # token sequences
dev = pd.read_csv('dev-0/in.tsv', sep='\t', names=['document'])
exp = pd.read_csv('dev-0/expected.tsv', sep='\t', names=['labels'])
X_dev = [x.split(sep=" ") for x in dev['document'].values]
Y_dev = [y.split(sep=" ") for y in exp['labels'].values]
test = pd.read_csv('test-A/in.tsv', sep='\t', names=['document'])
# NOTE(review): unlike X_train/X_dev, X_test is left as raw document strings
# (never split into tokens), yet the prediction code later needs tokenized,
# id-encoded input (`test_tokens`) — confirm where that conversion happens.
X_test = test['document'].values
# Separate vocabularies for tokens and labels. build_vocab prepends 4 special
# symbols ('<unk>', '<pad>', '<bos>', '<eos>'), so real label ids start at 4.
vocab_x = build_vocab(X_train)
vocab_y = build_vocab(Y_train)
train_tokens = data_process(X_train, vocab_x)
labels_tokens = data_process(Y_train, vocab_y)
model = GRU()
# NOTE(review): CRF(9) must agree with both the model's emission width (9)
# and the range of label ids fed to it, but vocab_y also contains the 4
# specials, so encoded label ids exceed 8 — the pasted traceback further
# down shows exactly such a num_tags mismatch. Consider building the label
# vocab without specials, or sizing the output head and CRF from vocab_y.
crf = CRF(9)
# NOTE(review): criterion is defined but never used — the loss below is the
# CRF negative log-likelihood, not cross-entropy.
criterion = torch.nn.CrossEntropyLoss()
params = list(model.parameters()) + list(crf.parameters())
optimizer = torch.optim.Adam(params)
# --- Training: 2 epochs of per-sentence CRF negative log-likelihood ---
for i in range(2):
    crf.train()
    model.train()
    # NOTE(review): the inner loop reuses `i`, shadowing the epoch counter
    # (harmless here since the epoch index is never read, but fragile).
    for i in tqdm(range(len(labels_tokens))):
        batch_tokens = train_tokens[i].unsqueeze(0)   # (1, seq) token ids
        tags = labels_tokens[i].unsqueeze(1)          # (seq, 1) — seq-first for torchcrf
        # model output (1, seq, 9) -> (seq, 1, 9): torchcrf's default layout
        predicted_tags = model(batch_tokens).squeeze(0).unsqueeze(1)
        optimizer.zero_grad()
        # crf(...) returns the log-likelihood, so negate it to get a loss
        loss = -crf(predicted_tags, tags)
        loss.backward()
        optimizer.step()
# Pasted notebook traceback (cleaned up from ANSI-escaped IPython output):
# the CRF forward pass raised
#     ValueError: expected last dimension of emissions is 10, got 9
# i.e. at that run the CRF was constructed expecting 10 tags while the
# model's output layer emitted only 9 scores per token — the CRF's
# num_tags and the emission layer's width must agree.
def predict(model, crf, test_tokens, vocab):
    """Decode label-id sequences for the test set with the trained model + CRF.

    Fix: this trailing notebook cell was pasted without its enclosing `def`
    (it ended in a bare top-level `return`, a syntax error) — it is restored
    as a function mirroring dev_eval's signature style.

    NOTE(review): `test_tokens` and `translate` are not defined anywhere in
    this file; `translate` presumably maps decoded label ids back to tag
    strings via `vocab` — confirm against the original notebook.

    Args:
        model: trained emission model mapping (1, seq) ids -> (1, seq, num_tags).
        crf: trained torchcrf.CRF instance.
        test_tokens: list of 1-D LongTensors of token ids.
        vocab: label vocabulary, passed through to `translate`.

    Returns:
        Whatever `translate` produces from the decoded id sequences.
    """
    Y_pred = []
    model.eval()
    crf.eval()
    for i in tqdm(range(len(test_tokens))):
        batch_tokens = test_tokens[i].unsqueeze(0)
        # (1, seq, tags) -> (seq, 1, tags) emissions for torchcrf decode
        Y_batch_pred = model(batch_tokens).squeeze(0).unsqueeze(1)
        Y_pred += [crf.decode(Y_batch_pred)[0]]
    Y_pred_translate = translate(Y_pred, vocab)
    return Y_pred_translate