en-ner-conll-2003/gru.ipynb
import pandas as pd
import torch
from torchcrf import CRF
from tqdm import tqdm
from torchtext.vocab import Vocab
from collections import Counter
def build_vocab(dataset):
    # Count token frequencies across all documents and build a torchtext
    # Vocab (legacy, torchtext <= 0.9 API) with the usual special symbols.
    counter = Counter()
    for document in dataset:
        counter.update(document)
    return Vocab(counter, specials=['<unk>', '<pad>', '<bos>', '<eos>'])


def data_process(dt, vocab):
    # Map each document (a list of tokens) to a LongTensor of vocab indices.
    return [torch.tensor([vocab[token] for token in document], dtype=torch.long)
            for document in dt]
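A quick sanity check of the two helpers above (the toy tokens are made up for illustration):

toy_docs = [["John", "lives", "in", "Paris"], ["Paris", "is", "nice"]]
toy_vocab = build_vocab(toy_docs)
toy_tensors = data_process(toy_docs, toy_vocab)
# Each document becomes a 1-D LongTensor; repeated tokens share an index,
# and unseen tokens would map to '<unk>'.
print(toy_tensors[0])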


def get_scores(y_true, y_pred):
    # Token-level precision/recall/F1. Label index 0 is treated as the
    # "no entity" class, so only non-zero predictions count as selections.
    tp = 0
    selected_items = 0
    relevant_items = 0
    for p, t in zip(y_pred, y_true):
        if p > 0 and p == t:
            tp += 1
        if p > 0:
            selected_items += 1
        if t > 0:
            relevant_items += 1

    precision = 1.0 if selected_items == 0 else tp / selected_items
    recall = 1.0 if relevant_items == 0 else tp / relevant_items

    if precision + recall == 0.0:
        f1 = 0.0
    else:
        f1 = 2 * precision * recall / (precision + recall)

    return precision, recall, f1
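A small worked example (values chosen for illustration): with y_true = [0, 1, 1, 2] and y_pred = [0, 1, 2, 2], two of the three non-zero predictions are correct and two of the three non-zero gold tags are found, so precision = recall = F1 = 2/3.

p, r, f1 = get_scores([0, 1, 1, 2], [0, 1, 2, 2])
assert abs(f1 - 2 / 3) < 1e-9  # precision = recall = 2/3 here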
def process_output(lines):
    # Repair BIO sequences: an I- tag may not start an entity, so turn it
    # into B- after 'O' (or at the start of a line), and force its type to
    # match the entity opened by the previous tag.
    result = []
    for line in lines:
        last_label = None
        new_line = []
        for label in line:
            if label != "O" and label[0:2] == "I-":
                if last_label is None or last_label == "O":
                    label = label.replace('I-', 'B-')
                else:
                    label = "I-" + last_label[2:]
            last_label = label
            new_line.append(label)
        result.append(" ".join(new_line))
    return result
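For instance, a sequence that illegally starts with I-PER and then switches type mid-entity is normalised like this (labels invented for illustration):

print(process_output([["I-PER", "I-ORG", "O", "I-LOC"]]))
# ['B-PER I-PER O B-LOC']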
class GRU(torch.nn.Module):
    def __init__(self):
        super(GRU, self).__init__()
        self.emb = torch.nn.Embedding(len(vocab_x.itos), 100)
        self.dropout = torch.nn.Dropout(0.2)
        # Two-layer bidirectional GRU over the embedded tokens.
        self.rec = torch.nn.GRU(100, 256, 2, batch_first=True, bidirectional=True)
        # One emission score per tag in the label vocabulary, so the output
        # width matches the CRF constructed below (a hardcoded 9 here did
        # not, and triggered torchcrf's shape validation error).
        self.fc1 = torch.nn.Linear(2 * 256, len(vocab_y.itos))

    def forward(self, x):
        emb = torch.relu(self.emb(x))
        emb = self.dropout(emb)
        gru_output, h_n = self.rec(emb)
        out_weights = self.fc1(gru_output)
        return out_weights
def dev_eval(model, crf, dev_tokens, dev_labels_tokens, vocab):
    # Score the model on the dev split: Viterbi-decode the CRF over the GRU
    # emissions and compare against the gold tag indices. (The original cell
    # argmaxed the emissions and passed the indices to crf.decode, which
    # expects emissions of shape (seq_len, batch, num_tags); it also never
    # returned anything.)
    Y_true = []
    Y_pred = []
    model.eval()
    crf.eval()
    with torch.no_grad():
        for i in tqdm(range(len(dev_labels_tokens))):
            batch_tokens = dev_tokens[i].unsqueeze(0)
            Y_true += list(dev_labels_tokens[i].numpy())

            emissions = model(batch_tokens).squeeze(0).unsqueeze(1)
            Y_pred += crf.decode(emissions)[0]
    return get_scores(Y_true, Y_pred)
train = pd.read_csv('train/train.tsv', sep='\t',
                    names=['labels', 'document'])

Y_train = [y.split(sep=" ") for y in train['labels'].values]
X_train = [x.split(sep=" ") for x in train['document'].values]

dev = pd.read_csv('dev-0/in.tsv', sep='\t', names=['document'])
exp = pd.read_csv('dev-0/expected.tsv', sep='\t', names=['labels'])
X_dev = [x.split(sep=" ") for x in dev['document'].values]
Y_dev = [y.split(sep=" ") for y in exp['labels'].values]

test = pd.read_csv('test-A/in.tsv', sep='\t', names=['document'])
# Split into tokens like the other splits; the raw strings cannot be fed
# to data_process directly.
X_test = [x.split(sep=" ") for x in test['document'].values]
vocab_x = build_vocab(X_train)
vocab_y = build_vocab(Y_train)
train_tokens = data_process(X_train, vocab_x)
labels_tokens = data_process(Y_train, vocab_y)
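The dev and test splits are tokenised nowhere in this dump, although dev_eval and the prediction cell below need them; a minimal completion, assuming the same vocabularies as for training:

dev_tokens = data_process(X_dev, vocab_x)
dev_labels_tokens = data_process(Y_dev, vocab_y)
test_tokens = data_process(X_test, vocab_x)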
model = GRU()
# The CRF must have exactly as many tags as the emission layer produces;
# a hardcoded CRF(9) raised "expected last dimension of emissions is ..."
# in torchcrf's _validate. (The unused CrossEntropyLoss is dropped: the
# CRF itself provides the training objective.)
crf = CRF(len(vocab_y.itos))
params = list(model.parameters()) + list(crf.parameters())
optimizer = torch.optim.Adam(params)
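A quick, illustrative shape check: the emissions for one training sentence should be (1, sentence_length, number_of_tags), which matches what the CRF expects after the reshaping done in the training loop.

with torch.no_grad():
    demo_emissions = model(train_tokens[0].unsqueeze(0))
print(demo_emissions.shape)  # torch.Size([1, len(X_train[0]), len(vocab_y.itos)])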
for epoch in range(2):  # the outer loop reused `i`, shadowing the inner index
    crf.train()
    model.train()
    for i in tqdm(range(len(labels_tokens))):
        batch_tokens = train_tokens[i].unsqueeze(0)
        # torchcrf defaults to (seq_len, batch) layout, hence the reshaping.
        tags = labels_tokens[i].unsqueeze(1)

        # These are emission scores, not tags, so name them accordingly.
        emissions = model(batch_tokens).squeeze(0).unsqueeze(1)

        optimizer.zero_grad()
        # The CRF forward returns the log-likelihood; negate it for a loss.
        loss = -crf(emissions, tags)

        loss.backward()
        optimizer.step()
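After training, the model can be scored on the dev split with the helper defined earlier; a sketch of the call (the print format is illustrative):

precision, recall, f1 = dev_eval(model, crf, dev_tokens, dev_labels_tokens, vocab_y)
print(f'dev precision={precision:.3f} recall={recall:.3f} f1={f1:.3f}')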
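The prediction cell below calls a translate helper that does not appear anywhere in this dump; a minimal sketch, assuming it maps predicted tag indices back to label strings through the label vocabulary and then repairs the BIO scheme:

def translate(pred_sequences, vocab):
    # Hypothetical helper: map each index to its label string, then fix
    # ill-formed I- tags with process_output.
    lines = [[vocab.itos[idx] for idx in seq] for seq in pred_sequences]
    return process_output(lines)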
Y_pred = []
model.eval()
crf.eval()
with torch.no_grad():
    for i in tqdm(range(len(test_tokens))):
        batch_tokens = test_tokens[i].unsqueeze(0)

        emissions = model(batch_tokens).squeeze(0).unsqueeze(1)
        Y_pred += [crf.decode(emissions)[0]]

# The original cell ended with a bare `return` at module level and passed an
# undefined `vocab`; predictions are decoded with the label vocabulary.
Y_pred_translate = translate(Y_pred, vocab_y)
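Presumably the translated predictions are then written out one line per document; a hedged sketch (the test-A/out.tsv path is an assumption mirroring the input layout):

with open('test-A/out.tsv', 'w') as f:  # assumed output path
    for line in Y_pred_translate:
        f.write(line + '\n')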