en-ner-conll-2003/.ipynb_checkpoints/gru-checkpoint.ipynb

from nltk import word_tokenize
import pandas as pd
import torch
from torchcrf import CRF  # provided by the pytorch-crf package
import gensim
from tqdm import tqdm
from torchtext.vocab import Vocab
from collections import Counter, OrderedDict
import spacy


from torch.utils.data import DataLoader
import numpy as np
from sklearn.metrics import accuracy_score, f1_score, classification_report
import csv
import pickle

import lzma
import re
import itertools
C:\Users\grzyb\anaconda3\lib\site-packages\gensim\similarities\__init__.py:15: UserWarning: The gensim.similarities.levenshtein submodule is disabled, because the optional Levenshtein package <https://pypi.org/project/python-Levenshtein/> is unavailable. Install Levenhstein (e.g. `pip install python-Levenshtein`) to suppress this warning.
  warnings.warn(msg)
!pip3 install pytorch-crf
Requirement already satisfied: pytorch-crf in c:\users\grzyb\anaconda3\lib\site-packages (0.7.2)
import numpy as np
import gensim
import torch
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split

from torchtext.vocab import Vocab
from collections import Counter

from sklearn.datasets import fetch_20newsgroups
# https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score

from tqdm.notebook import tqdm

import torch
from torchcrf import CRF
---------------------------------------------------------------------------
ModuleNotFoundError                       Traceback (most recent call last)
<ipython-input-3-2a643b4fc1bb> in <module>
     18 
     19 import torch
---> 20 from torchcrf import CRF

ModuleNotFoundError: No module named 'torchcrf'
def build_vocab(dataset):
    # Count token frequencies over the whole dataset and wrap them in a
    # torchtext Vocab with specials for unknown, padding and sentence boundaries.
    counter = Counter()
    for document in dataset:
        counter.update(document)
    return Vocab(counter, specials=['<unk>', '<pad>', '<bos>', '<eos>'])
def data_process(dt, vocab):
    # Convert each tokenised document into a tensor of vocabulary indices.
    return [torch.tensor([vocab[token] for token in document], dtype=torch.long)
            for document in dt]
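A quick sanity check on toy data (not from the corpus): the four specials occupy indices 0-3 of the legacy torchtext Vocab, so real tokens start at index 4.
toy_vocab = build_vocab([['John', 'lives', 'in', 'Warsaw'], ['John', 'lives']])
print(toy_vocab.stoi['<unk>'], toy_vocab.stoi['<pad>'])  # 0 1
print(data_process([['John', 'lives', 'in', 'Warsaw']], toy_vocab))
# one LongTensor per document; every real token gets an index >= 4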


def get_scores(y_true, y_pred):
    # Token-level precision/recall/F1: any non-zero tag id counts as an entity
    # tag (this assumes the background 'O' class is mapped to id 0).
    tp = 0
    selected_items = 0
    relevant_items = 0
    for p, t in zip(y_pred, y_true):
        if p > 0 and p == t:
            tp += 1
        if p > 0:
            selected_items += 1
        if t > 0:
            relevant_items += 1

    if selected_items == 0:
        precision = 1.0
    else:
        precision = tp / selected_items

    if relevant_items == 0:
        recall = 1.0
    else:
        recall = tp / relevant_items

    if precision + recall == 0.0:
        f1 = 0.0
    else:
        f1 = 2 * precision * recall / (precision + recall)

    return precision, recall, f1
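A worked toy example (hypothetical tag ids, with 0 as the background class): two of the three predicted entity tokens are correct and two of the four gold entity tokens are recovered.
print(get_scores([0, 5, 5, 6, 6], [5, 5, 0, 6, 0]))
# precision 2/3, recall 2/4, F1 = 4/7 ≈ 0.571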
def process_output(lines):
    # Repair predicted label sequences so they are valid IOB2: an I- tag that
    # does not continue a preceding entity becomes B-, and an I- tag inside an
    # entity inherits the entity type of the previous label.
    result = []
    for line in lines:
        last_label = None
        new_line = []
        for label in line:
            if label != "O" and label[0:2] == "I-":
                if last_label is None or last_label == "O":
                    label = label.replace('I-', 'B-')
                else:
                    label = "I-" + last_label[2:]
            last_label = label
            new_line.append(label)
        result.append(" ".join(new_line))
    return result
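For example (hypothetical labels), a dangling I- tag that opens an entity is rewritten to B-:
print(process_output([['I-PER', 'I-PER', 'O', 'I-LOC']]))
# ['B-PER I-PER O B-LOC']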
class GRU(torch.nn.Module):
    def __init__(self):
        super(GRU, self).__init__()
        self.emb = torch.nn.Embedding(len(vocab_x.itos), 100)
        self.dropout = torch.nn.Dropout(0.2)
        # 2-layer bidirectional GRU, 256 hidden units per direction.
        self.rec = torch.nn.GRU(100, 256, 2, batch_first=True, bidirectional=True)
        # Emission scores over the whole label vocabulary (9 CoNLL tags plus the
        # 4 specials), since data_process maps labels through vocab_y.
        self.fc1 = torch.nn.Linear(2 * 256, len(vocab_y.itos))

    def forward(self, x):
        emb = torch.relu(self.emb(x))
        emb = self.dropout(emb)
        gru_output, h_n = self.rec(emb)
        out_weights = self.fc1(gru_output)  # per-token emissions for the CRF
        return out_weights
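Shape sanity check (a toy batch; assumes vocab_x and vocab_y have already been built from the training data a few cells below):
toy_batch = torch.randint(0, len(vocab_x.itos), (1, 7))
print(GRU()(toy_batch).shape)  # torch.Size([1, 7, num_tags])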
def dev_eval(model, crf, dev_tokens, dev_labels_tokens, vocab):
    Y_true = []
    Y_pred = []
    model.eval()
    crf.eval()
    device = next(model.parameters()).device
    with torch.no_grad():
        for i in tqdm(range(len(dev_labels_tokens))):
            batch_tokens = dev_tokens[i].unsqueeze(0).to(device)
            tags = list(dev_labels_tokens[i].numpy())
            Y_true += tags

            # The CRF decodes from emission scores shaped (seq_len, batch, num_tags).
            emissions = model(batch_tokens).squeeze(0).unsqueeze(1)
            Y_pred += crf.decode(emissions)[0]
    return get_scores(Y_true, Y_pred)
train = pd.read_csv('train/train.tsv', sep='\t',
                    names=['labels', 'document'])

Y_train = [y.split(sep=" ") for y in train['labels'].values]
X_train = [x.split(sep=" ") for x in train['document'].values]

dev = pd.read_csv('dev-0/in.tsv', sep='\t', names=['document'])
exp = pd.read_csv('dev-0/expected.tsv', sep='\t', names=['labels'])
X_dev = [x.split(sep=" ") for x in dev['document'].values]
Y_dev = [y.split(sep=" ") for y in exp['labels'].values]

test = pd.read_csv('test-A/in.tsv', sep='\t', names=['document'])
X_test = [x.split(sep=" ") for x in test['document'].values]
vocab_x = build_vocab(X_train)
vocab_y = build_vocab(Y_train)
train_tokens = data_process(X_train, vocab_x)
labels_tokens = data_process(Y_train, vocab_y)
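A quick look at the processed data (actual values depend on the corpus):
print(X_train[0][:5], Y_train[0][:5])
print(train_tokens[0][:5], labels_tokens[0][:5])
print(len(vocab_x.itos), len(vocab_y.itos))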
torch.cuda.get_device_name(0)
device_gpu = torch.device("cuda:0")
model = GRU().to(device_gpu)
# Label ids come from vocab_y, so the CRF must cover the whole label vocabulary
# (the 4 specials simply never occur in the gold tags).
crf = CRF(len(vocab_y.itos)).to(device_gpu)
params = list(model.parameters()) + list(crf.parameters())
optimizer = torch.optim.Adam(params)
for epoch in range(2):
    crf.train()
    model.train()
    for i in tqdm(range(len(labels_tokens))):
        batch_tokens = train_tokens[i].unsqueeze(0).to(device_gpu)
        tags = labels_tokens[i].unsqueeze(1).to(device_gpu)

        # Emission scores shaped (seq_len, batch, num_tags), as the CRF expects.
        emissions = model(batch_tokens).squeeze(0).unsqueeze(1)

        optimizer.zero_grad()
        # pytorch-crf returns the sequence log-likelihood; negate it for a loss.
        loss = -crf(emissions, tags)

        loss.backward()
        optimizer.step()
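With the dev split tokenised the same way as the training data, evaluation is a single call; dev_tokens and dev_labels_tokens are built here for illustration rather than taken from the original notebook.
dev_tokens = data_process(X_dev, vocab_x)
dev_labels_tokens = data_process(Y_dev, vocab_y)
precision, recall, f1 = dev_eval(model, crf, dev_tokens, dev_labels_tokens, vocab_y)
print(precision, recall, f1)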
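To write predictions in the repository's out.tsv format, a minimal decoding sketch; the predict helper and the output path are assumptions, not part of the original checkpoint.
def predict(model, crf, tokens):
    # Decode each document with the CRF and map tag ids back to label strings.
    model.eval()
    crf.eval()
    device = next(model.parameters()).device
    lines = []
    with torch.no_grad():
        for doc in tokens:
            emissions = model(doc.unsqueeze(0).to(device)).squeeze(0).unsqueeze(1)
            tag_ids = crf.decode(emissions)[0]
            lines.append([vocab_y.itos[t] for t in tag_ids])
    return process_output(lines)

with open('dev-0/out.tsv', 'w') as f:
    for line in predict(model, crf, data_process(X_dev, vocab_x)):
        f.write(line + '\n')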