# en-ner-conll-2003/script.py
import pandas as pd
import numpy as np
import csv
import torch
from tqdm import tqdm
import gensim.downloader as api
from gensim.models.word2vec import Word2Vec
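# Simple feed-forward network that classifies each token independently:
# one hidden layer with ReLU, followed by log-softmax over the NER classes.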
class NERNetwork(torch.nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NERNetwork, self).__init__()
        self.l1 = torch.nn.Linear(input_size, hidden_size)
        self.l2 = torch.nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        x = self.l1(x)
        x = torch.relu(x)
        x = self.l2(x)
        x = torch.log_softmax(x, dim=1)
        return x
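# Train word2vec embeddings on the text8 corpus; the resulting vectors
# (100-dimensional with gensim's default settings) are the base features for each token.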
w2v_model = Word2Vec(api.load('text8'))
word2vec = w2v_model.wv
WORD_FEATURES_LEN = word2vec.vector_size
LABEL = {'O': 0, 'LOC': 1, 'MISC': 2, 'ORG': 3, 'PER': 4}
NUM_LABELS = len(LABEL)
SPECIAL_CHARACTERS = {',', '<', '/', '>', '%', '$', '#', '@', '^', '*', '(', ')', '[', ']', '{', '}', ':'}
OUT_OF_VOCABULARY = np.ones(WORD_FEATURES_LEN)
X_train = []
y_train = []
X_dev = []
X_test = []
def get_key_by_value(number):
    return list(LABEL.keys())[list(LABEL.values()).index(number)]
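# Build a feature vector for a single token: its word2vec embedding (or an
# all-ones vector for out-of-vocabulary words) plus five hand-crafted binary
# features (starts with a digit, single character, fewer than 4 characters,
# starts with a special character, starts with an uppercase letter).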
def to_vector(word):
    extra_features = [word[0].isdigit(), len(word) == 1, len(word) < 4, word[0] in SPECIAL_CHARACTERS, word[0].isupper()]
    word = word.lower()
    if word in word2vec:
        vec = word2vec[word]
    else:
        vec = OUT_OF_VOCABULARY
    vec = vec.reshape(-1, 1)
    extra_features = np.array(extra_features).reshape(-1, 1)
    return np.concatenate((vec, extra_features), axis=0)
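# Convert a sequence of predicted class indices back to BIO tags: a token
# starts a new span (B-) unless the previous token had the same entity type,
# in which case it continues the span (I-).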
def prediction_to_string(prediction):
    prediction_list = prediction.tolist()
    labels = [get_key_by_value(x) for x in prediction_list]
    output = []
    previous_label = None
    for label in labels:
        if label != 'O':
            if previous_label == label:
                output.append(f'I-{label}')
            else:
                output.append(f'B-{label}')
        else:
            output.append(label)
        previous_label = label
    return ' '.join(output)
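# Load the datasets: train.tsv.xz has the label sequence in the first column
# and the tokenized sentence in the second; the dev/test in.tsv files contain
# only the text.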
train_set = pd.read_table('train/train.tsv.xz', error_bad_lines=False, header=None, quoting=csv.QUOTE_NONE)
dev_set = pd.read_table('dev-0/in.tsv', error_bad_lines=False, header=None, quoting=csv.QUOTE_NONE)
test_set = pd.read_table('test-A/in.tsv', error_bad_lines=False, header=None, quoting=csv.QUOTE_NONE)
for index, row in tqdm(train_set.iterrows(), desc="Loading train data", total=train_set.shape[0]):
    labels, words = row[0], row[1]
    words, labels = words.split(), labels.split()
    for word in words:
        X_train.append(to_vector(word))
    for label in labels:
        if label.startswith('B-'):
            y_train.append(LABEL[label[2:]])
        elif label.startswith('I-'):
            y_train.append(LABEL[label[2:]])
        else:
            y_train.append(0)
for index, row in tqdm(dev_set.iterrows(), desc="Loading dev data", total=dev_set.shape[0]):
    words = row[0]
    words = words.split()
    words = [to_vector(word) for word in words]
    X_dev.append(words)
for index, row in tqdm(test_set.iterrows(), desc="Loading test data", total=test_set.shape[0]):
    words = row[0]
    words = words.split()
    words = [to_vector(word) for word in words]
    X_test.append(words)
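# Token classifier: input size is the embedding dimension plus the 5 extra
# features, one hidden layer of 600 units, NLL loss on the log-softmax output.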
model = NERNetwork(WORD_FEATURES_LEN + 5, 600, NUM_LABELS)
criterion = torch.nn.NLLLoss()
optimizer = torch.optim.Adam(model.parameters())
batch_size = 64
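# Train for 5 epochs over mini-batches of individual tokens.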
for epoch in range(5):
    model.train()
    for i in range(0, len(y_train), batch_size):
        X = X_train[i:i+batch_size]
        X = np.array(X).reshape(len(X), WORD_FEATURES_LEN + 5)
        X = torch.tensor(X)
        y = y_train[i:i+batch_size]
        y = np.array(y)
        y = torch.tensor(y)
        outputs = model(X.float())
        loss = criterion(outputs, y.long())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
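# Run inference sentence by sentence and convert the predictions to BIO strings.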
d_pred = []
t_pred = []
model.eval()
with torch.no_grad():
    for i in range(0, len(X_dev)):
        X = X_dev[i]
        X = np.array(X).reshape(len(X), WORD_FEATURES_LEN + 5)
        X = torch.tensor(X)
        output = model(X.float())
        prediction = torch.argmax(output, dim=1)
        d_pred.append(prediction_to_string(prediction))
    for i in range(0, len(X_test)):
        X = X_test[i]
        X = np.array(X).reshape(len(X), WORD_FEATURES_LEN + 5)
        X = torch.tensor(X)
        output = model(X.float())
        prediction = torch.argmax(output, dim=1)
        t_pred.append(prediction_to_string(prediction))
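# Write one line of space-separated BIO tags per sentence to the output files.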
np.asarray(d_pred).tofile('./dev-0/out.tsv', sep='\n', format='%s')
np.asarray(t_pred).tofile('./test-A/out.tsv', sep='\n', format='%s')
print('End')