# forked from kubapok/en-ner-conll-2003
import pandas as pd
import torch
from collections import Counter
from torchtext.vocab import Vocab
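# Trigram-window NER tagger: each token is classified from the embeddings of
# the 3-token window centred on it, into one of the 9 CoNLL-2003 IOB labels
# defined below.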
def predict(input_tokens, labels):
    # Tag each sentence by sliding a 3-token window over it; positions 0 and
    # -1 hold the <bos>/<eos> ids added in data_process, so only the inner
    # tokens are predicted. Run in eval mode without gradient tracking.
    ner_model.eval()
    results = []
    with torch.no_grad():
        for i in range(len(input_tokens)):
            line_results = []
            for j in range(1, len(input_tokens[i]) - 1):
                x = input_tokens[i][j - 1: j + 2].to(device_gpu)
                predicted = ner_model(x.long())
                label = labels[torch.argmax(predicted)]
                line_results.append(label)
            results.append(line_results)
    return results
def create_tensors_list(data):
    # Two surface features per word: does it start with an uppercase letter,
    # and does it start with a digit? Cast to float so torch.cat gets a
    # consistent dtype with the empty float tensor it starts from.
    tensors = []
    for sent in data["tokens"]:
        sent_tensor = torch.tensor(())
        for word in sent:
            temp = torch.tensor([float(word[0].isupper()), float(word[0].isdigit())])
            sent_tensor = torch.cat((sent_tensor, temp))
        tensors.append(sent_tensor)
    return tensors
def save_to_file(path, results):
with open(path, "w") as f:
for line in results:
f.write(line + "\n")
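# Concatenate each sentence's token-id tensor with its surface-feature tensor.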
def extra_features(tokens_ids, tensors_list):
return [torch.cat((token, tensors_list[i])) for i, token in enumerate(tokens_ids)]
def process_output(lines):
    # Repair the IOB sequence: an I- tag that opens an entity (at sentence
    # start or after O) becomes B-, and an I- tag inside an entity inherits
    # the entity type of the previous tag.
    result = []
    for line in lines:
        last_label = None
        new_line = []
        for label in line:
            if label != "O" and label.startswith("I-"):
                if last_label is None or last_label == "O":
                    label = label.replace('I-', 'B-')
                else:
                    label = "I-" + last_label[2:]
            last_label = label
            new_line.append(label)
        result.append(" ".join(new_line))
    return result
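# Build a torchtext vocabulary over all tokens. NOTE: this uses the legacy
# torchtext.vocab.Vocab(counter, specials=...) constructor (torchtext < 0.9);
# newer releases changed Vocab and provide build_vocab_from_iterator instead.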
def build_vocab(dataset):
counter = Counter()
for document in dataset:
counter.update(document)
return Vocab(counter, specials=['<unk>', '<pad>', '<bos>', '<eos>'])
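# Pad each label sequence with 0 ('O') at both ends so it stays aligned with
# the <bos>/<eos> ids that data_process adds to the token sequences.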
def labels_process(dt):
return [torch.tensor([0] + document + [0], dtype=torch.long) for document in dt]
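# Map tokens to vocabulary ids and wrap each sentence in <bos>/<eos>.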
def data_process(dt):
return [torch.tensor([vocab['<bos>']] + [vocab[token] for token in document] + [vocab['<eos>']], dtype=torch.long)
for document in dt]
class NERModel(torch.nn.Module):
    def __init__(self):
        super(NERModel, self).__init__()
        # Embedding lookup for up to 23628 vocabulary ids, 200-dim vectors.
        self.emb = torch.nn.Embedding(23628, 200)
        # 3 tokens x 200-dim embeddings = 600 inputs -> 9 label scores.
        self.fc1 = torch.nn.Linear(600, 9)

    def forward(self, x):
        x = self.emb(x)       # (3,) -> (3, 200)
        x = x.reshape(600)    # flatten the window
        x = self.fc1(x)       # (600,) -> (9,)
        return x
labels = ['O', 'B-LOC', 'I-LOC', 'B-MISC', 'I-MISC', 'B-ORG', 'I-ORG', 'B-PER', 'I-PER']
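# Load the training data: column 1 holds space-separated IOB tags, column 2
# the matching space-separated tokens.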
data = pd.read_csv('train/train.tsv', sep='\t', names=['iob', 'tokens'])
data["iob"] = data["iob"].apply(lambda x: [labels.index(y) for y in x.split()])
data["tokens"] = data["tokens"].apply(lambda x: x.split())
extra_tensors = create_tensors_list(data)
vocab = build_vocab(data['tokens'])
device_gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
ner_model = NERModel().to(device_gpu)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(ner_model.parameters())
train_labels = labels_process(data['iob'])
train_tokens_ids = data_process(data['tokens'])
train_tensors = extra_features(train_tokens_ids, extra_tensors)
print(train_tensors[0])
# tensor([2.0000e+00, 9.6700e+02, 2.2410e+04, ..., 0.0000e+00, 0.0000e+00, 0.0000e+00])

type(train_labels)
# list

data["iob"]
# 0      [5, 0, 3, 0, 0, 0, 3, 0, 0, 0, 7, 8, 0, 1, 0, ...
# 1      [0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ...
# 2      [1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, ...
# 3      [1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, ...
# 4      [3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, ...
#                             ...
# 940    [0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 7, 8, 0, 1, 0, ...
# 941    [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, ...
# 942    [0, 0, 3, 0, 7, 0, 5, 0, 0, 1, 0, 1, 0, 0, 3, ...
# 943    [0, 0, 1, 2, 3, 4, 0, 0, 0, 0, 1, 0, 1, 0, 0, ...
# 944    [0, 0, 3, 4, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, ...
# Name: iob, Length: 945, dtype: object

data["tokens"]
# 0      [EU, rejects, German, call, to, boycott, Briti...
# 1      [Rare, Hendrix, song, draft, sells, for, almos...
# 2      [China, says, Taiwan, spoils, atmosphere, for,...
# 3      [China, says, time, right, for, Taiwan, talks,...
# 4      [German, July, car, registrations, up, 14.2, p...
#                             ...
# 940    [CYCLING, -, BALLANGER, KEEPS, SPRINT, TITLE, ...
# 941    [CYCLING, -, WORLD, TRACK, CHAMPIONSHIP, RESUL...
# 942    [SOCCER, -, FRENCH, DEFENDER, KOMBOUARE, JOINS...
# 943    [MOTORCYCLING, -, SAN, MARINO, GRAND, PRIX, PR...
# 944    [GOLF, -, BRITISH, MASTERS, THIRD, ROUND, SCOR...
# Name: tokens, Length: 945, dtype: object
for epoch in range(5):
acc_score = 0
prec_score = 0
selected_items = 0
recall_score = 0
relevant_items = 0
items_total = 0
ner_model.train()
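    # One training step per inner token: the input is the 3-token window
    # centred on position j, the target is that token's label.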
for i in range(len(train_labels)):
for j in range(1, len(train_labels[i]) - 1):
X = train_tensors[i][j - 1: j + 2].to(device_gpu)
Y = train_labels[i][j: j + 1].to(device_gpu)
# Had to add .long() to fit types
Y_predictions = ner_model(X.long())
acc_score += int(torch.argmax(Y_predictions) == Y)
if torch.argmax(Y_predictions) != 0:
selected_items += 1
if torch.argmax(Y_predictions) != 0 and torch.argmax(Y_predictions) == Y.item():
prec_score += 1
if Y.item() != 0:
relevant_items += 1
if Y.item() != 0 and torch.argmax(Y_predictions) == Y.item():
recall_score += 1
items_total += 1
optimizer.zero_grad()
loss = criterion(Y_predictions.unsqueeze(0), Y)
loss.backward()
optimizer.step()
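    # Precision and recall are computed over non-'O' predictions and targets only.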
precision = prec_score / selected_items
recall = recall_score / relevant_items
f1_score = (2 * precision * recall) / (precision + recall)
print(f'epoch: {epoch}')
print(f'f1: {f1_score}')
print(f'acc: {acc_score / items_total}')
# epoch: 0
# f1: 0.6370749322900994
# acc: 0.9114627847775542
# epoch: 1
# f1: 0.7994615623567001
# acc: 0.954334500473289
# epoch: 2
# f1: 0.8643503374296407
# acc: 0.9701919807375957
# epoch: 3
# f1: 0.9025574619618
# acc: 0.9791431170907888
# epoch: 4
# f1: 0.9295360263614699
# acc: 0.9851580233979396
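# Tag the dev set and write the IOB-repaired predictions.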
dev = pd.read_csv('dev-0/in.tsv', sep='\t', names=['tokens'])
dev["tokens"] = dev["tokens"].apply(lambda x: x.split())
dev_tokens_ids = data_process(dev["tokens"])
dev_extra_tensors = create_tensors_list(dev)
dev_tensors = extra_features(dev_tokens_ids, dev_extra_tensors)
results = predict(dev_tensors, labels)
results_processed = process_output(results)
save_to_file("dev-0/out.tsv", results_processed)
test = pd.read_csv('test-A/in.tsv', sep='\t', names=['tokens'])
test["tokens"] = test["tokens"].apply(lambda x: x.split())
test_tokens_ids = data_process(test["tokens"])
test_extra_tensors = create_tensors_list(test)
test_tensors = extra_features(test_tokens_ids, test_extra_tensors)
results = predict(test_tensors, labels)
results_processed = process_output(results)
save_to_file("test-A/out.tsv", results_processed)