adnovac 2022-05-30 21:08:27 +02:00
parent b61e3e981c
commit 023903113d
3 changed files with 18061 additions and 17976 deletions

File diff suppressed because it is too large

run.py (169 changed lines)

@@ -20,8 +20,9 @@ import csv
 import itertools
 from os.path import exists
-vocab_size = 30000
-embed_size = 150
+vocab_size = 15000
+embed_size = 128
+lstm_size = 128

 # helper functions
 def clean(text):
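Review note: embed_size and lstm_size are now both 128. This coupling matters because the rewritten Model (next hunk) passes input_size=self.lstm_size to its LSTM, so the embedding output dimension has to equal lstm_size for the forward pass to line up; if the two constants are ever changed independently, the model will fail with a shape mismatch.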
@@ -46,17 +47,29 @@ def get_word_lines_from_data(d):
         yield get_words_from_line(line)

 class Model(torch.nn.Module):
-    def __init__(self, vocabulary_size, embedding_size):
+    def __init__(self, vocabulary_size, embedding_size, lstm_size):
         super(Model, self).__init__()
-        self.model = torch.nn.Sequential(
-            torch.nn.Embedding(vocabulary_size, embedding_size),
-            torch.nn.Linear(embedding_size, vocabulary_size),
-            torch.nn.Softmax()
+        self.lstm_size = lstm_size
+        self.embedding_dim = embedding_size
+        self.num_layers = 3
+        self.embedding = torch.nn.Embedding(
+            num_embeddings=vocab_size,
+            embedding_dim=self.embedding_dim,
         )
+        self.lstm = torch.nn.LSTM(
+            input_size=self.lstm_size,
+            hidden_size=self.lstm_size,
+            num_layers=self.num_layers,
+            dropout=0.2,
+        )
+        self.fc = torch.nn.Linear(self.lstm_size, vocab_size)

-    def forward(self, x):
-        return self.model(x)
+    def forward(self, x, prev_state = None):
+        embed = self.embedding(x)
+        output, state = self.lstm(embed, prev_state)
+        logits = self.fc(output)
+        return logits, state

 class Trigrams(torch.utils.data.IterableDataset):
     def __init__(self, data, vocabulary_size):
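Review note: the rewritten Model returns raw logits plus the LSTM state instead of softmax probabilities, which pairs with the switch to CrossEntropyLoss below. Two details worth flagging: the layers read the global vocab_size rather than the vocabulary_size argument, and the LSTM's input_size is set from lstm_size, which only matches the embedding output because embed_size == lstm_size. A minimal sketch of the new forward pass under those globals, with shapes for a hypothetical batch of 8 token ids:

import torch

vocab_size, embed_size, lstm_size = 15000, 128, 128

embedding = torch.nn.Embedding(num_embeddings=vocab_size, embedding_dim=embed_size)
lstm = torch.nn.LSTM(input_size=lstm_size, hidden_size=lstm_size,
                     num_layers=3, dropout=0.2)
fc = torch.nn.Linear(lstm_size, vocab_size)

x = torch.randint(0, vocab_size, (8,))  # a 1-D batch of token ids, as the DataLoader yields
embed = embedding(x)                    # (8, 128)
# With 2-D (unbatched) input the LSTM treats dim 0 as the sequence axis
# (PyTorch >= 1.11 accepts unbatched input).
output, state = lstm(embed)             # output: (8, 128)
logits = fc(output)                     # (8, 15000), raw scores for CrossEntropyLoss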
@@ -82,37 +95,41 @@ class Trigrams(torch.utils.data.IterableDataset):

 # loading the training data
-train_in = pd.read_csv("gdrive/MyDrive/train/in.tsv.xz", sep='\t', header=None, encoding="UTF-8", on_bad_lines="skip", quoting=csv.QUOTE_NONE, nrows=300000)[[6, 7]]
-train_expected = pd.read_csv("gdrive/MyDrive/train/expected.tsv", sep='\t', header=None, encoding="UTF-8", on_bad_lines="skip", quoting=csv.QUOTE_NONE, nrows=300000)
+train_in = pd.read_csv("gdrive/MyDrive/train/in.tsv.xz", sep='\t', header=None, encoding="UTF-8", on_bad_lines="skip", quoting=csv.QUOTE_NONE, nrows=20000)[[6, 7]]
+train_expected = pd.read_csv("gdrive/MyDrive/train/expected.tsv", sep='\t', header=None, encoding="UTF-8", on_bad_lines="skip", quoting=csv.QUOTE_NONE, nrows=20000)
 train_data = pd.concat([train_in, train_expected], axis=1)
 train_data = train_data[6] + train_data[0] + train_data[7]
 train_data = train_data.apply(clean)
 train_dataset = Trigrams(train_data, vocab_size)
+train_dataset_rev = Trigrams(train_data.iloc[::-1], vocab_size)

 # training/loading the model
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
-model = Model(vocab_size, embed_size).to(device)
+model = Model(vocab_size, embed_size, lstm_size).to(device)
 print(device)
 if(not exists('model1.bin')):
     data = DataLoader(train_dataset, batch_size=8000)
-    optimizer = torch.optim.Adam(model.parameters())
-    criterion = torch.nn.NLLLoss()
+    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
+    criterion = torch.nn.CrossEntropyLoss()
     model.train()
     step = 0
-    for i in range(2):
+    for i in range(1):
         print(f"EPOCH {i}=========================")
         for x, y in data:
+            optimizer.zero_grad()
             x = x.to(device)
             y = y.to(device)
-            optimizer.zero_grad()
-            ypredicted = model(x)
-            loss = criterion(torch.log(ypredicted), y)
+            y_pred, state_h = model(x)
+            loss = criterion(y_pred, y)
+            loss.backward()
+            optimizer.step()
             if step % 100 == 0:
                 print(step, loss)
             step += 1
-            loss.backward()
-            optimizer.step()
     torch.save(model.state_dict(), 'model1.bin')
 else:
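Review note: replacing NLLLoss over torch.log(softmax(...)) with CrossEntropyLoss on raw logits is the numerically stable formulation of the same objective, since CrossEntropyLoss fuses log-softmax and NLL internally. A quick self-contained check of that equivalence:

import torch

logits = torch.randn(4, 10)          # 4 examples, 10-class raw scores
targets = torch.tensor([1, 0, 3, 9])

ce = torch.nn.CrossEntropyLoss()(logits, targets)
nll = torch.nn.NLLLoss()(torch.log_softmax(logits, dim=1), targets)
assert torch.allclose(ce, nll)       # identical up to floating-point error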
@@ -122,47 +139,115 @@ else:
 vocab = train_dataset.vocab

-def predict(tokens):
-    ixs = torch.tensor(vocab.forward(tokens)).to(device)
+# training/loading the model
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+model_b = Model(vocab_size, embed_size, lstm_size).to(device)
+print(device)
+if(not exists('model1_b.bin')):
+    data_b = DataLoader(train_dataset_rev, batch_size=8000)
+    optimizer = torch.optim.Adam(model_b.parameters(), lr=0.001)
+    criterion = torch.nn.CrossEntropyLoss()
+    model_b.train()
+    step = 0
+    for i in range(1):
+        print(f"EPOCH {i}=========================")
+        for x, y in data:
+            optimizer.zero_grad()
+            x = x.to(device)
+            y = y.to(device)
+            y_pred, state_h = model_b(x)
+            loss = criterion(y_pred, y)
+            loss.backward()
+            optimizer.step()
+            if step % 100 == 0:
+                print(step, loss)
+            step += 1
+    torch.save(model_b.state_dict(), 'model1_b.bin')
+else:
+    print("Loading model1")
+    model_b.load_state_dict(torch.load('model1_b.bin'))
+
+import numpy as np
+
+def predict(tokens_left, tokens_right):
+    ixs = torch.tensor(vocab.forward(tokens_left)).to(device)
+    ixs_r = torch.tensor(vocab.forward(tokens_right)).to(device)
     out = model(ixs)
+    out_b = model_b(ixs_r)
     top = torch.topk(out[0], 8)
-    top_indices = top.indices.tolist()
-    top_probs = top.values.tolist()
-    top_words = vocab.lookup_tokens(top_indices)
+    top_b = torch.topk(out_b[0], 8)
+    top_indices = top.indices.tolist()[0]
+    top_probs = top.values.tolist()[0]
+    top_indices_b = top_b.indices.tolist()[0]
+    top_probs_b = top_b.values.tolist()[0]
+    raw_result = []
+    for ind in set(top_indices + top_indices_b):
+        prob = 0
+        if(ind in top_indices):
+            prob += top_probs[top_indices.index(ind)]
+        if(ind in top_indices_b):
+            prob += top_probs_b[top_indices_b.index(ind)]
+        raw_result += [[vocab.lookup_token(ind), prob]]
+    raw_result = list(filter(lambda x: x[0] != "<unk>", raw_result))
+    raw_result = sorted(raw_result, key=lambda x: -x[1])[:8]
+    words = [x[0] for x in raw_result]
+    probs = [x[1] for x in raw_result]
+    probs_x = np.exp(probs)/sum(np.exp(probs))
     result = ""
-    for word, prob in list(zip(top_words, top_probs)):
+    for word, prob in list(zip(words, probs_x)):
         result += f"{word}:{prob} "
-    # result += f':0.01'
+    result += ":0.3"
+    result = result.rstrip()
     return result
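Review note: the backward-model training loop above iterates over data (the forward DataLoader) rather than data_b, so model_b appears to be trained on the unreversed stream; this looks like a typo. Also note that predict sums raw top-k scores from the two directions and only then softmax-normalizes, rather than combining probabilities. A minimal sketch of that merge, with hypothetical token ids and scores invented for illustration:

import numpy as np

# Hypothetical top-k (k=3 for brevity) output: token id -> raw score per direction.
top_fwd = {17: 5.1, 42: 4.3, 7: 3.9}
top_bwd = {42: 4.8, 99: 4.0, 7: 2.5}

# Sum the scores of every id proposed by either direction.
merged = {i: top_fwd.get(i, 0.0) + top_bwd.get(i, 0.0)
          for i in set(top_fwd) | set(top_bwd)}

# Keep the best 8 and softmax-normalize into a probability distribution.
best = sorted(merged.items(), key=lambda kv: -kv[1])[:8]
scores = np.array([score for _, score in best])
probs = np.exp(scores) / np.exp(scores).sum()  # sums to 1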
 from nltk import word_tokenize

 def predict_file(result_path, data):
     with open(result_path, "w+", encoding="UTF-8") as f:
-        for row in data:
+        for index, row in data.iterrows():
             result = {}
             before = None
-            for before in get_words_from_line(clean(str(row)), False):
+            after = None
+            for after in get_words_from_line(clean(str(row[7])), False):
+                after = [after]
+                break
+            for before in get_words_from_line(clean(str(row[6])), False):
                 pass
             before = [before]
-            print(before)
-            if(len(before) < 1):
+            if(len(before) < 1 and len(after) < 1):
                 result = "a:0.2 the:0.2 to:0.2 of:0.1 and:0.1 of:0.1 :0.1"
             else:
-                result = predict(before)
+                result = predict(before, after)
             result = result.strip()
-            f.write(result + "\n")
             print(result)
+            f.write(result + "\n")
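Review note: each line that predict_file writes follows the word-gap challenge format of space-separated word:prob pairs plus a bare :mass entry covering all unlisted words, e.g. a hypothetical output line: the:0.35 a:0.2 of:0.15 :0.3. Since probs_x already sums to 1 before the :0.3 is appended, the written distribution totals 1.3; whether geval renormalizes this is worth double-checking.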
-dev_data = pd.read_csv("gdrive/MyDrive/dev-0/in.tsv.xz", sep='\t', header=None, quoting=csv.QUOTE_NONE)[6]
-dev_data = dev_data.apply(clean)
+dev_data = pd.read_csv("gdrive/MyDrive/dev-0/in.tsv.xz", sep='\t', header=None, quoting=csv.QUOTE_NONE)
+dev_data[6] = dev_data[6].apply(clean)
+dev_data[7] = dev_data[7].apply(clean)
 predict_file("gdrive/MyDrive/dev-0/out.tsv", dev_data)

-test_data = pd.read_csv("gdrive/MyDrive/test-A/in.tsv.xz", sep='\t', header=None, quoting=csv.QUOTE_NONE)[6]
-test_data = test_data.apply(clean)
+test_data = pd.read_csv("gdrive/MyDrive/test-A/in.tsv.xz", sep='\t', header=None, quoting=csv.QUOTE_NONE)
+test_data[6] = test_data[6].apply(clean)
+test_data[7] = test_data[7].apply(clean)
 predict_file("gdrive/MyDrive/test-A/out.tsv", test_data)

+# !wget https://gonito.net/get/bin/geval
+# !chmod 777 geval
+!rm -r dev-0
 !cp -r gdrive/MyDrive/dev-0 dev-0
 !./geval -t dev-0 --metric PerplexityHashed
+!rm -r dev-0

File diff suppressed because it is too large