Maciej(Linux) 2022-05-08 23:48:14 +02:00
parent be308d0c3c
commit fb4fdfff73
4 changed files with 17962 additions and 17956 deletions

File diff suppressed because it is too large

BIN  model1.bin  (new file; binary file not shown)

run.py  (52 lines changed)

@@ -9,16 +9,19 @@ import itertools
 import pandas as pd
 from torch.utils.data import DataLoader
 import csv
+import os
 
 def data_preprocessing(text):
     return re.sub(r'\p{P}', '', text.lower().replace('-\\n', '').replace('\\n', ' ').replace("'ll", " will").replace("-", "").replace("'ve", " have").replace("'s", " is"))
 
-def get_words_from_line(line):
+def get_words_from_line(line, s = True):
     line = line.rstrip()
-    yield '<s>'
+    if s:
+        yield '<s>'
     for m in re.finditer(r'[\p{L}0-9\*]+|\p{P}+', line):
         yield m.group(0).lower()
-    yield '</s>'
+    if s:
+        yield '</s>'
 
 def get_word_lines_from_file(data):
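The new `s` parameter makes the sentence-boundary markers optional, so the prediction code can tokenize a context line without emitting `<s>`/`</s>`. A quick sketch of the behavior, assuming run.py imports the third-party `regex` module as `re` (stdlib `re` rejects the `\p{L}`/`\p{P}` Unicode property classes used here):

import regex as re  # assumption: the script aliases regex as re

def get_words_from_line(line, s=True):
    line = line.rstrip()
    if s:
        yield '<s>'
    for m in re.finditer(r'[\p{L}0-9\*]+|\p{P}+', line):
        yield m.group(0).lower()
    if s:
        yield '</s>'

print(list(get_words_from_line('The cat sat.')))
# ['<s>', 'the', 'cat', 'sat', '.', '</s>']
print(list(get_words_from_line('The cat sat.', s=False)))
# ['the', 'cat', 'sat', '.']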
@@ -89,25 +92,29 @@ bigram_data = Bigrams(data, vocab_size)
 device = 'cpu'
 model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size).to(device)
-data = DataLoader(bigram_data, batch_size=5000)
-optimizer = torch.optim.Adam(model.parameters())
-criterion = torch.nn.NLLLoss()
 
-model.train()
-step = 0
-for x, y in data:
-    x = x.to(device)
-    y = y.to(device)
-    optimizer.zero_grad()
-    ypredicted = model(x)
-    loss = criterion(torch.log(ypredicted), y)
-    if step % 100 == 0:
-        print(step, loss)
-    step += 1
-    loss.backward()
-    optimizer.step()
+if(not os.path.exists('model1.bin')):
+    data = DataLoader(bigram_data, batch_size=5000)
+    optimizer = torch.optim.Adam(model.parameters())
+    criterion = torch.nn.NLLLoss()
 
-torch.save(model.state_dict(), 'model1.bin')
+    model.train()
+    step = 0
+    for x, y in data:
+        x = x.to(device)
+        y = y.to(device)
+        optimizer.zero_grad()
+        ypredicted = model(x)
+        loss = criterion(torch.log(ypredicted), y)
+        if step % 100 == 0:
+            print(step, loss)
+        step += 1
+        loss.backward()
+        optimizer.step()
+
+    torch.save(model.state_dict(), 'model1.bin')
+else:
+    model.load_state_dict(torch.load('model1.bin'))
 
 vocab = bigram_data.vocab
 prediction = 'the:0.03 be:0.03 to:0.03 of:0.025 and:0.025 a:0.025 in:0.020 that:0.020 have:0.015 I:0.010 it:0.010 for:0.010 not:0.010 on:0.010 with:0.010 he:0.010 as:0.010 you:0.010 do:0.010 at:0.010 :0.77'
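The new guard is the standard train-or-load checkpoint pattern: train and save the weights only when the checkpoint file is missing, otherwise restore them and skip training. A minimal self-contained sketch of the same pattern, using a hypothetical TinyModel in place of SimpleBigramNeuralLanguageModel:

import os
import torch

# Hypothetical stand-in for SimpleBigramNeuralLanguageModel.
class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 2)

    def forward(self, x):
        return self.linear(x)

model = TinyModel()
if not os.path.exists('tiny.bin'):
    # ... training loop would go here ...
    torch.save(model.state_dict(), 'tiny.bin')  # persist only the weights
else:
    model.load_state_dict(torch.load('tiny.bin'))
    model.eval()  # loaded weights are used for inference only

Saving state_dict() rather than the whole model keeps the checkpoint independent of the class's import path, which is why loading requires constructing the model first.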
@@ -131,7 +138,6 @@ def predict(f):
     with open(f'{f}/out.tsv', "w+", encoding="UTF-8") as f:
         for row in x:
-            result = {}
             before = None
             for before in get_words_from_line(data_preprocessing(str(row)), False):
                 pass
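Besides dropping the unused `result` dict, the surviving lines rely on the for/pass idiom: after an exhausted for loop the loop variable keeps its last value, so `before` ends up holding the final token of the preprocessed context (and stays None for an empty line). A tiny illustration:

tokens = ['in', 'the', 'morning']
before = None
for before in tokens:
    pass
print(before)  # 'morning' -- the last token, used as the bigram context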
@@ -144,5 +150,5 @@ def predict(f):
             pred_str = pred_str.strip()
             f.write(pred_str + "\n")
 
-prediction("dev-0/")
-prediction("test-A/")
+predict("dev-0/")
+predict("test-A/")

File diff suppressed because it is too large