Imports
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import re
import os
import random
import torch
import pandas as pd
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
from torchtext.data.metrics import bleu_score
from torch.utils.data import TensorDataset, DataLoader, RandomSampler
import numpy as np  # used later in get_dataloader
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
print(f'Is CUDA supported by this system? {torch.cuda.is_available()}')
print(f"CUDA version: {torch.version.cuda}")
cuda_id = torch.cuda.current_device()
print(f'ID of current CUDA device: {torch.cuda.current_device()}')
print(f'Name of current CUDA device: {torch.cuda.get_device_name(cuda_id)}')
Is CUDA supported by this system? True
CUDA version: 12.1
ID of current CUDA device: 0
Name of current CUDA device: NVIDIA GeForce GTX 1660 Ti
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
cuda
Converting words to tensors
SOS_token = 0
EOS_token = 1
class Lang:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}
self.n_words = 2 # Count SOS and EOS
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
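A toy example (made-up sentence) of how Lang assigns indices, starting after the reserved SOS and EOS tokens:
demo = Lang('demo')
demo.addSentence('jestem gotowy')
print(demo.word2index)   # {'jestem': 2, 'gotowy': 3}
print(demo.index2word)   # {0: 'SOS', 1: 'EOS', 2: 'jestem', 3: 'gotowy'}
print(demo.n_words)      # 4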
Data preparation
# Turn a Unicode string to plain ASCII, thanks to
# https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z!?]+", r" ", s)
return s.strip()
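A quick check of the normalization on an illustrative Polish sentence (diacritics are stripped, text is lower-cased, and everything except letters and !? is replaced by spaces):
print(normalizeString('Nie jestem katoliczką!'))   # nie jestem katoliczka !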
Loading the data
def readLangs(lang1, lang2, reverse=False):
print("Reading lines...")
# Read the file and split into lines
lines = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8').\
read().strip().split('\n')
# Split every line into pairs and normalize
pairs = [[normalizeString(s) for s in l.split('\t')[:-1]] for l in lines]
# Reverse pairs, make Lang instances
if reverse:
        pairs = [list(reversed(p)) for p in pairs]
input_lang = Lang(lang2)
output_lang = Lang(lang1)
else:
input_lang = Lang(lang1)
output_lang = Lang(lang2)
return input_lang, output_lang, pairs
Data filtering
Restrict the pairs to sentences of fewer than 10 words whose English side starts with one of the prefixes below.
MAX_LENGTH = 10
eng_prefixes = (
"i am ", "i m ",
"he is", "he s ",
"she is", "she s ",
"you are", "you re ",
"we are", "we re ",
"they are", "they re "
)
def filterPair(p):
return len(p[0].split(' ')) < MAX_LENGTH and \
len(p[1].split(' ')) < MAX_LENGTH and \
p[1].startswith(eng_prefixes)
def filterPairs(pairs):
return [pair for pair in pairs if filterPair(pair)]
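With reverse=True the pairs are stored as [polish, english], so the prefix check applies to the English side. An illustrative check on made-up pairs:
print(filterPair(['nie umieram', 'i m not dying']))   # True
print(filterPair(['dzien dobry', 'good morning']))    # False, no matching English prefix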
def prepareData(lang1, lang2, reverse=False):
input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
print("Read %s sentence pairs" % len(pairs))
pairs = filterPairs(pairs)
print("Trimmed to %s sentence pairs" % len(pairs))
print("Counting words...")
for pair in pairs:
input_lang.addSentence(pair[0])
output_lang.addSentence(pair[1])
print("Counted words:")
print(input_lang.name, input_lang.n_words)
print(output_lang.name, output_lang.n_words)
return input_lang, output_lang, pairs
input_lang, output_lang, pairs = prepareData('eng', 'pol', True)
print(random.choice(pairs))
Reading lines...
Read 49943 sentence pairs
Trimmed to 3613 sentence pairs
Counting words...
Counted words:
pol 3070
eng 1969
['nie umieram', 'i m not dying']
Model
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size, dropout_p=0.1):
super(EncoderRNN, self).__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True)
self.dropout = nn.Dropout(dropout_p)
def forward(self, input):
embedded = self.dropout(self.embedding(input))
output, hidden = self.gru(embedded)
return output, hidden
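A minimal shape check of the encoder with toy sizes (CPU tensors, chosen only for illustration):
enc = EncoderRNN(input_size=10, hidden_size=8)
x = torch.randint(0, 10, (2, 5))            # batch of 2 sequences of length 5
out, hidden = enc(x)
print(out.shape, hidden.shape)              # torch.Size([2, 5, 8]) torch.Size([1, 2, 8])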
class DecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size):
super(DecoderRNN, self).__init__()
self.embedding = nn.Embedding(output_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True)
self.out = nn.Linear(hidden_size, output_size)
def forward(self, encoder_outputs, encoder_hidden, target_tensor=None):
batch_size = encoder_outputs.size(0)
decoder_input = torch.empty(batch_size, 1, dtype=torch.long, device=device).fill_(SOS_token)
decoder_hidden = encoder_hidden
decoder_outputs = []
for i in range(MAX_LENGTH):
decoder_output, decoder_hidden = self.forward_step(decoder_input, decoder_hidden)
decoder_outputs.append(decoder_output)
if target_tensor is not None:
# Teacher forcing: Feed the target as the next input
decoder_input = target_tensor[:, i].unsqueeze(1) # Teacher forcing
else:
# Without teacher forcing: use its own predictions as the next input
_, topi = decoder_output.topk(1)
decoder_input = topi.squeeze(-1).detach() # detach from history as input
decoder_outputs = torch.cat(decoder_outputs, dim=1)
decoder_outputs = F.log_softmax(decoder_outputs, dim=-1)
return decoder_outputs, decoder_hidden, None # We return `None` for consistency in the training loop
def forward_step(self, input, hidden):
output = self.embedding(input)
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = self.out(output)
return output, hidden
class BahdanauAttention(nn.Module):
def __init__(self, hidden_size):
super(BahdanauAttention, self).__init__()
self.Wa = nn.Linear(hidden_size, hidden_size)
self.Ua = nn.Linear(hidden_size, hidden_size)
self.Va = nn.Linear(hidden_size, 1)
def forward(self, query, keys):
scores = self.Va(torch.tanh(self.Wa(query) + self.Ua(keys)))
scores = scores.squeeze(2).unsqueeze(1)
weights = F.softmax(scores, dim=-1)
context = torch.bmm(weights, keys)
return context, weights
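The scores follow Bahdanau-style additive attention, score = Va(tanh(Wa(query) + Ua(keys))), softmaxed over the source positions. A toy shape check with arbitrary sizes:
attn = BahdanauAttention(hidden_size=8)
query = torch.zeros(2, 1, 8)                # decoder hidden state, batch-first
keys = torch.zeros(2, 5, 8)                 # encoder outputs for 5 source positions
context, weights = attn(query, keys)
print(context.shape, weights.shape)         # torch.Size([2, 1, 8]) torch.Size([2, 1, 5])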
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, dropout_p=0.1):
super(AttnDecoderRNN, self).__init__()
self.embedding = nn.Embedding(output_size, hidden_size)
self.attention = BahdanauAttention(hidden_size)
self.gru = nn.GRU(2 * hidden_size, hidden_size, batch_first=True)
self.out = nn.Linear(hidden_size, output_size)
self.dropout = nn.Dropout(dropout_p)
def forward(self, encoder_outputs, encoder_hidden, target_tensor=None):
batch_size = encoder_outputs.size(0)
decoder_input = torch.empty(batch_size, 1, dtype=torch.long, device=device).fill_(SOS_token)
decoder_hidden = encoder_hidden
decoder_outputs = []
attentions = []
for i in range(MAX_LENGTH):
decoder_output, decoder_hidden, attn_weights = self.forward_step(
decoder_input, decoder_hidden, encoder_outputs
)
decoder_outputs.append(decoder_output)
attentions.append(attn_weights)
if target_tensor is not None:
# Teacher forcing: Feed the target as the next input
decoder_input = target_tensor[:, i].unsqueeze(1) # Teacher forcing
else:
# Without teacher forcing: use its own predictions as the next input
_, topi = decoder_output.topk(1)
decoder_input = topi.squeeze(-1).detach() # detach from history as input
decoder_outputs = torch.cat(decoder_outputs, dim=1)
decoder_outputs = F.log_softmax(decoder_outputs, dim=-1)
attentions = torch.cat(attentions, dim=1)
return decoder_outputs, decoder_hidden, attentions
def forward_step(self, input, hidden, encoder_outputs):
embedded = self.dropout(self.embedding(input))
query = hidden.permute(1, 0, 2)
context, attn_weights = self.attention(query, encoder_outputs)
input_gru = torch.cat((embedded, context), dim=2)
output, hidden = self.gru(input_gru, hidden)
output = self.out(output)
return output, hidden, attn_weights
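An end-to-end shape check of encoder plus attention decoder with toy vocabulary sizes (uses the global device and MAX_LENGTH; since no target is passed, the decoder runs greedily for MAX_LENGTH steps):
enc = EncoderRNN(input_size=20, hidden_size=16).to(device)
dec = AttnDecoderRNN(hidden_size=16, output_size=20).to(device)
x = torch.randint(0, 20, (2, MAX_LENGTH), device=device)
enc_out, enc_hid = enc(x)
dec_out, dec_hid, attn = dec(enc_out, enc_hid)
print(dec_out.shape, attn.shape)            # torch.Size([2, 10, 20]) torch.Size([2, 10, 10])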
def indexesFromSentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
def tensorFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
return torch.tensor(indexes, dtype=torch.long, device=device).view(1, -1)
def tensorsFromPair(pair):
input_tensor = tensorFromSentence(input_lang, pair[0])
target_tensor = tensorFromSentence(output_lang, pair[1])
return (input_tensor, target_tensor)
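A quick look at the tensors produced for one randomly chosen pair (shapes depend on sentence length; an EOS token is appended to each side):
inp, tgt = tensorsFromPair(random.choice(pairs))
print(inp.shape, tgt.shape)   # e.g. torch.Size([1, 4]) torch.Size([1, 6])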
def get_dataloader(batch_size):
    input_lang, output_lang, pairs = prepareData('eng', 'pol', True)
n = len(pairs)
input_ids = np.zeros((n, MAX_LENGTH), dtype=np.int32)
target_ids = np.zeros((n, MAX_LENGTH), dtype=np.int32)
for idx, (inp, tgt) in enumerate(pairs):
inp_ids = indexesFromSentence(input_lang, inp)
tgt_ids = indexesFromSentence(output_lang, tgt)
inp_ids.append(EOS_token)
tgt_ids.append(EOS_token)
input_ids[idx, :len(inp_ids)] = inp_ids
target_ids[idx, :len(tgt_ids)] = tgt_ids
train_data = TensorDataset(torch.LongTensor(input_ids).to(device),
torch.LongTensor(target_ids).to(device))
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
return input_lang, output_lang, train_dataloader
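An optional sanity check of the batching; it re-reads the data file, so it is only a sketch:
_, _, dl = get_dataloader(batch_size=4)
xb, yb = next(iter(dl))
print(xb.shape, yb.shape)     # torch.Size([4, 10]) torch.Size([4, 10])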
Training
def train_epoch(dataloader, encoder, decoder, encoder_optimizer,
decoder_optimizer, criterion):
total_loss = 0
for data in dataloader:
input_tensor, target_tensor = data
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
encoder_outputs, encoder_hidden = encoder(input_tensor)
decoder_outputs, _, _ = decoder(encoder_outputs, encoder_hidden, target_tensor)
loss = criterion(
decoder_outputs.view(-1, decoder_outputs.size(-1)),
target_tensor.view(-1)
)
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
total_loss += loss.item()
return total_loss / len(dataloader)
import time
import math
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.ticker as ticker
import numpy as np
def showPlot(points):
    fig, ax = plt.subplots()  # single figure; a separate plt.figure() call would only produce an extra empty figure
# this locator puts ticks at regular intervals
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
def train(train_dataloader, encoder, decoder, n_epochs, learning_rate=0.001,
print_every=100, plot_every=100):
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate)
criterion = nn.NLLLoss()
for epoch in range(1, n_epochs + 1):
loss = train_epoch(train_dataloader, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion)
print_loss_total += loss
plot_loss_total += loss
if epoch % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (timeSince(start, epoch / n_epochs),
epoch, epoch / n_epochs * 100, print_loss_avg))
if epoch % plot_every == 0:
plot_loss_avg = plot_loss_total / plot_every
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
showPlot(plot_losses)
hidden_size = 256
batch_size = 64
input_lang, output_lang, train_dataloader = get_dataloader(batch_size)
encoder = EncoderRNN(input_lang.n_words, hidden_size).to(device)
decoder = AttnDecoderRNN(hidden_size, output_lang.n_words).to(device)
train(train_dataloader, encoder, decoder, 100, print_every=5, plot_every=5)
Reading lines...
Read 49943 sentence pairs
Trimmed to 3613 sentence pairs
Counting words...
Counted words:
pol 3070
eng 1969
0m 7s (- 2m 18s) (5 5%) 1.9851
0m 14s (- 2m 8s) (10 10%) 1.0089
0m 21s (- 1m 59s) (15 15%) 0.5189
0m 28s (- 1m 52s) (20 20%) 0.2294
0m 35s (- 1m 45s) (25 25%) 0.0961
0m 42s (- 1m 38s) (30 30%) 0.0509
0m 50s (- 1m 33s) (35 35%) 0.0355
0m 57s (- 1m 25s) (40 40%) 0.0289
1m 4s (- 1m 18s) (45 45%) 0.0249
1m 11s (- 1m 11s) (50 50%) 0.0228
1m 18s (- 1m 4s) (55 55%) 0.0207
1m 25s (- 0m 57s) (60 60%) 0.0215
1m 32s (- 0m 49s) (65 65%) 0.0249
1m 39s (- 0m 42s) (70 70%) 0.0184
1m 47s (- 0m 35s) (75 75%) 0.0172
1m 55s (- 0m 28s) (80 80%) 0.0166
2m 3s (- 0m 21s) (85 85%) 0.0163
2m 11s (- 0m 14s) (90 90%) 0.0163
2m 18s (- 0m 7s) (95 95%) 0.0176
2m 27s (- 0m 0s) (100 100%) 0.0256
Evaluation
def evaluate(encoder, decoder, sentence, input_lang, output_lang):
with torch.no_grad():
input_tensor = tensorFromSentence(input_lang, sentence)
encoder_outputs, encoder_hidden = encoder(input_tensor)
decoder_outputs, decoder_hidden, decoder_attn = decoder(encoder_outputs, encoder_hidden)
_, topi = decoder_outputs.topk(1)
decoded_ids = topi.squeeze()
decoded_words = []
for idx in decoded_ids:
if idx.item() == EOS_token:
decoded_words.append('<EOS>')
break
decoded_words.append(output_lang.index2word[idx.item()])
return decoded_words, decoder_attn
def evaluateRandomly(encoder, decoder, n=10):
for i in range(n):
pair = random.choice(pairs)
print('>', pair[0])
print('=', pair[1])
output_words, _ = evaluate(encoder, decoder, pair[0], input_lang, output_lang)
output_sentence = ' '.join(output_words)
print('<', output_sentence)
print('')
encoder.eval()
decoder.eval()
evaluateRandomly(encoder, decoder)
> utne sobie drzemke
= i m going to go take a nap
< i m going to go take a nap wallet <EOS>

> nie jestem co do tego pewny to zalezy
= i m not sure about that it depends
< i m not sure about that it depends <EOS>

> nie kupujemy
= we re not buying
< we re not buying <EOS>

> nie jestem g upi
= i m not stupid
< i m not stupid <EOS>

> jestes wymagajacy
= you re demanding
< you re demanding <EOS>

> jestem m ody ale nie az tak
= i m young but i m not that young
< i m young but i m not that young <EOS>

> nie jestem ubrana
= i m not dressed
< i m not dressed <EOS>

> jestem gotowy sie z tym pogodzic
= i m ready to accept it
< i m ready to accept it <EOS>

> jestem pewny ze ona nied ugo wroci
= i m sure that she will come back soon
< i m sure that she will come back soon <EOS>

> w niedziele mam wolne
= i m free on sunday
< i m free on sunday <EOS>
def showAttention(input_sentence, output_words, attentions):
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(attentions.cpu().numpy(), cmap='bone')
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + input_sentence.split(' ') +
['<EOS>'], rotation=90)
ax.set_yticklabels([''] + output_words)
# Show label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
def evaluateAndShowAttention(input_sentence):
input_sentence = normalizeString(input_sentence)
output_words, attentions = evaluate(encoder, decoder, input_sentence, input_lang, output_lang)
print('input =', input_sentence)
print('output =', ' '.join(output_words))
showAttention(input_sentence, output_words, attentions[0, :len(output_words), :])
evaluateAndShowAttention('Nie jestem katoliczką')
input = nie jestem katoliczka
output = i m not catholic <EOS>
evaluateAndShowAttention('Przykro nam ze to sie zdarzyło')
input = przykro nam ze to sie zdarzy o
output = we re sorry that it happened <EOS>
evaluateAndShowAttention('On mówi płynnie po francusku')
input = on mowi p ynnie po francusku
output = he is fluent in french <EOS>
BLEU
def filter_rows(row):
return len(row["eng"].split(' '))<MAX_LENGTH and \
len(row["pol"].split(' '))<MAX_LENGTH and \
row["eng"].startswith(eng_prefixes)
def evaluateWithTokenization(input_sentence):
input_sentence = normalizeString(input_sentence)
output_words, attentions = evaluate(encoder, decoder, input_sentence, input_lang, output_lang)
if "<EOS>" in output_words:
output_words.remove("<EOS>")
return output_words
df = pd.read_csv("data/eng-pol.txt", sep='\t', names=["eng", "pol", "attribution"])
df["eng"] = df["eng"].apply(normalizeString)
df["pol"] = df["pol"].apply(normalizeString)
df_filtered = df.apply(filter_rows, axis=1)
test_df = df[df_filtered].sample(frac=1)
test_df["eng_token"] = test_df["eng"].apply(lambda x: x.split())
test_df["eng_eval"] = test_df["pol"].apply(lambda x: evaluateWithTokenization(x))
references_corpus = test_df["eng_token"].values.tolist()
candidate_corpus = test_df["eng_eval"].values.tolist()
references_corpus = [[el] for el in references_corpus]
bleu_score(candidate_corpus, references_corpus)
0.9301728010177612
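For reference, bleu_score expects a list of tokenized candidates and a list of lists of tokenized references; a toy call with made-up tokens:
candidate = [['i', 'm', 'not', 'dying']]
references = [[['i', 'm', 'not', 'dying']]]
print(bleu_score(candidate, references))   # 1.0 for an exact match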