Language Modeling
10. A recurrent model with attention [exercises]
Jakub Pokrywka (2022)
notebook based on:
https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html
import random
import re

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
SOS_token = 0
EOS_token = 1
class Lang:
def __init__(self):
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}
self.n_words = 2 # Count SOS and EOS
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
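As a quick sanity check, Lang can be exercised on a couple of made-up sentences (illustrative only, not from the dataset); indices 0 and 1 stay reserved for SOS/EOS:
demo = Lang()  # throwaway vocabulary for illustration
demo.addSentence("we are here")
demo.addSentence("we are fine")
print(demo.word2index)  # {'we': 2, 'are': 3, 'here': 4, 'fine': 5}
print(demo.word2count)  # {'we': 2, 'are': 2, 'here': 1, 'fine': 1}
print(demo.n_words)     # 6: four words plus SOS and EOS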
pairs = []
with open('data/eng-pol.txt') as f:
    for line in f:
        eng_line, pol_line = line.lower().rstrip().split('\t')
        # split sentence-final punctuation off as a separate token
        eng_line = re.sub(r"([.!?])", r" \1", eng_line)
        # collapse any run of characters outside the allowed alphabet into a single space
        eng_line = re.sub(r"[^a-zA-Z.!?]+", r" ", eng_line)
        pol_line = re.sub(r"([.!?])", r" \1", pol_line)
        pol_line = re.sub(r"[^a-zA-Z.!?ąćęłńóśźżĄĆĘŁŃÓŚŹŻ]+", r" ", pol_line)
        pairs.append([eng_line, pol_line])
pairs[1]
['hi .', 'cześć .']
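The two substitutions above first pad sentence-final punctuation with a space so it becomes its own token, then replace any run of disallowed characters with a single space; applied to a made-up raw line:
raw = "you're ok!"  # illustrative input, not from the dataset
norm = re.sub(r"([.!?])", r" \1", raw)
norm = re.sub(r"[^a-zA-Z.!?]+", r" ", norm)
print(norm)  # you re ok !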
MAX_LENGTH = 10
eng_prefixes = (
"i am ", "i m ",
"he is", "he s ",
"she is", "she s ",
"you are", "you re ",
"we are", "we re ",
"they are", "they re "
)
# keep only short pairs (strictly fewer than MAX_LENGTH tokens per side)
pairs = [p for p in pairs if len(p[0].split(' ')) < MAX_LENGTH and len(p[1].split(' ')) < MAX_LENGTH]
# and only pairs whose English side starts with one of the prefixes above
pairs = [p for p in pairs if p[0].startswith(eng_prefixes)]
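Note that str.startswith accepts a tuple and matches if any element is a prefix; a quick check with made-up strings:
print('i m ok .'.startswith(eng_prefixes))       # True ('i m ' matches)
print('hello there .'.startswith(eng_prefixes))  # False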
eng_lang = Lang()
pol_lang = Lang()
for pair in pairs:
eng_lang.addSentence(pair[0])
pol_lang.addSentence(pair[1])
pairs[0]
['i m ok .', 'ze mną wszystko w porządku .']
pairs[1]
['i m up .', 'wstałem .']
pairs[2]
['i m tom .', 'jestem tom .']
eng_lang.n_words
1828
pol_lang.n_words
2883
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size):
super(EncoderRNN, self).__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
def forward(self, input, hidden):
embedded = self.embedding(input).view(1, 1, -1)
output = embedded
output, hidden = self.gru(output, hidden)
return output, hidden
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
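One encoder step consumes a single token index and returns the GRU output together with the updated hidden state; a shape check with toy sizes (not the training configuration):
enc = EncoderRNN(input_size=10, hidden_size=8).to(device)
h = enc.initHidden()
out, h = enc(torch.tensor([3], device=device), h)
print(out.shape, h.shape)  # torch.Size([1, 1, 8]) torch.Size([1, 1, 8])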
# Plain decoder without attention; kept as a baseline (only AttnDecoderRNN is trained below).
class DecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size):
super(DecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(output_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, input, hidden):
output = self.embedding(input).view(1, 1, -1)
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = self.softmax(self.out(output[0]))
return output, hidden
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.dropout_p = dropout_p
self.max_length = max_length
self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.dropout = nn.Dropout(self.dropout_p)
self.gru = nn.GRU(self.hidden_size, self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.output_size)
    def forward(self, input, hidden, encoder_outputs):
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)
        # score all max_length encoder positions from the current embedding and hidden state
        attn_weights = F.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        # weighted sum of encoder outputs: (1, 1, max_length) x (1, max_length, hidden_size)
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))
        # mix the attention context back into the decoder input
        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)
        output = F.relu(output)
        output, hidden = self.gru(output, hidden)
        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
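One step of the attention decoder takes the previous token, the hidden state, and the fixed-size table of encoder outputs, and returns log-probabilities plus one attention weight per encoder position; a shape check with toy sizes:
dec = AttnDecoderRNN(hidden_size=8, output_size=10).to(device)
h = dec.initHidden()
enc_outs = torch.zeros(MAX_LENGTH, 8, device=device)  # placeholder encoder outputs
out, h, attn = dec(torch.tensor([[SOS_token]], device=device), h, enc_outs)
print(out.shape, attn.shape)  # torch.Size([1, 10]) torch.Size([1, 10])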
def tensorFromSentence(sentence, lang):
    # map words to indices, append EOS, and shape as a (seq_len, 1) column vector
    indexes = [lang.word2index[word] for word in sentence.split(' ')]
    indexes.append(EOS_token)
    return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
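For the first training pair this yields a (5, 1) tensor: the four tokens of 'i m ok .' plus EOS:
t = tensorFromSentence(pairs[0][0], eng_lang)
print(t.shape)  # torch.Size([5, 1])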
teacher_forcing_ratio = 0.5  # probability of feeding the gold token (instead of the model's prediction) as the next decoder input
def train_one_batch(input_tensor, target_tensor, encoder, decoder, optimizer, criterion, max_length=MAX_LENGTH):
    # a "batch" here is a single sentence pair (batch size 1)
encoder_hidden = encoder.initHidden()
optimizer.zero_grad()
input_length = input_tensor.size(0)
target_length = target_tensor.size(0)
encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
loss = 0
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_tensor[ei], encoder_hidden)
encoder_outputs[ei] = encoder_output[0, 0]
decoder_input = torch.tensor([[SOS_token]], device=device)
decoder_hidden = encoder_hidden
    use_teacher_forcing = random.random() < teacher_forcing_ratio
if use_teacher_forcing:
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(decoder_input, decoder_hidden, encoder_outputs)
loss += criterion(decoder_output, target_tensor[di])
decoder_input = target_tensor[di] # Teacher forcing
else:
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(decoder_input, decoder_hidden, encoder_outputs)
topv, topi = decoder_output.topk(1)
decoder_input = topi.squeeze().detach() # detach from history as input
loss += criterion(decoder_output, target_tensor[di])
if decoder_input.item() == EOS_token:
break
loss.backward()
optimizer.step()
return loss.item() / target_length
def trainIters(encoder, decoder, n_iters, print_every=1000, learning_rate=0.01):
print_loss_total = 0 # Reset every print_every
encoder.train()
decoder.train()
optimizer = optim.SGD(list(encoder.parameters()) + list(decoder.parameters()), lr=learning_rate)
training_pairs = [random.choice(pairs) for _ in range(n_iters)]
training_pairs = [(tensorFromSentence(p[0], eng_lang), tensorFromSentence(p[1], pol_lang)) for p in training_pairs]
criterion = nn.NLLLoss()
for i in range(1, n_iters + 1):
training_pair = training_pairs[i - 1]
input_tensor = training_pair[0]
target_tensor = training_pair[1]
loss = train_one_batch(input_tensor,
target_tensor,
encoder,
decoder,
optimizer,
criterion)
print_loss_total += loss
if i % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print(f'iter: {i}, loss: {print_loss_avg}')
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
encoder.eval()
decoder.eval()
with torch.no_grad():
input_tensor = tensorFromSentence(sentence, eng_lang)
input_length = input_tensor.size()[0]
encoder_hidden = encoder.initHidden()
encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_tensor[ei], encoder_hidden)
            encoder_outputs[ei] = encoder_output[0, 0]
decoder_input = torch.tensor([[SOS_token]], device=device)
decoder_hidden = encoder_hidden
decoded_words = []
decoder_attentions = torch.zeros(max_length, max_length)
for di in range(max_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
decoder_attentions[di] = decoder_attention.data
topv, topi = decoder_output.data.topk(1)
if topi.item() == EOS_token:
decoded_words.append('<EOS>')
break
else:
decoded_words.append(pol_lang.index2word[topi.item()])
decoder_input = topi.squeeze().detach()
return decoded_words, decoder_attentions[:di + 1]
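Since evaluate also returns the attention matrix, it can be drawn as a heatmap, as the original tutorial does; a minimal sketch assuming matplotlib is available (show_attention is a hypothetical helper, not part of the notebook above):
import matplotlib.pyplot as plt

def show_attention(encoder, decoder, sentence):
    output_words, attentions = evaluate(encoder, decoder, sentence)
    fig, ax = plt.subplots()
    ax.matshow(attentions.numpy(), cmap='bone')  # rows: output tokens, columns: input positions
    ax.set_xticks(range(len(sentence.split(' ')) + 1))
    ax.set_xticklabels(sentence.split(' ') + ['<EOS>'], rotation=90)
    ax.set_yticks(range(len(output_words)))
    ax.set_yticklabels(output_words)
    plt.show()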
def evaluateRandomly(encoder, decoder, n=10):
for i in range(n):
pair = random.choice(pairs)
print('>', pair[0])
print('=', pair[1])
output_words, attentions = evaluate(encoder, decoder, pair[0])
output_sentence = ' '.join(output_words)
print('<', output_sentence)
print('')
hidden_size = 256
encoder1 = EncoderRNN(eng_lang.n_words, hidden_size).to(device)
attn_decoder1 = AttnDecoderRNN(hidden_size, pol_lang.n_words, dropout_p=0.1).to(device)
trainIters(encoder1, attn_decoder1, 10_000, print_every=50)
iter: 50, loss: 4.699110437711081
iter: 100, loss: 4.241607124086411
iter: 150, loss: 4.14866822333563
iter: 200, loss: 4.175457921709334
iter: 250, loss: 4.304153789429438
...
iter: 9800, loss: 2.300161985658464
iter: 9850, loss: 2.5070087575912483
iter: 9900, loss: 2.617770311056621
iter: 9950, loss: 2.756971993983738
iter: 10000, loss: 2.629019902910504
evaluateRandomly(encoder1, attn_decoder1)
> we re both in the same class .
= jesteśmy oboje w tej samej klasie .
< jesteśmy w w . <EOS>

> you re telling lies again .
= znowu kłamiesz .
< znowu mi . <EOS>

> i m glad you re back .
= cieszę się że wróciliście .
< cieszę się że . . <EOS>

> i m not going to have any fun .
= nie będę się bawił .
< nie wolno się . . <EOS>

> i m practising judo .
= trenuję dżudo .
< jestem . . <EOS>

> you re wasting our time .
= marnujesz nasz czas .
< masz ci na . . <EOS>

> he is anxious about her health .
= on martwi się o jej zdrowie .
< jest bardzo z niej . . <EOS>

> you re introverted .
= jesteś zamknięty w sobie .
< masz . <EOS>

> she s correct for sure .
= ona z pewnością ma rację .
< ona jest z z . <EOS>

> they re armed .
= są uzbrojeni .
< są . . <EOS>