aitech-moj/cw/11_Model_rekurencyjny_z_atencją.ipynb

Language Modeling

10. Recurrent model with attention [lab exercises]

Jakub Pokrywka (2022)


# https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random

import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
SOS_token = 0
EOS_token = 1

# vocabulary helper for one language: maps words to indices and back
class Lang:
    def __init__(self):
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS"}
        self.n_words = 2  # Count SOS and EOS

    def addSentence(self, sentence):
        for word in sentence.split(' '):
            self.addWord(word)

    def addWord(self, word):
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.word2count[word] = 1
            self.index2word[self.n_words] = word
            self.n_words += 1
        else:
            self.word2count[word] += 1
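A quick sanity check of how Lang assigns indices (the sentence below is only an illustrative example, not taken from the corpus):

lang = Lang()
lang.addSentence('i am cold .')
print(lang.word2index)   # {'i': 2, 'am': 3, 'cold': 4, '.': 5}
print(lang.index2word)   # {0: 'SOS', 1: 'EOS', 2: 'i', 3: 'am', 4: 'cold', 5: '.'}
print(lang.n_words)      # 6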
# strip diacritics: decompose to NFD and drop combining marks (category Mn)
def unicodeToAscii(s):
    return ''.join(
        c for c in unicodedata.normalize('NFD', s)
        if unicodedata.category(c) != 'Mn'
    )
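For example, NFD normalization splits an accented letter into a base character plus a combining mark, which is then dropped:

unicodeToAscii('français déjà')   # -> 'francais deja'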
pairs = []
with open('data/eng-fra.txt') as f:
    for line in f:
        eng_line, fra_line = line.lower().rstrip().split('\t')

        eng_line = re.sub(r"([.!?])", r" \1", eng_line)
        eng_line = re.sub(r"[^a-zA-Z.!?]+", r" ", eng_line)

        fra_line = re.sub(r"([.!?])", r" \1", fra_line)
        fra_line = re.sub(r"[^a-zA-Z.!?]+", r" ", fra_line)
        
        eng_line = unicodeToAscii(eng_line)
        fra_line = unicodeToAscii(fra_line)

        pairs.append([eng_line, fra_line])


pairs[1]
['run !', 'cours !']
MAX_LENGTH = 10
eng_prefixes = (
    "i am ", "i m ",
    "he is", "he s ",
    "she is", "she s ",
    "you are", "you re ",
    "we are", "we re ",
    "they are", "they re "
)

# keep only short pairs whose English side starts with one of the simple prefixes above
pairs = [p for p in pairs if len(p[0].split(' ')) < MAX_LENGTH and len(p[1].split(' ')) < MAX_LENGTH]
pairs = [p for p in pairs if p[0].startswith(eng_prefixes)]

eng_lang = Lang()
fra_lang = Lang()

for pair in pairs:
    eng_lang.addSentence(pair[0])
    fra_lang.addSentence(pair[1])
pairs[0]
['i m .', 'j ai ans .']
pairs[1]
['i m ok .', 'je vais bien .']
pairs[2]
['i m ok .', ' a va .']
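The leading space in pairs[2] comes from the character filter: the French 'ça va .' lost its 'ç' because the regex ran before unicodeToAscii. A quick look at the filtered corpus (the exact counts depend on the eng-fra.txt snapshot used):

print(len(pairs))
print(eng_lang.n_words, fra_lang.n_words)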
class EncoderRNN(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size

        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)

    def forward(self, input, hidden):
        embedded = self.embedding(input).view(1, 1, -1)
        output = embedded
        output, hidden = self.gru(output, hidden)
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)
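A minimal shape check for the encoder (the toy vocabulary size of 10 below is hypothetical): for a single token, both the output and the hidden state have shape (1, 1, hidden_size), and the hidden state is what gets threaded through the sequence.

enc = EncoderRNN(input_size=10, hidden_size=8).to(device)
hidden = enc.initHidden()
output, hidden = enc(torch.tensor([3], device=device), hidden)
print(output.shape, hidden.shape)   # torch.Size([1, 1, 8]) torch.Size([1, 1, 8])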
class DecoderRNN(nn.Module):
    def __init__(self, hidden_size, output_size):
        super(DecoderRNN, self).__init__()
        self.hidden_size = hidden_size

        self.embedding = nn.Embedding(output_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        output = self.embedding(input).view(1, 1, -1)
        output = F.relu(output)
        output, hidden = self.gru(output, hidden)
        output = self.softmax(self.out(output[0]))
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)
class AttnDecoderRNN(nn.Module):
    def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
        super(AttnDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.dropout_p = dropout_p
        self.max_length = max_length

        self.embedding = nn.Embedding(self.output_size, self.hidden_size)
        self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
        self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.dropout = nn.Dropout(self.dropout_p)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size)
        self.out = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input, hidden, encoder_outputs):
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)

        attn_weights = F.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))

        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)

        output = F.relu(output)
        output, hidden = self.gru(output, hidden)

        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)
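In the forward pass above, the attention weights are a distribution over the (up to) max_length encoder positions: the embedded input token and the previous hidden state are concatenated (size 2 * hidden_size), projected by self.attn to max_length scores, softmax-normalized, and multiplied with the encoder outputs via torch.bmm to form a context vector. Note that the weights depend only on the decoder's own input and hidden state, not on the content of the encoder states, which is a simplification relative to classic Bahdanau attention. A shape walkthrough with a hypothetical output vocabulary of 20 words:

dec = AttnDecoderRNN(hidden_size=256, output_size=20).to(device)
hidden = dec.initHidden()                                       # (1, 1, 256)
encoder_outputs = torch.zeros(MAX_LENGTH, 256, device=device)   # (10, 256)
output, hidden, attn = dec(torch.tensor([[SOS_token]], device=device), hidden, encoder_outputs)
print(output.shape, attn.shape)   # torch.Size([1, 20]) torch.Size([1, 10])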
def tensorFromSentence(sentence, lang):
    indexes = [lang.word2index[word] for word in sentence.split(' ')]
    indexes.append(EOS_token)
    return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
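For instance, the first English sentence 'i m .' maps to the indices of 'i', 'm', '.' followed by EOS, as a column vector of shape (number of words + 1, 1):

tensorFromSentence(pairs[0][0], eng_lang)
# tensor([[2], [3], [4], [1]])  (on the selected device)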
teacher_forcing_ratio = 0.5

def train_one_batch(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
    encoder_hidden = encoder.initHidden()

    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()

    input_length = input_tensor.size(0)
    target_length = target_tensor.size(0)

    encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)

    loss = 0

    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(
            input_tensor[ei], encoder_hidden)
        encoder_outputs[ei] = encoder_output[0, 0]

    decoder_input = torch.tensor([[SOS_token]], device=device)

    decoder_hidden = encoder_hidden

    use_teacher_forcing = random.random() < teacher_forcing_ratio

    if use_teacher_forcing:
        # Teacher forcing: Feed the target as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            loss += criterion(decoder_output, target_tensor[di])
            decoder_input = target_tensor[di]  # Teacher forcing

    else:
        # Without teacher forcing: use its own predictions as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            topv, topi = decoder_output.topk(1)
            decoder_input = topi.squeeze().detach()  # detach from history as input

            loss += criterion(decoder_output, target_tensor[di])
            if decoder_input.item() == EOS_token:
                break

    loss.backward()

    encoder_optimizer.step()
    decoder_optimizer.step()

    return loss.item() / target_length
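The value returned by train_one_batch is the NLLLoss summed over the decoded steps and divided by the target length, i.e. approximately an average per-token negative log-likelihood, so its exponential can be read as a rough per-token perplexity for that example:

import math
math.exp(3.0)   # ~20.1: a reported loss of 3.0 corresponds to a per-token perplexity of about 20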
def trainIters(encoder, decoder, n_iters, print_every=1000, learning_rate=0.01):
    print_loss_total = 0  # Reset every print_every

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    
    training_pairs = [random.choice(pairs) for _ in range(n_iters)]
    training_pairs = [(tensorFromSentence(p[0], eng_lang), tensorFromSentence(p[1], fra_lang)) for p in training_pairs]
    
    criterion = nn.NLLLoss()

    for i in range(1, n_iters + 1):
        training_pair = training_pairs[i - 1]
        input_tensor = training_pair[0]
        target_tensor = training_pair[1]

        loss = train_one_batch(input_tensor,
                               target_tensor,
                               encoder,
                               decoder,
                               encoder_optimizer,
                               decoder_optimizer,
                               criterion)
        
        print_loss_total += loss

        if i % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print(f'iter: {i}, loss: {print_loss_avg}')
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
    with torch.no_grad():
        input_tensor = tensorFromSentence(sentence, eng_lang)
        input_length = input_tensor.size()[0]
        encoder_hidden = encoder.initHidden()

        encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)

        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder(input_tensor[ei],
                                                     encoder_hidden)
            encoder_outputs[ei] += encoder_output[0, 0]

        decoder_input = torch.tensor([[SOS_token]], device=device)  # SOS

        decoder_hidden = encoder_hidden

        decoded_words = []
        decoder_attentions = torch.zeros(max_length, max_length)

        for di in range(max_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            decoder_attentions[di] = decoder_attention.data
            topv, topi = decoder_output.data.topk(1)
            if topi.item() == EOS_token:
                decoded_words.append('<EOS>')
                break
            else:
                decoded_words.append(fra_lang.index2word[topi.item()])

            decoder_input = topi.squeeze().detach()

        return decoded_words, decoder_attentions[:di + 1]
def evaluateRandomly(encoder, decoder, n=10):
    for i in range(n):
        pair = random.choice(pairs)
        print('>', pair[0])
        print('=', pair[1])
        output_words, attentions = evaluate(encoder, decoder, pair[0])
        output_sentence = ' '.join(output_words)
        print('<', output_sentence)
        print('')
hidden_size = 256
encoder1 = EncoderRNN(eng_lang.n_words, hidden_size).to(device)
attn_decoder1 = AttnDecoderRNN(hidden_size, fra_lang.n_words, dropout_p=0.1).to(device)

trainIters(encoder1, attn_decoder1, 75000, print_every=50)
iter: 50, loss: 4.78930813773473
iter: 100, loss: 4.554949267220875
iter: 150, loss: 4.238516052685087
iter: 200, loss: 4.279887475513276
iter: 250, loss: 4.1802274973884455
iter: 300, loss: 4.2113521892305394
iter: 350, loss: 4.266180963228619
iter: 400, loss: 4.225914733432588
iter: 450, loss: 4.1369073431075565
iter: 500, loss: 3.9906799076019768
iter: 550, loss: 3.842005534717016
iter: 600, loss: 4.081443620484972
iter: 650, loss: 4.030401878296383
iter: 700, loss: 3.869014380984837
iter: 750, loss: 3.8505467753031906
iter: 800, loss: 3.855170104072209
iter: 850, loss: 3.675745445599631
iter: 900, loss: 3.9147777624584386
iter: 950, loss: 3.766264297788106
iter: 1000, loss: 3.6813155986997814
iter: 1050, loss: 3.9307321495934144
iter: 1100, loss: 3.9047770059525027
iter: 1150, loss: 3.655722749588981
iter: 1200, loss: 3.540693810886806
iter: 1250, loss: 3.790360960324605
iter: 1300, loss: 3.7472636015907153
iter: 1350, loss: 3.641857419574072
iter: 1400, loss: 3.717327400631375
iter: 1450, loss: 3.4848567311423166
iter: 1500, loss: 3.56774485397339
iter: 1550, loss: 3.460277635226175
iter: 1600, loss: 3.241899683013796
iter: 1650, loss: 3.50151977614751
iter: 1700, loss: 3.621569488313462
iter: 1750, loss: 3.3851226735947626
iter: 1800, loss: 3.346289497057597
iter: 1850, loss: 3.5180823354569695
iter: 1900, loss: 3.433616197676886
iter: 1950, loss: 3.6162788327080864
iter: 2000, loss: 3.4990604458763492
iter: 2050, loss: 3.3144700173423405
iter: 2100, loss: 3.2962356294980135
iter: 2150, loss: 3.1448448797861728
iter: 2200, loss: 3.6958242581534018
iter: 2250, loss: 3.5269318538241925
iter: 2300, loss: 3.180744191850934
iter: 2350, loss: 3.317159715145354
iter: 2400, loss: 3.638545340795366
iter: 2450, loss: 3.7591161967988995
iter: 2500, loss: 3.3513535446742218
iter: 2550, loss: 3.4554441847271393
iter: 2600, loss: 2.9394915195343994
iter: 2650, loss: 3.370902210848673
iter: 2700, loss: 3.4259227318839423
iter: 2750, loss: 3.4058353806904393
iter: 2800, loss: 3.467306881359647
iter: 2850, loss: 3.222254538074372
iter: 2900, loss: 3.3392559226808087
iter: 2950, loss: 3.4203980594362533
iter: 3000, loss: 3.3507530433563955
iter: 3050, loss: 3.4326547555317966
iter: 3100, loss: 3.1755515496390205
iter: 3150, loss: 3.3925877854634847
iter: 3200, loss: 3.223531436912598
iter: 3250, loss: 3.3089625614862603
iter: 3300, loss: 3.367763715501815
iter: 3350, loss: 3.4278301871163497
iter: 3400, loss: 3.373292277381534
iter: 3450, loss: 3.3497054475829717
iter: 3500, loss: 3.402910869681646
iter: 3550, loss: 3.072571641732776
iter: 3600, loss: 3.2611226563832116
iter: 3650, loss: 3.231520605495998
iter: 3700, loss: 3.3788801974569043
iter: 3750, loss: 3.176644308181036
iter: 3800, loss: 3.2255533708693496
iter: 3850, loss: 3.2362594686387083
iter: 3900, loss: 3.095807164230044
iter: 3950, loss: 3.2343999077024916
iter: 4000, loss: 3.3681417366512245
iter: 4050, loss: 3.0732023419879737
iter: 4100, loss: 3.0663742440617283
iter: 4150, loss: 3.396770855048347
iter: 4200, loss: 3.4262332421522292
iter: 4250, loss: 3.060121847773354
iter: 4300, loss: 2.895130627753243
iter: 4350, loss: 3.017712699065133
iter: 4400, loss: 3.1289404028559487
iter: 4450, loss: 3.163725920904249
iter: 4500, loss: 3.3627441662606743
iter: 4550, loss: 3.409984823173947
iter: 4600, loss: 2.8944704760899618
iter: 4650, loss: 3.0016444209568083
iter: 4700, loss: 2.8574393688837683
iter: 4750, loss: 3.1946328716656525
iter: 4800, loss: 2.768447057353125
iter: 4850, loss: 3.075327144675784
iter: 4900, loss: 3.268370175997416
iter: 4950, loss: 3.1798231331053235
iter: 5000, loss: 3.3217560536218063
iter: 5050, loss: 3.006732604223585
iter: 5100, loss: 3.3575944598061698
iter: 5150, loss: 2.9057663469655175
iter: 5200, loss: 2.8928466574502374
iter: 5250, loss: 3.061066797528948
iter: 5300, loss: 3.35562970057745
iter: 5350, loss: 2.9118076042901895
iter: 5400, loss: 2.9514354321918783
iter: 5450, loss: 2.9334804391406832
iter: 5500, loss: 3.204634138440329
iter: 5550, loss: 2.8140748963961526
iter: 5600, loss: 3.011708143741365
iter: 5650, loss: 3.323859388586074
iter: 5700, loss: 2.8442912295810756
iter: 5750, loss: 2.80684267281729
iter: 5800, loss: 3.1174840584860903
iter: 5850, loss: 2.6991389470478837
iter: 5900, loss: 2.9698236653237116
iter: 5950, loss: 3.0238281039586137
iter: 6000, loss: 2.8812837354947645
iter: 6050, loss: 3.1709352504639394
iter: 6100, loss: 2.937920509209709
iter: 6150, loss: 3.178728113076043
iter: 6200, loss: 2.8974244089429337
iter: 6250, loss: 2.809626478180052
iter: 6300, loss: 2.781241159703996
iter: 6350, loss: 2.9004218400395105
iter: 6400, loss: 2.9118271145669246
iter: 6450, loss: 2.8842602037096787
iter: 6500, loss: 2.9489114957536966
iter: 6550, loss: 2.9503131193130736
iter: 6600, loss: 2.8961831474304187
iter: 6650, loss: 3.002027267266834
iter: 6700, loss: 3.0047303264103236
iter: 6750, loss: 2.958453589060949
iter: 6800, loss: 2.9524990789852446
iter: 6850, loss: 2.935619188210321
iter: 6900, loss: 2.9734530233807033
iter: 6950, loss: 2.785320390822396
iter: 7000, loss: 3.1911680922054106
iter: 7050, loss: 2.7732513120363635
iter: 7100, loss: 2.7432456348282948
iter: 7150, loss: 2.823985375283256
iter: 7200, loss: 2.927504679808541
iter: 7250, loss: 3.0693400076760184
iter: 7300, loss: 2.666468213043515
iter: 7350, loss: 2.808132514378382
iter: 7400, loss: 2.558679431067573
iter: 7450, loss: 2.6974468813850763
iter: 7500, loss: 2.8497490201223457
iter: 7550, loss: 2.7490190564337236
iter: 7600, loss: 2.8300208840067427
iter: 7650, loss: 2.793417969741518
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
Input In [19], in <cell line: 5>()
      2 encoder1 = EncoderRNN(eng_lang.n_words, hidden_size).to(device)
      3 attn_decoder1 = AttnDecoderRNN(hidden_size, fra_lang.n_words, dropout_p=0.1).to(device)
----> 5 trainIters(encoder1, attn_decoder1, 75000, print_every=50)

Input In [16], in trainIters(encoder, decoder, n_iters, print_every, plot_every, learning_rate)
     16 input_tensor = training_pair[0]
     17 target_tensor = training_pair[1]
---> 19 loss = train(input_tensor, target_tensor, encoder,
     20              decoder, encoder_optimizer, decoder_optimizer, criterion)
     21 print_loss_total += loss
     22 plot_loss_total += loss

Input In [15], in train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length)
     45         if decoder_input.item() == EOS_token:
     46             break
---> 48 loss.backward()
     50 encoder_optimizer.step()
     51 decoder_optimizer.step()

File ~/anaconda3/envs/zajeciaei/lib/python3.10/site-packages/torch/_tensor.py:363, in Tensor.backward(self, gradient, retain_graph, create_graph, inputs)
    354 if has_torch_function_unary(self):
    355     return handle_torch_function(
    356         Tensor.backward,
    357         (self,),
   (...)
    361         create_graph=create_graph,
    362         inputs=inputs)
--> 363 torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)

File ~/anaconda3/envs/zajeciaei/lib/python3.10/site-packages/torch/autograd/__init__.py:173, in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
    168     retain_graph = create_graph
    170 # The reason we repeat same the comment below is that
    171 # some Python versions print out the first line of a multi-line function
    172 # calls in the traceback and some print out the last line
--> 173 Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
    174     tensors, grad_tensors_, retain_graph, create_graph, inputs,
    175     allow_unreachable=True, accumulate_grad=True)

KeyboardInterrupt: 
evaluateRandomly(encoder1, attn_decoder1)
> you re sad .
= tu es triste .
< vous tes . . <EOS>

> she is sewing a dress .
= elle coud une robe .
< elle est une une . . <EOS>

> he is suffering from a headache .
= il souffre d un mal de t te .
< il est un un un un . <EOS>

> i m glad to see you .
= je suis heureux de vous voir .
< je suis content de vous voir . <EOS>

> you are only young once .
= on n est jeune qu une fois .
< vous tes trop plus une enfant . <EOS>

> you re so sweet .
= vous tes si gentille !
< vous tes trop si . <EOS>

> i m running out of closet space .
= je manque d espace dans mon placard .
< je suis un de de <EOS>

> i m sort of an extrovert .
= je suis en quelque sorte extraverti .
< je suis un un . . <EOS>

> i m out of practice .
= je manque de pratique .
< j ai ai pas de <EOS>

> you re the last hope for humanity .
= tu es le dernier espoir de l humanit .
< vous tes le la la . . <EOS>
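Since evaluate also returns the attention weights, you can visualize which source positions the decoder attended to for each generated word. A minimal matplotlib sketch (matplotlib is not imported above, and the helper name show_attention is ours):

import matplotlib.pyplot as plt

def show_attention(input_sentence, output_words, attentions):
    # rows: generated words, columns: encoder positions (zero-padded up to MAX_LENGTH)
    fig, ax = plt.subplots()
    im = ax.matshow(attentions.cpu().numpy(), cmap='bone')
    fig.colorbar(im)
    src_tokens = input_sentence.split(' ') + ['<EOS>']
    ax.set_xticks(range(len(src_tokens)))
    ax.set_xticklabels(src_tokens, rotation=90)
    ax.set_yticks(range(len(output_words)))
    ax.set_yticklabels(output_words)
    plt.show()

output_words, attentions = evaluate(encoder1, attn_decoder1, 'i m ok .')
show_attention('i m ok .', output_words, attentions)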