Modelowanie Języka (Language Modeling)

10. Recurrent model with attention [exercises]

Jakub Pokrywka (2022)


# https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random

import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
SOS_token = 0
EOS_token = 1

class Lang:
    def __init__(self):
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS"}
        self.n_words = 2  # Count SOS and EOS

    def addSentence(self, sentence):
        for word in sentence.split(' '):
            self.addWord(word)

    def addWord(self, word):
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.word2count[word] = 1
            self.index2word[self.n_words] = word
            self.n_words += 1
        else:
            self.word2count[word] += 1
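A quick illustration of how Lang builds its vocabulary (a hypothetical mini-example, not part of the original notebook):

demo_lang = Lang()
demo_lang.addSentence('i m ok .')
print(demo_lang.word2index)   # {'i': 2, 'm': 3, 'ok': 4, '.': 5} -- indices start after SOS/EOS
print(demo_lang.index2word)   # {0: 'SOS', 1: 'EOS', 2: 'i', 3: 'm', 4: 'ok', 5: '.'}
print(demo_lang.n_words)      # 6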
# def unicodeToAscii(s):
#     return ''.join(
#         c for c in unicodedata.normalize('NFD', s)
#         if unicodedata.category(c) != 'Mn'
#     )
pairs = []
with open('data/eng-pol.txt') as f:
    for line in f:
        eng_line, pol_line = line.lower().rstrip().split('\t')

        eng_line = re.sub(r"([.!?])", r" \1", eng_line)
        eng_line = re.sub(r"[^a-zA-Z.!?]+", r" ", eng_line)

        pol_line = re.sub(r"([.!?])", r" \1", pol_line)
        pol_line = re.sub(r"[^a-zA-Z.!?]+", r" ", pol_line)
        
#         eng_line = unicodeToAscii(eng_line)
#         pol_line = unicodeToAscii(pol_line)

        pairs.append([eng_line, pol_line])


pairs[1]
['hi .', 'cze .']
MAX_LENGTH = 10
eng_prefixes = (
    "i am ", "i m ",
    "he is", "he s ",
    "she is", "she s ",
    "you are", "you re ",
    "we are", "we re ",
    "they are", "they re "
)

pairs = [p for p in pairs if len(p[0].split(' ')) < MAX_LENGTH and len(p[1].split(' ')) < MAX_LENGTH]
pairs = [p for p in pairs if p[0].startswith(eng_prefixes)]

eng_lang = Lang()
pol_lang = Lang()

for pair in pairs:
    eng_lang.addSentence(pair[0])
    pol_lang.addSentence(pair[1])
pairs[0]
['i m ok .', 'ze mn wszystko w porz dku .']
pairs[1]
['i m up .', 'wsta em .']
pairs[2]
['i m tom .', 'jestem tom .']
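Note that the character class [^a-zA-Z.!?]+ above also strips Polish diacritics, which is why the Polish side reads 'ze mn wszystko w porz dku .'. A possible variant that keeps the Polish letters is sketched below (illustrative only; the rest of the notebook continues on the ASCII-only data):

# Sketch: alternative preprocessing that preserves Polish diacritics
# (not used further in this notebook).
pairs_pl = []
with open('data/eng-pol.txt') as f:
    for line in f:
        eng_line, pol_line = line.lower().rstrip().split('\t')
        eng_line = re.sub(r"([.!?])", r" \1", eng_line)
        eng_line = re.sub(r"[^a-z.!?]+", r" ", eng_line)
        pol_line = re.sub(r"([.!?])", r" \1", pol_line)
        pol_line = re.sub(r"[^a-ząćęłńóśźż.!?]+", r" ", pol_line)
        pairs_pl.append([eng_line, pol_line])
pairs_pl[1]  # e.g. ['hi .', 'cześć .']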
class EncoderRNN(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size

        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)

    def forward(self, input, hidden):
        # a single word index -> embedding reshaped to (seq_len=1, batch=1, hidden_size)
        embedded = self.embedding(input).view(1, 1, -1)
        output = embedded
        output, hidden = self.gru(output, hidden)
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)
class DecoderRNN(nn.Module):
    def __init__(self, hidden_size, output_size):
        super(DecoderRNN, self).__init__()
        self.hidden_size = hidden_size

        self.embedding = nn.Embedding(output_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        output = self.embedding(input).view(1, 1, -1)
        output = F.relu(output)
        output, hidden = self.gru(output, hidden)
        output = self.softmax(self.out(output[0]))
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)
class AttnDecoderRNN(nn.Module):
    def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
        super(AttnDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.dropout_p = dropout_p
        self.max_length = max_length

        self.embedding = nn.Embedding(self.output_size, self.hidden_size)
        self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
        self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.dropout = nn.Dropout(self.dropout_p)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size)
        self.out = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input, hidden, encoder_outputs):
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)

        # attention weights over the (zero-padded) encoder positions: shape (1, max_length)
        attn_weights = F.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        # weighted sum of encoder outputs (the context vector): shape (1, 1, hidden_size)
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))

        # combine the embedded input word with the attention context
        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)

        output = F.relu(output)
        output, hidden = self.gru(output, hidden)

        # log-probabilities over the output vocabulary
        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)
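A minimal shape check of a single AttnDecoderRNN step (illustrative only: untrained weights and zero encoder outputs):

_dec = AttnDecoderRNN(hidden_size=256, output_size=pol_lang.n_words).to(device)
_out, _hid, _attn = _dec(torch.tensor([[SOS_token]], device=device),
                         _dec.initHidden(),
                         torch.zeros(MAX_LENGTH, 256, device=device))
print(_out.shape)   # (1, output_size)  - log-probabilities over the Polish vocabulary
print(_hid.shape)   # (1, 1, 256)       - new GRU hidden state
print(_attn.shape)  # (1, MAX_LENGTH)   - attention weights over encoder positions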
def tensorFromSentence(sentence, lang):
    # map each word to its index and append EOS; result shape: (seq_len, 1)
    indexes = [lang.word2index[word] for word in sentence.split(' ')]
    indexes.append(EOS_token)
    return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
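For example, the English side of the first pair becomes a column vector of word indices ended with EOS:

tensorFromSentence(pairs[0][0], eng_lang)  # 'i m ok .' -> tensor([[2], [3], [4], [5], [1]])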
teacher_forcing_ratio = 0.5

def train_one_batch(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
    encoder_hidden = encoder.initHidden()

    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()

    input_length = input_tensor.size(0)
    target_length = target_tensor.size(0)

    # encoder outputs for every source position, zero-padded up to max_length
    encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)

    loss = 0

    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(input_tensor[ei], encoder_hidden)
        encoder_outputs[ei] = encoder_output[0, 0]

    # decoding starts from SOS; the encoder's final hidden state initializes the decoder
    decoder_input = torch.tensor([[SOS_token]], device=device)

    decoder_hidden = encoder_hidden

    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False

    if use_teacher_forcing:
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(decoder_input, decoder_hidden, encoder_outputs)
            loss += criterion(decoder_output, target_tensor[di])
            decoder_input = target_tensor[di]  # Teacher forcing

    else:
        # without teacher forcing: feed the decoder's own prediction as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(decoder_input, decoder_hidden, encoder_outputs)
            topv, topi = decoder_output.topk(1)
            decoder_input = topi.squeeze().detach()  # detach from history as input

            loss += criterion(decoder_output, target_tensor[di])
            if decoder_input.item() == EOS_token:
                break

    loss.backward()

    encoder_optimizer.step()
    decoder_optimizer.step()

    return loss.item() / target_length
def trainIters(encoder, decoder, n_iters, print_every=1000, learning_rate=0.01):
    print_loss_total = 0  # Reset every print_every

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    
    # sample n_iters random sentence pairs up front and convert them to index tensors
    training_pairs = [random.choice(pairs) for _ in range(n_iters)]
    training_pairs = [(tensorFromSentence(p[0], eng_lang), tensorFromSentence(p[1], pol_lang)) for p in training_pairs]
    
    criterion = nn.NLLLoss()

    for i in range(1, n_iters + 1):
        training_pair = training_pairs[i - 1]
        input_tensor = training_pair[0]
        target_tensor = training_pair[1]

        loss = train_one_batch(input_tensor,
                               target_tensor,
                               encoder,
                               decoder,
                               encoder_optimizer,
                               decoder_optimizer,
                               criterion)
        
        print_loss_total += loss

        if i % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print(f'iter: {i}, loss: {print_loss_avg}')
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
    with torch.no_grad():
        input_tensor = tensorFromSentence(sentence, eng_lang)
        input_length = input_tensor.size()[0]
        encoder_hidden = encoder.initHidden()

        encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)

        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder(input_tensor[ei],
                                                     encoder_hidden)
            encoder_outputs[ei] += encoder_output[0, 0]

        decoder_input = torch.tensor([[SOS_token]], device=device)  # SOS

        decoder_hidden = encoder_hidden

        decoded_words = []
        # attention weights from each decoding step, kept for later inspection
        decoder_attentions = torch.zeros(max_length, max_length)

        # greedy decoding: take the most probable word at every step, stop at EOS
        for di in range(max_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            decoder_attentions[di] = decoder_attention.data
            topv, topi = decoder_output.data.topk(1)
            if topi.item() == EOS_token:
                decoded_words.append('<EOS>')
                break
            else:
                decoded_words.append(pol_lang.index2word[topi.item()])

            decoder_input = topi.squeeze().detach()

        return decoded_words, decoder_attentions[:di + 1]
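Once encoder1 and attn_decoder1 have been trained (below), evaluate can be run on any sentence whose words all occur in the training data, e.g. (a sketch):

# Sketch (run after training): greedy decoding of a single training sentence.
# Every word must be present in eng_lang, otherwise tensorFromSentence raises a KeyError.
words, attn = evaluate(encoder1, attn_decoder1, pairs[0][0])
print(' '.join(words))  # predicted Polish translation, normally ending with <EOS>
print(attn.shape)       # (number of decoded tokens, MAX_LENGTH)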
def evaluateRandomly(encoder, decoder, n=10):
    for i in range(n):
        pair = random.choice(pairs)
        print('>', pair[0])
        print('=', pair[1])
        output_words, attentions = evaluate(encoder, decoder, pair[0])
        output_sentence = ' '.join(output_words)
        print('<', output_sentence)
        print('')
hidden_size = 256
encoder1 = EncoderRNN(eng_lang.n_words, hidden_size).to(device)
attn_decoder1 = AttnDecoderRNN(hidden_size, pol_lang.n_words, dropout_p=0.1).to(device)

trainIters(encoder1, attn_decoder1, 75000, print_every=50)
iter: 50, loss: 5.000807713402643
iter: 100, loss: 4.439269823452783
iter: 150, loss: 3.9193654258516095
iter: 200, loss: 4.392944496881395
iter: 250, loss: 4.093038458445715
iter: 300, loss: 4.424980944542659
iter: 350, loss: 3.981394485715835
iter: 400, loss: 4.333685203370593
iter: 450, loss: 3.9591501615615123
iter: 500, loss: 3.9112882453070745
iter: 550, loss: 4.02278929338001
iter: 600, loss: 4.193090805341327
iter: 650, loss: 4.0906043112315835
iter: 700, loss: 4.469698131742931
iter: 750, loss: 4.176360895232548
iter: 800, loss: 3.961828211148579
iter: 850, loss: 4.261641813959393
iter: 900, loss: 4.051715278474111
iter: 950, loss: 3.936853767228505
iter: 1000, loss: 4.225432455638099
iter: 1050, loss: 4.045197415472971
iter: 1100, loss: 4.320344743092855
iter: 1150, loss: 4.053225604799058
iter: 1200, loss: 3.743754985476297
iter: 1250, loss: 4.0527504539035615
iter: 1300, loss: 3.84758229040721
iter: 1350, loss: 4.045899712789627
iter: 1400, loss: 4.027170557158334
iter: 1450, loss: 4.250136273232718
iter: 1500, loss: 3.895784365865919
iter: 1550, loss: 4.033517143960983
iter: 1600, loss: 4.067692023458934
iter: 1650, loss: 3.943578155487303
iter: 1700, loss: 3.638787496930078
iter: 1750, loss: 3.6410217295752636
iter: 1800, loss: 3.8924306627757965
iter: 1850, loss: 4.000204294613429
iter: 1900, loss: 3.8232511097136
iter: 1950, loss: 3.878676666108388
iter: 2000, loss: 3.9427240886536845
iter: 2050, loss: 3.7359260752693064
iter: 2100, loss: 3.583097653464666
iter: 2150, loss: 3.8278237684265024
iter: 2200, loss: 3.9119961933408463
iter: 2250, loss: 3.8753220474152346
iter: 2300, loss: 3.8338965735359802
iter: 2350, loss: 3.4894873487381712
iter: 2400, loss: 3.566151720009153
iter: 2450, loss: 3.937922410420009
iter: 2500, loss: 3.5345082195070057
iter: 2550, loss: 3.775564970758225
iter: 2600, loss: 3.864645612398783
iter: 2650, loss: 3.9066238069837063
iter: 2700, loss: 4.0819177106524265
iter: 2750, loss: 3.655153612878587
iter: 2800, loss: 3.832113747127473
iter: 2850, loss: 3.5925060623335456
iter: 2900, loss: 3.491001639260187
iter: 2950, loss: 3.5009806160094232
iter: 3000, loss: 3.6677673985693184
iter: 3050, loss: 3.781239900210547
iter: 3100, loss: 3.473299116104368
iter: 3150, loss: 3.7532493569813066
iter: 3200, loss: 3.7904585500293306
iter: 3250, loss: 3.6127893707487324
iter: 3300, loss: 3.4757489145445453
iter: 3350, loss: 3.7090715601784847
iter: 3400, loss: 3.8198574437792336
iter: 3450, loss: 3.509964802068377
iter: 3500, loss: 3.612169361614045
iter: 3550, loss: 3.641026579652514
iter: 3600, loss: 3.8201526030434483
iter: 3650, loss: 3.5652526591997287
iter: 3700, loss: 3.742421626257518
iter: 3750, loss: 4.003867071651277
iter: 3800, loss: 3.659059532135253
iter: 3850, loss: 3.641981271872445
iter: 3900, loss: 3.5502949162059356
iter: 3950, loss: 3.560595460755485
iter: 4000, loss: 3.5651848596542597
iter: 4050, loss: 3.980170504395925
iter: 4100, loss: 3.3924002220214367
iter: 4150, loss: 3.6649077605217233
iter: 4200, loss: 3.340204861981528
iter: 4250, loss: 3.722639773754848
iter: 4300, loss: 3.589223196249159
iter: 4350, loss: 3.4467484310770793
iter: 4400, loss: 3.4151901176921897
iter: 4450, loss: 3.4896546234630392
iter: 4500, loss: 3.2113779149963744
iter: 4550, loss: 3.5685467066235015
iter: 4600, loss: 3.005555194105421
iter: 4650, loss: 3.6020915983820716
iter: 4700, loss: 3.633627172273303
iter: 4750, loss: 3.4529481847551127
iter: 4800, loss: 3.4479807695207154
iter: 4850, loss: 3.370973790963491
iter: 4900, loss: 3.539276809162564
iter: 4950, loss: 3.3183354888189416
iter: 5000, loss: 3.521332158444421
iter: 5050, loss: 3.314378255844116
iter: 5100, loss: 3.291964127449762
iter: 5150, loss: 3.4429656072344086
iter: 5200, loss: 3.5413768560848538
iter: 5250, loss: 3.585603856238107
iter: 5300, loss: 3.470469724049644
iter: 5350, loss: 3.4666152168379893
iter: 5400, loss: 3.1305627430885563
iter: 5450, loss: 3.337137906922235
iter: 5500, loss: 3.481247283072699
iter: 5550, loss: 3.517226897428906
iter: 5600, loss: 3.1901850409886183
iter: 5650, loss: 3.136146711447883
iter: 5700, loss: 3.404250585170019
iter: 5750, loss: 3.3665729104375073
iter: 5800, loss: 3.382146033839574
iter: 5850, loss: 3.4272568195433837
iter: 5900, loss: 3.322702169350215
iter: 5950, loss: 3.156406671554324
iter: 6000, loss: 3.194001044719938
iter: 6050, loss: 3.3348103672814755
iter: 6100, loss: 3.150647495882852
iter: 6150, loss: 3.1009463010212728
iter: 6200, loss: 3.3785942046377393
iter: 6250, loss: 3.3160466527711776
iter: 6300, loss: 3.1596272509590024
iter: 6350, loss: 3.2589193917304753
iter: 6400, loss: 3.297462665050749
iter: 6450, loss: 3.3298678997206306
iter: 6500, loss: 3.219574876160848
iter: 6550, loss: 3.3395619553195104
iter: 6600, loss: 2.9891018758047196
iter: 6650, loss: 3.1851753817437185
iter: 6700, loss: 3.0209535363590896
iter: 6750, loss: 3.15220423432759
iter: 6800, loss: 3.181441980475471
iter: 6850, loss: 2.918750543064541
iter: 6900, loss: 3.2590200382944134
iter: 6950, loss: 3.187785402199578
iter: 7000, loss: 3.1073317580677213
iter: 7050, loss: 3.2191209546497896
iter: 7100, loss: 3.2027250674868397
iter: 7150, loss: 2.828316307037596
iter: 7200, loss: 2.8388766735886777
iter: 7250, loss: 2.778842180978684
iter: 7300, loss: 3.285732759347039
iter: 7350, loss: 3.0465498041349734
iter: 7400, loss: 2.90309523902999
iter: 7450, loss: 2.7295303400736004
iter: 7500, loss: 2.907297393454446
iter: 7550, loss: 3.1439063924077963
iter: 7600, loss: 3.2378484228376356
iter: 7650, loss: 3.0929804128919316
iter: 7700, loss: 3.0129570432239117
iter: 7750, loss: 2.707492174629181
iter: 7800, loss: 2.852806848832539
iter: 7850, loss: 2.983840656045883
iter: 7900, loss: 2.6098039440124756
iter: 7950, loss: 2.8175843656252293
iter: 8000, loss: 3.017819283258348
iter: 8050, loss: 2.728099891352275
iter: 8100, loss: 2.94138666140087
iter: 8150, loss: 3.004456134924813
iter: 8200, loss: 2.909780698662713
iter: 8250, loss: 2.8520988211707463
iter: 8300, loss: 2.9205126920351905
iter: 8350, loss: 3.1615525522080685
iter: 8400, loss: 2.8823572458918134
iter: 8450, loss: 2.990696503003438
iter: 8500, loss: 2.722038128603072
iter: 8550, loss: 2.7890086468212183
iter: 8600, loss: 2.7701356183233714
iter: 8650, loss: 2.8187452931555486
iter: 8700, loss: 2.927999514186192
iter: 8750, loss: 3.0153564615930826
iter: 8800, loss: 2.988208478534032
iter: 8850, loss: 3.053433906763319
iter: 8900, loss: 2.8472830426125295
iter: 8950, loss: 2.9679218861943206
iter: 9000, loss: 2.722358681913406
iter: 9050, loss: 2.995666239821722
iter: 9100, loss: 2.8067044997139585
iter: 9150, loss: 2.762981554493072
iter: 9200, loss: 2.8366338660906236
iter: 9250, loss: 2.877190364905766
iter: 9300, loss: 2.6378051905518487
iter: 9350, loss: 3.064765093697442
iter: 9400, loss: 2.5961536618868513
iter: 9450, loss: 2.786036056007658
iter: 9500, loss: 2.6443762784609715
iter: 9550, loss: 2.7273754563028847
iter: 9600, loss: 2.68890615716813
iter: 9650, loss: 2.525617115732223
iter: 9700, loss: 2.711592395033155
iter: 9750, loss: 2.540444574356079
iter: 9800, loss: 2.8242833649090358
iter: 9850, loss: 2.644202707573535
iter: 9900, loss: 2.7373070236084946
iter: 9950, loss: 3.0115960283960614
iter: 10000, loss: 2.8879434264046813
iter: 10050, loss: 2.562242189869048
iter: 10100, loss: 2.8641940906653325
iter: 10150, loss: 2.7755310885944056
iter: 10200, loss: 2.633019772166298
iter: 10250, loss: 2.6914108280454356
iter: 10300, loss: 2.764466069902692
iter: 10350, loss: 2.638823566330804
iter: 10400, loss: 2.6221462763756036
iter: 10450, loss: 2.8230800466423944
iter: 10500, loss: 2.772455602169037
iter: 10550, loss: 2.600414518220085
iter: 10600, loss: 2.7080593706161262
iter: 10650, loss: 2.4712089688513013
iter: 10700, loss: 2.6253130605485704
iter: 10750, loss: 2.558527778141082
iter: 10800, loss: 2.7869244644944633
iter: 10850, loss: 2.585347386742394
iter: 10900, loss: 2.5044392397517248
iter: 10950, loss: 2.596850109872364
iter: 11000, loss: 2.928512234038776
iter: 11050, loss: 2.5913034356851425
iter: 11100, loss: 2.679621921558229
evaluateRandomly(encoder1, attn_decoder1)
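The attention matrix returned by evaluate can also be visualized, e.g. with matplotlib (a sketch, not part of the original run):

import matplotlib.pyplot as plt

def show_attention(input_sentence, output_words, attentions):
    # heat map of attention weights: rows = decoded Polish words, columns = English input words
    in_words = input_sentence.split(' ') + ['<EOS>']
    fig, ax = plt.subplots()
    cax = ax.matshow(attentions[:, :len(in_words)].numpy(), cmap='bone')
    fig.colorbar(cax)
    ax.set_xticks(range(len(in_words)))
    ax.set_xticklabels(in_words, rotation=90)
    ax.set_yticks(range(len(output_words)))
    ax.set_yticklabels(output_words)
    plt.show()

pair = random.choice(pairs)
output_words, attentions = evaluate(encoder1, attn_decoder1, pair[0])
show_attention(pair[0], output_words, attentions)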