
Modelowanie Języka

14. Recurrent model with attention [lab]

Jakub Pokrywka (2022)


from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random

import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
SOS_token = 0
EOS_token = 1

class Lang:
    def __init__(self):
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS"}
        self.n_words = 2  # Count SOS and EOS

    def addSentence(self, sentence):
        for word in sentence.split(' '):
            self.addWord(word)

    def addWord(self, word):
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.word2count[word] = 1
            self.index2word[self.n_words] = word
            self.n_words += 1
        else:
            self.word2count[word] += 1
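A quick illustration of how Lang assigns indices (the sentence here is made up):

lang = Lang()
lang.addSentence('ala ma kota')
print(lang.word2index)  # {'ala': 2, 'ma': 3, 'kota': 4} -- indices 0 and 1 are reserved for SOS/EOS
print(lang.n_words)     # 5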
pairs = []
with open('data/eng-pol.txt') as f:
    for line in f:
        eng_line, pol_line = line.lower().rstrip().split('\t')

        eng_line = re.sub(r"([.!?])", r" \1", eng_line)
        eng_line = re.sub(r"[^a-zA-Z.!?]+", r" ", eng_line)

        pol_line = re.sub(r"([.!?])", r" \1", pol_line)
        pol_line = re.sub(r"[^a-zA-Z.!?ąćęłńóśźżĄĆĘŁŃÓŚŹŻ]+", r" ", pol_line)

        pairs.append([eng_line, pol_line])


pairs[1]
['hi .', 'cześć .']
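The two substitutions in the loop above first split sentence-final punctuation into a separate token and then collapse every run of characters outside the kept alphabet into a single space. A made-up example:

line = "you're tom?!"
line = re.sub(r"([.!?])", r" \1", line)
line = re.sub(r"[^a-zA-Z.!?]+", r" ", line)
print(line)  # you re tom ? !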
MAX_LENGTH = 10
eng_prefixes = (
    "i am ", "i m ",
    "he is", "he s ",
    "she is", "she s ",
    "you are", "you re ",
    "we are", "we re ",
    "they are", "they re "
)

pairs = [p for p in pairs if len(p[0].split(' ')) < MAX_LENGTH and len(p[1].split(' ')) < MAX_LENGTH]
pairs = [p for p in pairs if p[0].startswith(eng_prefixes)]

eng_lang = Lang()
pol_lang = Lang()

for pair in pairs:
    eng_lang.addSentence(pair[0])
    pol_lang.addSentence(pair[1])
pairs[0]
['i m ok .', 'ze mną wszystko w porządku .']
pairs[1]
['i m up .', 'wstałem .']
pairs[2]
['i m tom .', 'jestem tom .']
eng_lang.n_words
1828
pol_lang.n_words
2883
class EncoderRNN(nn.Module):
    def __init__(self, input_size, embedding_size, hidden_size):
        super(EncoderRNN, self).__init__()
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size

        self.embedding = nn.Embedding(input_size, self.embedding_size)
        self.gru = nn.GRU(self.embedding_size, hidden_size)

    def forward(self, input, hidden):
        embedded = self.embedding(input).view(1, 1, -1)
        output = embedded
        output, hidden = self.gru(output, hidden)
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)
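As a quick sanity check of the shapes, a minimal sketch of one encoder step on a single token index (the encoder instance here is a throwaway, not the one trained below):

enc = EncoderRNN(eng_lang.n_words, 200, 256).to(device)
hidden = enc.initHidden()
output, hidden = enc(torch.tensor([SOS_token], device=device), hidden)
print(output.shape, hidden.shape)  # torch.Size([1, 1, 256]) torch.Size([1, 1, 256])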
class DecoderRNN(nn.Module):
    def __init__(self, embedding_size, hidden_size, output_size):
        super(DecoderRNN, self).__init__()
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size

        self.embedding = nn.Embedding(output_size, self.embedding_size)
        self.gru = nn.GRU(self.embedding_size, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        output = self.embedding(input).view(1, 1, -1)
        output = F.relu(output)
        output, hidden = self.gru(output, hidden)
        output = self.softmax(self.out(output[0]))
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)
class AttnDecoderRNN(nn.Module):
    def __init__(self, embedding_size, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
        super(AttnDecoderRNN, self).__init__()
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.dropout_p = dropout_p
        self.max_length = max_length

        self.embedding = nn.Embedding(self.output_size, self.embedding_size)
        self.attn = nn.Linear(self.hidden_size + self.embedding_size, self.max_length)
        self.attn_combine = nn.Linear(self.hidden_size + self.embedding_size, self.embedding_size)
        self.dropout = nn.Dropout(self.dropout_p)
        self.gru = nn.GRU(self.embedding_size, self.hidden_size)
        self.out = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input, hidden, encoder_outputs):
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)

        attn_weights = F.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))
        # deliberate breakpoint: the ipdb session below uses it to inspect the attention shapes;
        # remove this line before real training/evaluation
        import pdb; pdb.set_trace()

        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)

        output = F.relu(output)
        output, hidden = self.gru(output, hidden)

        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)
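The attention weights are predicted by self.attn from the current input embedding and hidden state, and torch.bmm then takes the corresponding weighted average of the encoder outputs. A minimal sketch of just that step, on dummy tensors with the shapes used in this notebook:

# dummy data: MAX_LENGTH = 10 encoder positions, hidden size 256
attn_weights = F.softmax(torch.randn(1, MAX_LENGTH), dim=1)  # (1, 10)
encoder_outputs = torch.randn(MAX_LENGTH, 256)               # (10, 256)
# bmm needs 3-D inputs: (1, 1, 10) x (1, 10, 256) -> (1, 1, 256)
attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.unsqueeze(0))
print(attn_applied.shape)  # torch.Size([1, 1, 256])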
def tensorFromSentence(sentence, lang):
    indexes = [lang.word2index[word] for word in sentence.split(' ')]
    indexes.append(EOS_token)
    return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
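For instance, on the English side of the first training pair:

t = tensorFromSentence(pairs[0][0], eng_lang)  # 'i m ok .'
print(t.shape)  # torch.Size([5, 1]) -- four word indices plus EOS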
teacher_forcing_ratio = 0.5

def train_one_batch(input_tensor, target_tensor, encoder, decoder, optimizer, criterion, max_length=MAX_LENGTH):
    encoder_hidden = encoder.initHidden()

    optimizer.zero_grad()

    input_length = input_tensor.size(0)
    target_length = target_tensor.size(0)

    encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)

    loss = 0

    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(input_tensor[ei], encoder_hidden)
        encoder_outputs[ei] = encoder_output[0, 0]

    decoder_input = torch.tensor([[SOS_token]], device=device)

    decoder_hidden = encoder_hidden

    use_teacher_forcing = random.random() < teacher_forcing_ratio

    if use_teacher_forcing:
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(decoder_input, decoder_hidden, encoder_outputs)
            loss += criterion(decoder_output, target_tensor[di])
            decoder_input = target_tensor[di]  # Teacher forcing

    else:
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(decoder_input, decoder_hidden, encoder_outputs)
            topv, topi = decoder_output.topk(1)
            decoder_input = topi.squeeze().detach()  # detach from history as input

            loss += criterion(decoder_output, target_tensor[di])
            if decoder_input.item() == EOS_token:
                break

    loss.backward()

    optimizer.step()

    return loss.item() / target_length
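With probability teacher_forcing_ratio the decoder is fed the gold target token at each step instead of its own previous prediction. A minimal sketch of a single training step on the first pair (throwaway models; it assumes the pdb.set_trace() breakpoint has been removed from AttnDecoderRNN.forward):

enc = EncoderRNN(eng_lang.n_words, 200, 256).to(device)
dec = AttnDecoderRNN(200, 256, pol_lang.n_words).to(device)
opt = optim.SGD(list(enc.parameters()) + list(dec.parameters()), lr=0.01)
inp = tensorFromSentence(pairs[0][0], eng_lang)
tgt = tensorFromSentence(pairs[0][1], pol_lang)
print(train_one_batch(inp, tgt, enc, dec, opt, nn.NLLLoss()))  # per-token loss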
def trainIters(encoder, decoder, n_iters, print_every=1000, learning_rate=0.01):
    print_loss_total = 0  # Reset every print_every
    encoder.train()
    decoder.train()

    optimizer = optim.SGD(list(encoder.parameters()) + list(decoder.parameters()), lr=learning_rate)
    
    training_pairs = [random.choice(pairs) for _ in range(n_iters)]
    training_pairs = [(tensorFromSentence(p[0], eng_lang), tensorFromSentence(p[1], pol_lang)) for p in training_pairs]
    
    criterion = nn.NLLLoss()

    for i in range(1, n_iters + 1):
        training_pair = training_pairs[i - 1]
        input_tensor = training_pair[0]
        target_tensor = training_pair[1]

        loss = train_one_batch(input_tensor,
                               target_tensor,
                               encoder,
                               decoder,
                               optimizer,
                               criterion)
        
        print_loss_total += loss

        if i % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print(f'iter: {i}, loss: {print_loss_avg}')
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
    encoder.eval()
    decoder.eval()
    with torch.no_grad():
        input_tensor = tensorFromSentence(sentence, eng_lang)
        input_length = input_tensor.size()[0]
        encoder_hidden = encoder.initHidden()

        encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)

        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder(input_tensor[ei], encoder_hidden)
            encoder_outputs[ei] += encoder_output[0, 0]

        decoder_input = torch.tensor([[SOS_token]], device=device)

        decoder_hidden = encoder_hidden

        decoded_words = []
        decoder_attentions = torch.zeros(max_length, max_length)

        for di in range(max_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            decoder_attentions[di] = decoder_attention.data
            topv, topi = decoder_output.data.topk(1)
            if topi.item() == EOS_token:
                decoded_words.append('<EOS>')
                break
            else:
                decoded_words.append(pol_lang.index2word[topi.item()])

            decoder_input = topi.squeeze().detach()

        return decoded_words, decoder_attentions[:di + 1]
def evaluateRandomly(encoder, decoder, n=10):
    for i in range(n):
        pair = random.choice(pairs)
        print('>', pair[0])
        print('=', pair[1])
        output_words, attentions = evaluate(encoder, decoder, pair[0])
        output_sentence = ' '.join(output_words)
        print('<', output_sentence)
        print('')
embedding_size = 200
hidden_size = 256
encoder1 = EncoderRNN(eng_lang.n_words, embedding_size, hidden_size).to(device)
attn_decoder1 = AttnDecoderRNN(embedding_size, hidden_size, pol_lang.n_words, dropout_p=0.1).to(device)
trainIters(encoder1, attn_decoder1, 10_000, print_every=50)
> /tmp/ipykernel_41821/2519748186.py(27)forward()
     25         import pdb; pdb.set_trace()
     26 
---> 27         output = torch.cat((embedded[0], attn_applied[0]), 1)
     28         output = self.attn_combine(output).unsqueeze(0)
     29 

ipdb> embedded
tensor([[[-0.7259,  0.0000,  2.2112,  1.1947, -0.1261, -1.0427, -1.4295,
           0.1567, -0.3949, -1.0815,  1.1206,  2.0630,  2.8148, -1.8538,
          -1.5486, -0.4900, -0.0000,  0.0000, -1.5046,  2.0329, -0.5872,
           1.5764, -0.0000,  1.1447, -0.4200, -0.1560,  0.1723,  1.5950,
           1.2955, -0.5796, -0.0000, -0.8989,  0.4737,  1.7037,  0.8787,
          -0.2064,  1.9589,  2.0400, -1.0883,  1.0515,  0.0540,  0.1436,
           1.2383,  0.4912, -1.7719,  1.6435,  1.5523,  2.3576,  0.0000,
           0.4063, -0.0821, -1.2872,  0.8372, -0.5638,  0.0706,  0.4151,
          -0.0000,  1.1651,  1.7333, -0.1684, -0.0000, -0.8560, -0.0000,
           2.7717, -0.4485, -0.8488,  0.8165,  2.1787, -1.0720, -0.3146,
           1.5798, -0.6788,  0.0000,  0.5609,  0.7415, -0.5585,  2.0659,
           0.7054,  1.3791, -0.2697, -0.0458,  1.6028, -0.0304, -0.6326,
          -1.3258, -0.8370,  0.6533,  2.2756, -0.5393,  0.4752,  0.4479,
          -0.0186, -0.7785, -1.7858,  0.2345,  1.9794, -0.0314, -0.8594,
          -0.0000,  0.0596, -2.6836, -1.9927,  0.2714, -1.4617, -0.8142,
          -0.7790,  0.5029, -0.6001, -0.7932,  1.3418,  0.1305, -0.0000,
          -1.2961, -2.7107, -2.3360, -0.7960,  0.5207,  1.6896,  0.9285,
           0.0000,  1.8187, -0.0000,  1.5908,  0.2745, -0.2589,  0.4066,
          -0.0000, -1.3145, -0.5903,  0.3696, -1.9539, -1.9995, -0.8219,
           0.3937, -0.6068,  0.7947,  1.3940,  0.5513,  0.7498,  1.4578,
          -0.0000, -0.5037, -0.6856,  0.7723, -0.6553,  1.0936, -0.2788,
          -1.9658,  1.5950,  0.8480,  1.1166,  1.3168, -0.0000,  0.2597,
           1.0813,  0.1827, -1.6485,  0.5743, -0.4952,  0.7176, -0.4468,
          -1.7915, -0.6303,  0.2046,  0.7791,  0.1586,  0.2322, -2.3935,
           1.3643, -1.2023, -1.6792,  0.5582, -2.0117, -0.6245,  2.4039,
           2.3736,  0.0559,  0.9173,  0.6446, -0.2068, -0.8805, -0.3070,
           0.7318,  1.9806,  1.9318, -1.1276, -0.1307,  0.0243,  0.8480,
           0.4865, -1.5352,  0.8082,  1.7595, -0.2168,  2.0735, -1.0444,
          -0.0000,  1.0729, -0.2194,  0.5439]]], grad_fn=<MulBackward0>)
ipdb> embedded.shape
torch.Size([1, 1, 200])
ipdb> attn_weights
tensor([[0.0817, 0.1095, 0.1425, 0.1611, 0.0574, 0.0546, 0.0374, 0.0621, 0.0703,
         0.2234]], grad_fn=<SoftmaxBackward0>)
ipdb> attn_applied
tensor([[[ 0.0354, -0.0156, -0.0048, -0.0936,  0.0637,  0.1516,  0.1419,
           0.1106,  0.0511,  0.0235, -0.0622,  0.0725,  0.0709, -0.0624,
           0.1407, -0.0069, -0.1602, -0.1883, -0.1707, -0.1528, -0.0296,
          -0.0500,  0.2115,  0.0705, -0.1385, -0.0487, -0.0165, -0.0128,
          -0.0594,  0.0209, -0.1081,  0.0509,  0.0655,  0.1314, -0.0455,
          -0.0049, -0.1527, -0.1900, -0.0019,  0.0295, -0.0308,  0.0886,
           0.1369, -0.1571,  0.0518, -0.0991, -0.0310, -0.1781, -0.0290,
           0.0558,  0.0585, -0.1045, -0.0027, -0.0476, -0.0377, -0.1026,
           0.0481,  0.0398, -0.0956,  0.0655, -0.1449,  0.0193, -0.0380,
           0.0401,  0.0491, -0.1925,  0.0669,  0.0774,  0.0604,  0.1187,
          -0.0401,  0.1094,  0.0706,  0.0474,  0.0178, -0.0888, -0.0632,
           0.1180, -0.0257, -0.0180, -0.0807,  0.0867, -0.0428, -0.0982,
          -0.0129,  0.1326, -0.0868, -0.0118,  0.0923, -0.0634, -0.1758,
          -0.0835, -0.2328,  0.0578,  0.0184,  0.0602, -0.1132, -0.1089,
          -0.1371, -0.0996, -0.0758, -0.1615,  0.0474, -0.0595,  0.1130,
          -0.1329,  0.0068, -0.0485, -0.0376,  0.0170,  0.0743,  0.0284,
          -0.1708,  0.0283, -0.0161,  0.1138, -0.0223, -0.0504, -0.0068,
           0.1297,  0.0962,  0.1806, -0.1773, -0.1658,  0.1612,  0.0569,
           0.0703, -0.0321, -0.1741, -0.0983, -0.0848,  0.0342,  0.1021,
          -0.1319,  0.1122, -0.0467,  0.0927, -0.0528, -0.0696,  0.0227,
           0.0445,  0.0268,  0.1563,  0.0008,  0.0296,  0.0112, -0.0863,
          -0.1705, -0.0137, -0.0336, -0.0533,  0.0015, -0.0134, -0.0530,
           0.0995,  0.0445, -0.1190, -0.1675,  0.1295, -0.1072,  0.0954,
           0.0559,  0.0572,  0.1595,  0.0054, -0.1020,  0.0309, -0.0821,
           0.0230, -0.1480, -0.0815, -0.0013, -0.0012,  0.1046,  0.0248,
           0.1121,  0.0055,  0.1006, -0.0891, -0.0237, -0.0231, -0.0891,
           0.0234,  0.0164, -0.0080, -0.0431, -0.0041,  0.2627, -0.2110,
           0.1026, -0.0049,  0.0077, -0.1126,  0.0161,  0.0039,  0.0700,
           0.0353, -0.0941,  0.0770,  0.1015, -0.1124, -0.1738,  0.0232,
           0.1839, -0.2329,  0.0488,  0.0791,  0.2002,  0.0389, -0.0985,
          -0.0744,  0.1392,  0.0052,  0.1119,  0.0851, -0.1062, -0.0948,
           0.0718,  0.0308,  0.0136,  0.2036, -0.0510,  0.0615,  0.1164,
           0.0242, -0.0717,  0.0955, -0.0796,  0.0856,  0.0040, -0.1370,
          -0.1614,  0.0605, -0.1396, -0.0286,  0.0295,  0.0515, -0.0880,
           0.0249, -0.2263,  0.0048, -0.0381, -0.0019,  0.0186, -0.0209,
          -0.0929, -0.1371,  0.0052, -0.1237, -0.1090, -0.0606,  0.0524,
           0.0351,  0.0283,  0.0264,  0.0866]]], grad_fn=<BmmBackward0>)
ipdb> attn_applied.shape
torch.Size([1, 1, 256])
ipdb> attn_weights.shape
torch.Size([1, 10])
ipdb> encoder_outputs.shape
torch.Size([10, 256])
ipdb>  output = torch.cat((embedded[0], attn_applied[0]), 1)
ipdb> output.shape
torch.Size([1, 456])
ipdb> output = self.attn_combine(output).unsqueeze(0)
ipdb> output.shape
torch.Size([1, 1, 200])
ipdb> torch.cat((embedded[0], attn_applied[0]), 1)
tensor([[-7.2585e-01,  0.0000e+00,  2.2112e+00,  1.1947e+00, -1.2609e-01,
         -1.0427e+00, -1.4295e+00,  1.5669e-01, -3.9488e-01, -1.0815e+00,
          1.1206e+00,  2.0630e+00,  2.8148e+00, -1.8538e+00, -1.5486e+00,
         -4.8997e-01, -0.0000e+00,  0.0000e+00, -1.5046e+00,  2.0329e+00,
         -5.8720e-01,  1.5764e+00, -0.0000e+00,  1.1447e+00, -4.2003e-01,
         -1.5600e-01,  1.7233e-01,  1.5950e+00,  1.2955e+00, -5.7964e-01,
         -0.0000e+00, -8.9891e-01,  4.7372e-01,  1.7037e+00,  8.7866e-01,
         -2.0642e-01,  1.9589e+00,  2.0400e+00, -1.0883e+00,  1.0515e+00,
          5.3959e-02,  1.4358e-01,  1.2383e+00,  4.9123e-01, -1.7719e+00,
          1.6435e+00,  1.5523e+00,  2.3576e+00,  0.0000e+00,  4.0628e-01,
         -8.2075e-02, -1.2872e+00,  8.3723e-01, -5.6378e-01,  7.0637e-02,
          4.1508e-01, -0.0000e+00,  1.1651e+00,  1.7333e+00, -1.6842e-01,
         -0.0000e+00, -8.5601e-01, -0.0000e+00,  2.7717e+00, -4.4849e-01,
         -8.4885e-01,  8.1650e-01,  2.1787e+00, -1.0720e+00, -3.1463e-01,
          1.5798e+00, -6.7880e-01,  0.0000e+00,  5.6090e-01,  7.4153e-01,
         -5.5849e-01,  2.0659e+00,  7.0539e-01,  1.3791e+00, -2.6968e-01,
         -4.5789e-02,  1.6028e+00, -3.0432e-02, -6.3259e-01, -1.3258e+00,
         -8.3697e-01,  6.5333e-01,  2.2756e+00, -5.3934e-01,  4.7520e-01,
          4.4788e-01, -1.8612e-02, -7.7847e-01, -1.7858e+00,  2.3452e-01,
          1.9794e+00, -3.1421e-02, -8.5938e-01, -0.0000e+00,  5.9576e-02,
         -2.6836e+00, -1.9927e+00,  2.7139e-01, -1.4617e+00, -8.1419e-01,
         -7.7900e-01,  5.0293e-01, -6.0008e-01, -7.9323e-01,  1.3418e+00,
          1.3053e-01, -0.0000e+00, -1.2961e+00, -2.7107e+00, -2.3360e+00,
         -7.9603e-01,  5.2071e-01,  1.6896e+00,  9.2845e-01,  0.0000e+00,
          1.8187e+00, -0.0000e+00,  1.5908e+00,  2.7451e-01, -2.5888e-01,
          4.0663e-01, -0.0000e+00, -1.3145e+00, -5.9031e-01,  3.6964e-01,
         -1.9539e+00, -1.9995e+00, -8.2193e-01,  3.9374e-01, -6.0678e-01,
          7.9467e-01,  1.3940e+00,  5.5134e-01,  7.4983e-01,  1.4578e+00,
         -0.0000e+00, -5.0368e-01, -6.8556e-01,  7.7229e-01, -6.5534e-01,
          1.0936e+00, -2.7885e-01, -1.9658e+00,  1.5950e+00,  8.4796e-01,
          1.1166e+00,  1.3168e+00, -0.0000e+00,  2.5968e-01,  1.0813e+00,
          1.8274e-01, -1.6485e+00,  5.7433e-01, -4.9516e-01,  7.1760e-01,
         -4.4680e-01, -1.7915e+00, -6.3027e-01,  2.0462e-01,  7.7905e-01,
          1.5859e-01,  2.3222e-01, -2.3935e+00,  1.3643e+00, -1.2023e+00,
         -1.6792e+00,  5.5823e-01, -2.0117e+00, -6.2452e-01,  2.4039e+00,
          2.3736e+00,  5.5896e-02,  9.1725e-01,  6.4464e-01, -2.0675e-01,
         -8.8049e-01, -3.0703e-01,  7.3178e-01,  1.9806e+00,  1.9318e+00,
         -1.1276e+00, -1.3072e-01,  2.4253e-02,  8.4797e-01,  4.8654e-01,
         -1.5352e+00,  8.0822e-01,  1.7595e+00, -2.1682e-01,  2.0735e+00,
         -1.0444e+00, -0.0000e+00,  1.0729e+00, -2.1940e-01,  5.4391e-01,
          3.5435e-02, -1.5585e-02, -4.8357e-03, -9.3600e-02,  6.3727e-02,
          1.5162e-01,  1.4191e-01,  1.1063e-01,  5.1059e-02,  2.3501e-02,
         -6.2207e-02,  7.2538e-02,  7.0922e-02, -6.2352e-02,  1.4066e-01,
         -6.8974e-03, -1.6019e-01, -1.8832e-01, -1.7067e-01, -1.5275e-01,
         -2.9574e-02, -5.0036e-02,  2.1154e-01,  7.0534e-02, -1.3852e-01,
         -4.8703e-02, -1.6496e-02, -1.2794e-02, -5.9357e-02,  2.0857e-02,
         -1.0812e-01,  5.0935e-02,  6.5458e-02,  1.3136e-01, -4.5476e-02,
         -4.8890e-03, -1.5270e-01, -1.9004e-01, -1.9268e-03,  2.9531e-02,
         -3.0820e-02,  8.8608e-02,  1.3690e-01, -1.5715e-01,  5.1807e-02,
         -9.9062e-02, -3.0984e-02, -1.7808e-01, -2.8995e-02,  5.5791e-02,
          5.8522e-02, -1.0453e-01, -2.7097e-03, -4.7650e-02, -3.7730e-02,
         -1.0258e-01,  4.8142e-02,  3.9797e-02, -9.5571e-02,  6.5458e-02,
         -1.4489e-01,  1.9339e-02, -3.8005e-02,  4.0136e-02,  4.9097e-02,
         -1.9247e-01,  6.6852e-02,  7.7364e-02,  6.0379e-02,  1.1870e-01,
         -4.0057e-02,  1.0945e-01,  7.0648e-02,  4.7377e-02,  1.7824e-02,
         -8.8779e-02, -6.3218e-02,  1.1804e-01, -2.5733e-02, -1.7959e-02,
         -8.0674e-02,  8.6741e-02, -4.2754e-02, -9.8244e-02, -1.2859e-02,
          1.3257e-01, -8.6784e-02, -1.1774e-02,  9.2331e-02, -6.3417e-02,
         -1.7581e-01, -8.3526e-02, -2.3277e-01,  5.7765e-02,  1.8407e-02,
          6.0199e-02, -1.1321e-01, -1.0885e-01, -1.3705e-01, -9.9638e-02,
         -7.5838e-02, -1.6146e-01,  4.7433e-02, -5.9514e-02,  1.1298e-01,
         -1.3286e-01,  6.7797e-03, -4.8545e-02, -3.7572e-02,  1.7049e-02,
          7.4291e-02,  2.8442e-02, -1.7075e-01,  2.8328e-02, -1.6143e-02,
          1.1376e-01, -2.2335e-02, -5.0417e-02, -6.8320e-03,  1.2967e-01,
          9.6223e-02,  1.8056e-01, -1.7727e-01, -1.6582e-01,  1.6121e-01,
          5.6873e-02,  7.0338e-02, -3.2107e-02, -1.7414e-01, -9.8330e-02,
         -8.4751e-02,  3.4170e-02,  1.0213e-01, -1.3191e-01,  1.1224e-01,
         -4.6743e-02,  9.2736e-02, -5.2760e-02, -6.9552e-02,  2.2712e-02,
          4.4459e-02,  2.6758e-02,  1.5629e-01,  8.4847e-04,  2.9560e-02,
          1.1163e-02, -8.6294e-02, -1.7045e-01, -1.3690e-02, -3.3578e-02,
         -5.3289e-02,  1.4815e-03, -1.3354e-02, -5.3049e-02,  9.9541e-02,
          4.4520e-02, -1.1904e-01, -1.6747e-01,  1.2955e-01, -1.0718e-01,
          9.5381e-02,  5.5950e-02,  5.7216e-02,  1.5949e-01,  5.4154e-03,
         -1.0203e-01,  3.0928e-02, -8.2072e-02,  2.2982e-02, -1.4800e-01,
         -8.1458e-02, -1.3399e-03, -1.2277e-03,  1.0457e-01,  2.4771e-02,
          1.1215e-01,  5.4644e-03,  1.0059e-01, -8.9117e-02, -2.3669e-02,
         -2.3117e-02, -8.9104e-02,  2.3379e-02,  1.6435e-02, -8.0299e-03,
         -4.3092e-02, -4.1300e-03,  2.6272e-01, -2.1100e-01,  1.0265e-01,
         -4.9496e-03,  7.7325e-03, -1.1258e-01,  1.6118e-02,  3.8591e-03,
          6.9952e-02,  3.5275e-02, -9.4110e-02,  7.6992e-02,  1.0149e-01,
         -1.1243e-01, -1.7381e-01,  2.3158e-02,  1.8389e-01, -2.3291e-01,
          4.8788e-02,  7.9070e-02,  2.0018e-01,  3.8932e-02, -9.8458e-02,
         -7.4388e-02,  1.3917e-01,  5.1577e-03,  1.1188e-01,  8.5138e-02,
         -1.0618e-01, -9.4835e-02,  7.1822e-02,  3.0813e-02,  1.3624e-02,
          2.0363e-01, -5.0962e-02,  6.1539e-02,  1.1643e-01,  2.4200e-02,
         -7.1730e-02,  9.5475e-02, -7.9572e-02,  8.5584e-02,  3.9502e-03,
         -1.3701e-01, -1.6142e-01,  6.0496e-02, -1.3962e-01, -2.8607e-02,
          2.9515e-02,  5.1506e-02, -8.7967e-02,  2.4942e-02, -2.2634e-01,
          4.7778e-03, -3.8064e-02, -1.9145e-03,  1.8559e-02, -2.0943e-02,
         -9.2896e-02, -1.3714e-01,  5.1929e-03, -1.2374e-01, -1.0901e-01,
         -6.0571e-02,  5.2448e-02,  3.5082e-02,  2.8269e-02,  2.6405e-02,
          8.6625e-02]], grad_fn=<CatBackward0>)
ipdb> torch.cat((embedded[0], attn_applied[0]), 1).shape
torch.Size([1, 456])
ipdb> self.attn_combine(output).unsqueeze(0).shape
*** RuntimeError: mat1 and mat2 shapes cannot be multiplied (1x200 and 456x200)
ipdb> output = torch.cat((embedded[0], attn_applied[0]), 1)
ipdb> c
> /tmp/ipykernel_41821/2519748186.py(27)forward()
     25         import pdb; pdb.set_trace()
     26 
---> 27         output = torch.cat((embedded[0], attn_applied[0]), 1)
     28         output = self.attn_combine(output).unsqueeze(0)
     29 

ipdb>  output = torch.cat((embedded[0], attn_applied[0]), 1)
ipdb> output.shape
torch.Size([1, 456])
ipdb> self.attn_combine(output).unsqueeze(0).shape
torch.Size([1, 1, 200])
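The session above confirms the shape algebra of the attention step: attn_weights (1, 10) multiplied with encoder_outputs (1, 10, 256) via bmm gives attn_applied (1, 1, 256); concatenated with the (1, 200) embedding this yields (1, 456), which attn_combine maps back to (1, 1, 200). The RuntimeError appears only when attn_combine is applied a second time, because its input then already has 200 features rather than the expected 456.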
evaluateRandomly(encoder1, attn_decoder1)
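Since evaluate also returns the attention matrix, the input-output alignment can be plotted as well; a minimal sketch, assuming matplotlib is available:

import matplotlib.pyplot as plt

output_words, attentions = evaluate(encoder1, attn_decoder1, 'i m ok .')
plt.matshow(attentions.numpy())  # rows: generated words, columns: input positions
plt.show()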
## ASSIGNMENT

Gonito challenge: "WMT2017 Czech-English machine translation challenge for news"

First train a German -> English model, then continue training it on Czech -> English.
The encoder may be reinitialized from scratch or kept; in either case, use the already trained decoder, as in the sketch below.
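One possible shape of the solution, as a heavily hedged sketch: the de/cs corpora, their Lang objects, and a trainIters variant that takes the training pairs as an argument are all assumed here, not given above.

enc_de = EncoderRNN(de_lang.n_words, 200, 256).to(device)
dec_en = AttnDecoderRNN(200, 256, en_lang.n_words).to(device)
# ... train on German -> English pairs ...

enc_cs = EncoderRNN(cs_lang.n_words, 200, 256).to(device)  # fresh encoder; reusing enc_de is also allowed
# ... continue training on Czech -> English pairs, keeping the trained dec_en as the decoder ...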