Kacper Kalinowski 2024-06-02 18:23:23 +02:00
parent be2fab12c9
commit 546623e34e
2 changed files with 232999 additions and 0 deletions

232736
Seq2seq/fra.txt Normal file

File diff suppressed because it is too large

263
Seq2seq/seq2seq.py Normal file

@@ -0,0 +1,263 @@
import torch
import torch.nn as nn
import torch.optim as optim
import random
import re
import unicodedata
from torchtext.data.metrics import bleu_score
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Data preparation
def unicode_to_ascii(s):
    return ''.join(c for c in unicodedata.normalize('NFD', s)
                   if unicodedata.category(c) != 'Mn')

def preprocess_sentence(w):
    w = unicode_to_ascii(w.lower().strip())
    w = re.sub(r"([?.!,¿])", r" \1 ", w)
    w = re.sub(r'[" "]+', " ", w)
    w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w)
    w = w.strip()
    return w
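# Preprocessing lowercases the text, strips accents, pads punctuation with spaces,
# and replaces any character outside a-z and basic punctuation with a space.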
def read_langs(lang1, lang2, path):
    lines = open(path, encoding='utf-8').read().strip().split('\n')
    pairs = []
    for line in lines:
        parts = line.split('\t')
        if len(parts) >= 2:
            pairs.append([preprocess_sentence(parts[0]), preprocess_sentence(parts[1])])
    return pairs
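# Each line of fra.txt is expected to be tab-separated (English, French, and possibly
# extra columns); only the first two fields are kept.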
data_path = 'fra.txt'
pairs = read_langs('eng', 'fra', data_path)
# Vocabulary class
class Vocabulary:
    def __init__(self):
        self.word2index = {}
        self.index2word = {}
        self.word2count = {}
        self.n_words = 0
        self.add_word('<unk>')
        self.add_word('<pad>')

    def add_sentence(self, sentence):
        for word in sentence.split(' '):
            self.add_word(word)

    def add_word(self, word):
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.index2word[self.n_words] = word
            self.word2count[word] = 1
            self.n_words += 1
        else:
            self.word2count[word] += 1

    def lookup(self, word):
        return self.word2index.get(word, self.word2index['<unk>'])
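# '<unk>' and '<pad>' are registered first, so they always receive indices 0 and 1;
# lookup() maps any out-of-vocabulary word to '<unk>'.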
eng_vocab = Vocabulary()
fra_vocab = Vocabulary()
for pair in pairs:
eng_vocab.add_sentence(pair[0])
fra_vocab.add_sentence(pair[1])
# Seq2Seq Model with Attention
class Encoder(nn.Module):
    def __init__(self, input_dim, emb_dim, hid_dim, n_layers, dropout):
        super().__init__()
        self.embedding = nn.Embedding(input_dim, emb_dim)
        self.rnn = nn.GRU(emb_dim, hid_dim, n_layers, dropout=dropout, bidirectional=True)
        self.fc = nn.Linear(hid_dim * 2, hid_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src):
        embedded = self.dropout(self.embedding(src))
        outputs, hidden = self.rnn(embedded)
        # Combine the final forward and backward hidden states into a single state
        hidden = torch.tanh(self.fc(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)))
        return outputs, hidden
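# The encoder returns outputs of shape [src len, batch size, hid dim * 2] and a single
# combined hidden state of shape [batch size, hid dim].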
class Attention(nn.Module):
    def __init__(self, hid_dim):
        super().__init__()
        self.attn = nn.Linear(hid_dim * 3, hid_dim)
        self.v = nn.Linear(hid_dim, 1, bias=False)

    def forward(self, hidden, encoder_outputs):
        # hidden = [batch size, hid dim]
        # encoder_outputs = [src len, batch size, hid dim * 2]
        src_len = encoder_outputs.shape[0]
        hidden = hidden.unsqueeze(1).expand(-1, src_len, -1)
        encoder_outputs = encoder_outputs.permute(1, 0, 2)
        # hidden = [batch size, src len, hid dim]
        # encoder_outputs = [batch size, src len, hid dim * 2]
        energy = torch.tanh(self.attn(torch.cat((hidden, encoder_outputs), dim=2)))
        # energy = [batch size, src len, hid dim]
        attention = self.v(energy).squeeze(2)
        # attention = [batch size, src len]
        return torch.softmax(attention, dim=1)
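# The returned attention weights form a distribution over source positions
# (softmax along the src len dimension).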
class Decoder(nn.Module):
    def __init__(self, output_dim, emb_dim, hid_dim, n_layers, dropout, attention):
        super().__init__()
        self.output_dim = output_dim
        self.attention = attention
        self.embedding = nn.Embedding(output_dim, emb_dim)
        self.rnn = nn.GRU(hid_dim * 2 + emb_dim, hid_dim, n_layers, dropout=dropout)
        self.fc_out = nn.Linear(hid_dim * 3 + emb_dim, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input, hidden, encoder_outputs):
        input = input.unsqueeze(0)
        embedded = self.dropout(self.embedding(input))
        a = self.attention(hidden[-1], encoder_outputs).unsqueeze(1)
        encoder_outputs = encoder_outputs.permute(1, 0, 2)
        weighted = torch.bmm(a, encoder_outputs)
        rnn_input = torch.cat((embedded, weighted.permute(1, 0, 2)), dim=2)
        output, hidden = self.rnn(rnn_input, hidden)
        embedded = embedded.squeeze(0)
        output = output.squeeze(0)
        weighted = weighted.squeeze(1)
        prediction = self.fc_out(torch.cat((output, weighted, embedded), dim=1))
        return prediction, hidden
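# The decoder consumes one target token per call and returns logits of shape
# [batch size, output dim] together with the updated GRU hidden state.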
class Seq2Seq(nn.Module):
    def __init__(self, encoder, decoder, device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.device = device

    def forward(self, src, trg, teacher_forcing_ratio=0.5):
        trg_len = trg.shape[0]
        batch_size = trg.shape[1]
        trg_vocab_size = self.decoder.output_dim
        outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)
        encoder_outputs, hidden = self.encoder(src)
        # Initialize hidden state of the decoder with the hidden state of the encoder
        hidden = hidden.unsqueeze(0).repeat(self.decoder.rnn.num_layers, 1, 1)
        input = trg[0, :]
        for t in range(1, trg_len):
            output, hidden = self.decoder(input, hidden, encoder_outputs)
            outputs[t] = output
            top1 = output.argmax(1)
            input = trg[t] if random.random() < teacher_forcing_ratio else top1
        return outputs
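# outputs[0] is never written (it stays all zeros); the loss computation below skips
# position 0 by slicing output[1:] and trg[1:].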
# Training and evaluation functions
def train(model, iterator, optimizer, criterion, clip, print_every=100, max_batches=1000):
    model.train()
    epoch_loss = 0
    i = 0  # Initialize batch counter
    for src, trg in iterator:
        if i >= max_batches:  # Limit the number of batches processed in each epoch
            break
        src = src.to(device)
        trg = trg.to(device)
        optimizer.zero_grad()
        output = model(src, trg)
        output_dim = output.shape[-1]
        output = output[1:].reshape(-1, output_dim)
        trg = trg[1:].reshape(-1)
        loss = criterion(output, trg)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        epoch_loss += loss.item()
        if (i + 1) % print_every == 0:
            print(f'Batch {i+1}, Loss: {loss.item():.4f}')
        i += 1  # Increment batch counter
    return epoch_loss / (i if i > 0 else 1)  # Avoid division by zero
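# Note: train() processes at most max_batches batches, so each "epoch" below is a capped
# pass over the training generator rather than a full pass over the training data.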
def evaluate(model, iterator, criterion):
    model.eval()
    epoch_loss = 0
    i = 0  # Initialize batch counter
    with torch.no_grad():
        for src, trg in iterator:
            src = src.to(device)
            trg = trg.to(device)
            output = model(src, trg, 0)
            output_dim = output.shape[-1]
            output = output[1:].reshape(-1, output_dim)
            trg = trg[1:].reshape(-1)
            loss = criterion(output, trg)
            epoch_loss += loss.item()
            i += 1  # Increment batch counter
    return epoch_loss / (i if i > 0 else 1)  # Avoid division by zero
# BLEU Score calculation
def calculate_bleu(data, model, src_vocab, trg_vocab):
    trgs = []
    pred_trgs = []
    for (src, trg) in data:
        src_tensor = torch.tensor([src_vocab.lookup(word) for word in src.split(' ')]).unsqueeze(1).to(device)
        trg_tensor = torch.tensor([trg_vocab.lookup(word) for word in trg.split(' ')]).unsqueeze(1).to(device)
        with torch.no_grad():
            output = model(src_tensor, trg_tensor, 0)
        output_dim = output.shape[-1]
        output = output[1:].reshape(-1, output_dim)
        output = output.argmax(1)
        pred_trg = [trg_vocab.index2word[idx.item()] for idx in output if idx.item() != trg_vocab.word2index['<pad>']]
        pred_trgs.append(pred_trg)
        trgs.append([trg.split(' ')])
    return bleu_score(pred_trgs, trgs)
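# BLEU is computed with teacher_forcing_ratio=0 (the decoder feeds back its own
# predictions), but decoding still runs for exactly len(trg) - 1 steps, so each
# candidate has at most the reference length (predicted '<pad>' tokens are dropped).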
# Main script
INPUT_DIM = eng_vocab.n_words
OUTPUT_DIM = fra_vocab.n_words
ENC_EMB_DIM = 256
DEC_EMB_DIM = 256
HID_DIM = 512
N_LAYERS = 2
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
BATCH_SIZE = 32
N_EPOCHS = 7
CLIP = 1
attn = Attention(HID_DIM)
enc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT, attn)
model = Seq2Seq(enc, dec, device).to(device)
optimizer = optim.Adam(model.parameters())
criterion = nn.CrossEntropyLoss(ignore_index=fra_vocab.word2index['<pad>'])
# Splitting data into train and test sets
train_data = pairs[:int(0.8*len(pairs))]
test_data = pairs[int(0.8*len(pairs)):]
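# The 80/20 split is taken in file order (no shuffling before splitting).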
# Custom DataLoader with padding
def pad_sequence(seq, max_len, pad_value):
    seq += [pad_value] * (max_len - len(seq))
    return seq

def data_generator(data, src_vocab, trg_vocab, batch_size):
    for i in range(0, len(data), batch_size):
        src_batch = [d[0] for d in data[i:i+batch_size]]
        trg_batch = [d[1] for d in data[i:i+batch_size]]
        max_src_len = max(len(s.split(' ')) for s in src_batch)
        max_trg_len = max(len(s.split(' ')) for s in trg_batch)
        src_tensor = torch.tensor([pad_sequence([src_vocab.lookup(word) for word in sentence.split(' ')],
                                                max_len=max_src_len, pad_value=src_vocab.word2index['<pad>'])
                                   for sentence in src_batch], dtype=torch.long).T
        trg_tensor = torch.tensor([pad_sequence([trg_vocab.lookup(word) for word in sentence.split(' ')],
                                                max_len=max_trg_len, pad_value=trg_vocab.word2index['<pad>'])
                                   for sentence in trg_batch], dtype=torch.long).T
        yield src_tensor, trg_tensor
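# The trailing .T transposes each batch to [seq len, batch size], matching the
# sequence-first layout expected by nn.GRU with batch_first=False (the default).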
for epoch in range(N_EPOCHS):
    print(f'Epoch {epoch+1}/{N_EPOCHS}')
    train_iterator = data_generator(train_data, eng_vocab, fra_vocab, BATCH_SIZE)
    valid_iterator = data_generator(test_data, eng_vocab, fra_vocab, BATCH_SIZE)
    train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
    valid_loss = evaluate(model, valid_iterator, criterion)
    print(f'Epoch {epoch+1:02}, Train Loss: {train_loss:.3f}, Val. Loss: {valid_loss:.3f}')
bleu = calculate_bleu(test_data, model, eng_vocab, fra_vocab)
print(f'BLEU score = {bleu:.2f}')
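# A minimal greedy-decoding sketch for translating a single raw sentence with the trained
# model. This is not part of the original commit: the helper name translate_sentence, the
# max_len cap, and seeding the decoder with '<unk>' (the script defines no '<sos>' token)
# are assumptions made for illustration.
def translate_sentence(sentence, model, src_vocab, trg_vocab, max_len=50):
    model.eval()
    tokens = preprocess_sentence(sentence).split(' ')
    src_tensor = torch.tensor([src_vocab.lookup(w) for w in tokens],
                              dtype=torch.long).unsqueeze(1).to(device)
    with torch.no_grad():
        encoder_outputs, hidden = model.encoder(src_tensor)
        # Mirror Seq2Seq.forward: replicate the encoder state for each decoder layer
        hidden = hidden.unsqueeze(0).repeat(model.decoder.rnn.num_layers, 1, 1)
        input = torch.tensor([trg_vocab.word2index['<unk>']], dtype=torch.long).to(device)
        translation = []
        for _ in range(max_len):
            output, hidden = model.decoder(input, hidden, encoder_outputs)
            top1 = output.argmax(1)
            word = trg_vocab.index2word[top1.item()]
            if word == '<pad>':
                break
            translation.append(word)
            input = top1
    return ' '.join(translation)

# Hypothetical usage:
# print(translate_sentence('How are you?', model, eng_vocab, fra_vocab))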