update
This commit is contained in:
parent
546623e34e
commit
cf9b7a024e
@@ -1,263 +1,178 @@
import torch
import torch.nn as nn
import torch.optim as optim
import random
import re
import unicodedata
from torchtext.data.metrics import bleu_score
import numpy as np
import keras
from nltk.translate.bleu_score import corpus_bleu

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Configuration
batch_size = 64
epochs = 350
latent_dim = 256
num_samples = 50000
data_path = "fra.txt"

# Data preparation
def unicode_to_ascii(s):
    return ''.join(c for c in unicodedata.normalize('NFD', s)
                   if unicodedata.category(c) != 'Mn')

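# Quick illustrative check (assumed example string, not taken from the dataset):
# unicode_to_ascii("Déjà vu !") -> "Deja vu !"
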
# Preparing data
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
with open(data_path, "r", encoding="utf-8") as f:
    lines = f.read().split("\n")
for line in lines[: min(num_samples, len(lines) - 1)]:
    input_text, target_text, _ = line.split("\t")
    # Use tab as the "start of sequence" character and newline as "end of sequence".
    target_text = "\t" + target_text + "\n"
    input_texts.append(input_text)
    target_texts.append(target_text)
    for char in input_text:
        if char not in input_characters:
            input_characters.add(char)
    for char in target_text:
        if char not in target_characters:
            target_characters.add(char)

def preprocess_sentence(w):
    w = unicode_to_ascii(w.lower().strip())
    # Put a space around punctuation so it becomes a separate token.
    w = re.sub(r"([?.!,¿])", r" \1 ", w)
    # Collapse runs of whitespace.
    w = re.sub(r"\s+", " ", w)
    # Keep only letters and the punctuation handled above.
    w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w)
    w = w.strip()
    return w

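# Illustrative call (assumed input, not taken from the dataset):
# preprocess_sentence("Il a froid!") -> "il a froid !"
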
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
max_encoder_seq_length = max([len(txt) for txt in input_texts])
max_decoder_seq_length = max([len(txt) for txt in target_texts])

def read_langs(lang1, lang2, path):
    with open(path, encoding='utf-8') as f:
        lines = f.read().strip().split('\n')
    pairs = []
    for line in lines:
        parts = line.split('\t')
        if len(parts) >= 2:
            pairs.append([preprocess_sentence(parts[0]), preprocess_sentence(parts[1])])
    return pairs

print("Number of samples:", len(input_texts))
|
||||
print("Number of unique input tokens:", num_encoder_tokens)
|
||||
print("Number of unique output tokens:", num_decoder_tokens)
|
||||
print("Max sequence length for inputs:", max_encoder_seq_length)
|
||||
print("Max sequence length for outputs:", max_decoder_seq_length)
|
||||
|
||||
data_path = 'fra.txt'
pairs = read_langs('eng', 'fra', data_path)
input_token_index = dict([(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict([(char, i) for i, char in enumerate(target_characters)])

# Vocabulary class
class Vocabulary:
    def __init__(self):
        self.word2index = {}
        self.index2word = {}
        self.word2count = {}
        self.n_words = 0
        self.add_word('<unk>')
        self.add_word('<pad>')

encoder_input_data = np.zeros(
    (len(input_texts), max_encoder_seq_length, num_encoder_tokens),
    dtype="float32",
)
decoder_input_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
    dtype="float32",
)
decoder_target_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
    dtype="float32",
)

    def add_sentence(self, sentence):
        for word in sentence.split(' '):
            self.add_word(word)

for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
    for t, char in enumerate(input_text):
        encoder_input_data[i, t, input_token_index[char]] = 1.0
    encoder_input_data[i, t + 1 :, input_token_index[" "]] = 1.0
    for t, char in enumerate(target_text):
        decoder_input_data[i, t, target_token_index[char]] = 1.0
        if t > 0:
            # decoder_target_data is ahead of decoder_input_data by one timestep.
            decoder_target_data[i, t - 1, target_token_index[char]] = 1.0
    decoder_input_data[i, t + 1 :, target_token_index[" "]] = 1.0
    decoder_target_data[i, t:, target_token_index[" "]] = 1.0

    def add_word(self, word):
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.index2word[self.n_words] = word
            self.word2count[word] = 1
            self.n_words += 1
        else:
            self.word2count[word] += 1

# Creating the model
encoder_inputs = keras.Input(shape=(None, num_encoder_tokens))
encoder = keras.layers.LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# Keep only the final LSTM states; they condition the decoder.
encoder_states = [state_h, state_c]

    def lookup(self, word):
        # Fall back to the <unk> index for out-of-vocabulary words.
        return self.word2index.get(word, self.word2index['<unk>'])

decoder_inputs = keras.Input(shape=(None, num_decoder_tokens))
decoder_lstm = keras.layers.LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
decoder_dense = keras.layers.Dense(num_decoder_tokens, activation="softmax")
decoder_outputs = decoder_dense(decoder_outputs)

eng_vocab = Vocabulary()
fra_vocab = Vocabulary()
model = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)

for pair in pairs:
    eng_vocab.add_sentence(pair[0])
    fra_vocab.add_sentence(pair[1])

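# Illustrative lookup (actual indices depend on the corpus): eng_vocab.lookup('hello')
# returns that word's index if it occurred in the pairs, otherwise the '<unk>' index.
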
# Training the model
model.compile(
    optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"]
)
model.fit(
    [encoder_input_data, decoder_input_data],
    decoder_target_data,
    batch_size=batch_size,
    epochs=epochs,
    validation_split=0.2,
)
model.save("s2s_model.keras")

# Seq2Seq model with attention (PyTorch)
class Encoder(nn.Module):
    def __init__(self, input_dim, emb_dim, hid_dim, n_layers, dropout):
        super().__init__()
        self.embedding = nn.Embedding(input_dim, emb_dim)
        self.rnn = nn.GRU(emb_dim, hid_dim, n_layers, dropout=dropout, bidirectional=True)
        self.fc = nn.Linear(hid_dim * 2, hid_dim)
        self.dropout = nn.Dropout(dropout)

# Sampling: restore the trained model and rebuild encoder/decoder models for inference
model = keras.models.load_model("s2s_model.keras")

    def forward(self, src):
        # src: [src_len, batch]
        embedded = self.dropout(self.embedding(src))
        outputs, hidden = self.rnn(embedded)
        # Concatenate the final forward and backward hidden states and project
        # them back to hid_dim for the decoder's initial state.
        hidden = torch.tanh(self.fc(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)))
        return outputs, hidden

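    # Illustrative shapes, assuming toy sizes rather than the real hyperparameters:
    # for Encoder(input_dim=10, emb_dim=4, hid_dim=8, n_layers=2, dropout=0.1) and a
    # src batch of shape [7, 3], outputs has shape [7, 3, 16] and hidden has shape [3, 8].
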
encoder_inputs = model.input[0]
encoder_outputs, state_h_enc, state_c_enc = model.layers[2].output
encoder_states = [state_h_enc, state_c_enc]
encoder_model = keras.Model(encoder_inputs, encoder_states)

class Attention(nn.Module):
    def __init__(self, hid_dim):
        super().__init__()
        # Score the decoder hidden state against each (bidirectional) encoder output.
        self.attn = nn.Linear(hid_dim * 3, hid_dim)
        self.v = nn.Linear(hid_dim, 1, bias=False)

decoder_inputs = model.input[1]
decoder_state_input_h = keras.Input(shape=(latent_dim,))
decoder_state_input_c = keras.Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_lstm = model.layers[3]
decoder_outputs, state_h_dec, state_c_dec = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs
)
decoder_states = [state_h_dec, state_c_dec]
decoder_dense = model.layers[4]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = keras.Model(
    [decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states
)

    def forward(self, hidden, encoder_outputs):
        # hidden = [batch size, hid dim]
        # encoder_outputs = [src len, batch size, hid dim * 2]
        src_len = encoder_outputs.shape[0]
        hidden = hidden.unsqueeze(1).expand(-1, src_len, -1)
        encoder_outputs = encoder_outputs.permute(1, 0, 2)
        # hidden = [batch size, src len, hid dim]
        # encoder_outputs = [batch size, src len, hid dim * 2]
        energy = torch.tanh(self.attn(torch.cat((hidden, encoder_outputs), dim=2)))
        # energy = [batch size, src len, hid dim]
        attention = self.v(energy).squeeze(2)
        # attention = [batch size, src len]
        return torch.softmax(attention, dim=1)

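    # Illustrative shape check (toy sizes, not part of the training run):
    #   attn = Attention(hid_dim=8)
    #   h = torch.zeros(4, 8)        # [batch, hid_dim]
    #   enc = torch.zeros(5, 4, 16)  # [src_len, batch, hid_dim * 2]
    #   attn(h, enc).shape           # torch.Size([4, 5]); each row sums to 1
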
# Reverse-lookup token indices to decode sampled characters back to text.
reverse_input_char_index = dict((i, char) for char, i in input_token_index.items())
reverse_target_char_index = dict((i, char) for char, i in target_token_index.items())

class Decoder(nn.Module):
    def __init__(self, output_dim, emb_dim, hid_dim, n_layers, dropout, attention):
        super().__init__()
        self.output_dim = output_dim
        self.attention = attention
        self.embedding = nn.Embedding(output_dim, emb_dim)
        self.rnn = nn.GRU(hid_dim * 2 + emb_dim, hid_dim, n_layers, dropout=dropout)
        self.fc_out = nn.Linear(hid_dim * 3 + emb_dim, output_dim)
        self.dropout = nn.Dropout(dropout)

def decode_sequences(input_seqs):
    # Encode the inputs once, then generate characters greedily one step at a time.
    states_values = encoder_model.predict(input_seqs, verbose=0)
    # Start every target sequence with the "\t" start-of-sequence character.
    target_seqs = np.zeros((len(input_seqs), 1, num_decoder_tokens))
    target_seqs[:, 0, target_token_index["\t"]] = 1.0
    decoded_sentences = [""] * len(input_seqs)
    stop_conditions = np.zeros(len(input_seqs), dtype=bool)

    def forward(self, input, hidden, encoder_outputs):
        # input: [batch] of token indices for a single timestep
        input = input.unsqueeze(0)
        embedded = self.dropout(self.embedding(input))
        # Attention weights over the source positions, based on the top decoder layer.
        a = self.attention(hidden[-1], encoder_outputs).unsqueeze(1)
        encoder_outputs = encoder_outputs.permute(1, 0, 2)
        # Weighted sum of encoder outputs: [batch, 1, hid_dim * 2]
        weighted = torch.bmm(a, encoder_outputs)
        rnn_input = torch.cat((embedded, weighted.permute(1, 0, 2)), dim=2)
        output, hidden = self.rnn(rnn_input, hidden)
        embedded = embedded.squeeze(0)
        output = output.squeeze(0)
        weighted = weighted.squeeze(1)
        prediction = self.fc_out(torch.cat((output, weighted, embedded), dim=1))
        return prediction, hidden

    while not np.all(stop_conditions):
        output_tokens, h, c = decoder_model.predict(
            [target_seqs] + states_values, verbose=0
        )

class Seq2Seq(nn.Module):
    def __init__(self, encoder, decoder, device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.device = device

        # Greedy sampling: take the most probable character for every sequence.
        sampled_token_indices = np.argmax(output_tokens[:, -1, :], axis=1)
        sampled_chars = [
            reverse_target_char_index[idx] for idx in sampled_token_indices
        ]

    def forward(self, src, trg, teacher_forcing_ratio=0.5):
        trg_len = trg.shape[0]
        batch_size = trg.shape[1]
        trg_vocab_size = self.decoder.output_dim
        outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)
        encoder_outputs, hidden = self.encoder(src)
        # Initialize the decoder hidden state with the encoder's, repeated per decoder layer.
        hidden = hidden.unsqueeze(0).repeat(self.decoder.rnn.num_layers, 1, 1)
        input = trg[0, :]
        for t in range(1, trg_len):
            output, hidden = self.decoder(input, hidden, encoder_outputs)
            outputs[t] = output
            top1 = output.argmax(1)
            # Teacher forcing: sometimes feed the ground-truth token, otherwise the prediction.
            input = trg[t] if random.random() < teacher_forcing_ratio else top1
        return outputs

        for i, char in enumerate(sampled_chars):
            decoded_sentences[i] += char
            # Stop on the end-of-sequence character or when the sentence gets too long.
            if char == "\n" or len(decoded_sentences[i]) > max_decoder_seq_length:
                stop_conditions[i] = True

# Training and evaluation functions
def train(model, iterator, optimizer, criterion, clip, print_every=100, max_batches=1000):
    model.train()
    epoch_loss = 0
    i = 0  # Batch counter
    for src, trg in iterator:
        if i >= max_batches:  # Limit the number of batches processed per epoch
            break
        src = src.to(device)
        trg = trg.to(device)
        optimizer.zero_grad()
        output = model(src, trg)
        output_dim = output.shape[-1]
        # Skip the first (start-of-sequence) position when computing the loss.
        output = output[1:].reshape(-1, output_dim)
        trg = trg[1:].reshape(-1)
        loss = criterion(output, trg)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        epoch_loss += loss.item()

        if (i + 1) % print_every == 0:
            print(f'Batch {i+1}, Loss: {loss.item():.4f}')

        i += 1
        # Feed the sampled characters back in as the next decoder input.
        target_seqs = np.zeros((len(input_seqs), 1, num_decoder_tokens))
        for i, token_index in enumerate(sampled_token_indices):
            target_seqs[i, 0, token_index] = 1.0

    return epoch_loss / (i if i > 0 else 1)  # Avoid division by zero

        # Carry the updated LSTM states forward to the next decoding step.
        states_values = [h, c]

def evaluate(model, iterator, criterion):
    model.eval()
    epoch_loss = 0
    i = 0  # Batch counter
    with torch.no_grad():
        for src, trg in iterator:
            src = src.to(device)
            trg = trg.to(device)
            # Turn off teacher forcing during evaluation.
            output = model(src, trg, 0)
            output_dim = output.shape[-1]
            output = output[1:].reshape(-1, output_dim)
            trg = trg[1:].reshape(-1)
            loss = criterion(output, trg)
            epoch_loss += loss.item()
            i += 1
    return epoch_loss / (i if i > 0 else 1)  # Avoid division by zero

    return decoded_sentences

# BLEU score calculation (PyTorch model, word level)
def calculate_bleu(data, model, src_vocab, trg_vocab):
    trgs = []
    pred_trgs = []
    for (src, trg) in data:
        src_tensor = torch.tensor([src_vocab.lookup(word) for word in src.split(' ')]).unsqueeze(1).to(device)
        trg_tensor = torch.tensor([trg_vocab.lookup(word) for word in trg.split(' ')]).unsqueeze(1).to(device)
        with torch.no_grad():
            output = model(src_tensor, trg_tensor, 0)
        output_dim = output.shape[-1]
        output = output[1:].reshape(-1, output_dim)
        output = output.argmax(1)
        # Drop padding tokens from the prediction before scoring.
        pred_trg = [trg_vocab.index2word[idx.item()] for idx in output
                    if idx.item() != trg_vocab.word2index['<pad>']]
        pred_trgs.append(pred_trg)
        trgs.append([trg.split(' ')])
    return bleu_score(pred_trgs, trgs)

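# torchtext's bleu_score takes a candidate corpus and a list of reference lists
# (illustrative toy call): bleu_score([['a', 'b', 'c', 'd']], [[['a', 'b', 'c', 'd']]]) -> 1.0
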
# BLEU score evaluation (Keras model, character level)
def calculate_bleu_score(input_texts, target_texts, num_samples=500):
    # One-hot encode the first num_samples source sentences.
    input_seqs = np.zeros(
        (num_samples, max_encoder_seq_length, num_encoder_tokens), dtype="float32"
    )
    for i, input_text in enumerate(input_texts[:num_samples]):
        for t, char in enumerate(input_text):
            input_seqs[i, t, input_token_index[char]] = 1.0
        input_seqs[i, t + 1 :, input_token_index[" "]] = 1.0

# Main script (PyTorch training)
INPUT_DIM = eng_vocab.n_words
OUTPUT_DIM = fra_vocab.n_words
ENC_EMB_DIM = 256
DEC_EMB_DIM = 256
HID_DIM = 512
N_LAYERS = 2
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
BATCH_SIZE = 32
N_EPOCHS = 7
CLIP = 1

    decoded_sentences = decode_sequences(input_seqs)

attn = Attention(HID_DIM)
enc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT, attn)

    # Character-level BLEU against the reference translations.
    references = [[list(text.strip())] for text in target_texts[:num_samples]]
    hypotheses = [list(text.strip()) for text in decoded_sentences]
    bleu = corpus_bleu(references, hypotheses)
    print("BLEU Score:", bleu)

model = Seq2Seq(enc, dec, device).to(device)
optimizer = optim.Adam(model.parameters())
# Padding positions do not contribute to the loss.
criterion = nn.CrossEntropyLoss(ignore_index=fra_vocab.word2index['<pad>'])

print("\nExample Translations:")
|
||||
for i in range(10):
|
||||
print("Input:", input_texts[i])
|
||||
print("Target:", target_texts[i])
|
||||
print("Translation:", decoded_sentences[i])
|
||||
print()
|
||||
|
||||
# Splitting data into train and test sets (80/20)
train_data = pairs[:int(0.8 * len(pairs))]
test_data = pairs[int(0.8 * len(pairs)):]

# Custom data loader with padding
def pad_sequence(seq, max_len, pad_value):
    # Note: extends the list in place and also returns it.
    seq += [pad_value] * (max_len - len(seq))
    return seq

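# Illustrative call with toy values: pad_sequence([5, 7], max_len=4, pad_value=0) -> [5, 7, 0, 0]
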
def data_generator(data, src_vocab, trg_vocab, batch_size):
    for i in range(0, len(data), batch_size):
        src_batch = [d[0] for d in data[i:i + batch_size]]
        trg_batch = [d[1] for d in data[i:i + batch_size]]
        max_src_len = max(len(s.split(' ')) for s in src_batch)
        max_trg_len = max(len(s.split(' ')) for s in trg_batch)
        # Index, pad, and transpose to [seq_len, batch] as expected by the model.
        src_tensor = torch.tensor(
            [pad_sequence([src_vocab.lookup(word) for word in sentence.split(' ')],
                          max_len=max_src_len, pad_value=src_vocab.word2index['<pad>'])
             for sentence in src_batch],
            dtype=torch.long,
        ).T
        trg_tensor = torch.tensor(
            [pad_sequence([trg_vocab.lookup(word) for word in sentence.split(' ')],
                          max_len=max_trg_len, pad_value=trg_vocab.word2index['<pad>'])
             for sentence in trg_batch],
            dtype=torch.long,
        ).T
        yield src_tensor, trg_tensor

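# Illustrative use (toy batch size): next(data_generator(train_data, eng_vocab, fra_vocab, 2))
# yields a (src_tensor, trg_tensor) pair, each shaped [seq_len, batch] with batch == 2.
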
for epoch in range(N_EPOCHS):
    print(f'Epoch {epoch+1}/{N_EPOCHS}')
    # Generators are exhausted after one pass, so rebuild them every epoch.
    train_iterator = data_generator(train_data, eng_vocab, fra_vocab, BATCH_SIZE)
    valid_iterator = data_generator(test_data, eng_vocab, fra_vocab, BATCH_SIZE)
    train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
    valid_loss = evaluate(model, valid_iterator, criterion)
    print(f'Epoch {epoch+1:02}, Train Loss: {train_loss:.3f}, Val. Loss: {valid_loss:.3f}')

# Word-level BLEU for the PyTorch model, then character-level BLEU for the Keras model.
bleu = calculate_bleu(test_data, model, eng_vocab, fra_vocab)
print(f'BLEU score = {bleu:.2f}')
calculate_bleu_score(input_texts, target_texts)