#!/usr/bin/env python
# coding: utf-8
# In[25]:
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Embedding, Dense, Lambda
from gensim.models import Word2Vec
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping
# In[26]:
import lzma
def read_xz_file(file_path):
    data = []
    with lzma.open(file_path, 'rt', encoding='utf-8') as f:
        for line in f:
            # lowercase and strip soft hyphens plus literal and escaped newline markers
            line = line.lower().replace("-\\n", "").replace("\\n", " ").replace("\xad", "").replace("\\\\n", " ").replace("\\\\", " ").replace("\n", " ")
            data.append(line)
    return data
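# The challenge's in.tsv.xz is tab-separated; the loops further down assume column 6
# holds the text before the gap and column 7 the text after it. A quick sanity check
# (kept commented out so the script's behaviour is unchanged):
#
#   sample = read_xz_file("train\\in.tsv.xz")[0].split("\t")
#   print(len(sample), sample[6][:80], "...", sample[7][:80])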
# In[27]:
def read_tsv_file(file_path):
    data = []
    with open(file_path, 'r', encoding='utf-8') as file:
        for line in file:
            line = line.strip().split('\t')
            data.append(line)
    return data
# In[28]:
file_path = "train\\in.tsv.xz"
# In[29]:
data = read_xz_file(file_path)
# In[30]:
expected = read_tsv_file("train\\expected.tsv")
# In[31]:
corpus_before = []
corpus_after = []
for i in range(20000):
    corpus_before.append(str(data[i].split("\t")[6]))
    corpus_after.append(str(data[i].split("\t")[7]))
# In[32]:
# read_tsv_file returns each row as a list, and expected.tsv has a single column,
# so take the word itself rather than the stringified list.
for i in range(20000):
    expected[i] = str(expected[i][0]).lower()
# In[33]:
corpus = []
for i in range(20000):
    corpus.append(corpus_before[i] + " " + expected[i] + " " + corpus_after[i])
# In[34]:
sentences = [text.split() for text in corpus]
# In[35]:
# Word2Vec already trains during construction when sentences are passed in;
# the explicit train() call below runs 10 additional epochs on the same corpus.
word2vec_model = Word2Vec(sentences, vector_size=70, window=5, min_count=1, workers=4)
word2vec_model.train(sentences, total_examples=word2vec_model.corpus_count, epochs=10)
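# Optional sanity check on the embeddings ("time" is just an example word assumed
# to appear in the training corpus):
# print(word2vec_model.wv.most_similar("time", topn=3))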
# In[36]:
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
# In[37]:
input_sequences = []
output_words = []
for before, word, after in zip(corpus_before, expected, corpus_after):
    before_tokens = tokenizer.texts_to_sequences([before])[0]
    after_tokens = tokenizer.texts_to_sequences([after])[0]
    word_token = tokenizer.texts_to_sequences([word])[0][0]
    for i in range(1, 6):
        input_seq = before_tokens[-(5-i):] + [word_token] + after_tokens[:i]
        input_sequences.append(pad_sequences([input_seq], maxlen=5, padding='pre')[0])
        output_words.append(word_token)
input_sequences = np.array(input_sequences)
output_words = np.array(output_words)
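# Window construction: each example is meant to be a 5-token mix of the tail of the
# left context, the gap word, and the head of the right context, with the gap word's
# token as the label. Two quirks worth knowing: the raw window has (5-i) + 1 + i = 6
# tokens, and pad_sequences (default truncating='pre') keeps only the last 5; and for
# i = 5 the slice -(5-i) becomes -0, i.e. [0:], so the whole left context is taken and
# the gap word itself can be truncated out of that window.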
# In[38]:
embedding_matrix = np.zeros((total_words, 70))
for word, i in tokenizer.word_index.items():
    if word in word2vec_model.wv:
        embedding_matrix[i] = word2vec_model.wv[word]
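# Optional coverage check: fraction of tokenizer words that actually received a
# pretrained vector (rows left all-zero fall back to a zero embedding):
# covered = sum(1 for w in tokenizer.word_index if w in word2vec_model.wv)
# print(f"{covered}/{total_words - 1} vocabulary words have Word2Vec vectors")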
# In[39]:
X_train, X_val, y_train, y_val = train_test_split(input_sequences, output_words, test_size=0.2, random_state=33)
# In[40]:
input_layer = Input(shape=(5,))
# Initialise the frozen embedding layer with the Word2Vec matrix built above
# (otherwise the layer keeps random, untrainable weights and the matrix goes unused).
embedding_layer = Embedding(total_words, 70,
                            embeddings_initializer=tf.keras.initializers.Constant(embedding_matrix),
                            trainable=False)(input_layer)
sum_layer = Lambda(lambda x: tf.reduce_sum(x, axis=1))(embedding_layer)
dense_layer1 = Dense(128, activation='relu')(sum_layer)
dense_layer2 = Dense(64, activation='relu')(dense_layer1)
linear_layer = Dense(70, activation='relu')(dense_layer2)
output_layer = Dense(total_words, activation='softmax')(linear_layer)
# In[41]:
model = Model(inputs=input_layer, outputs=output_layer)
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
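# Note: sparse_categorical_crossentropy expects integer class ids, which is exactly
# what output_words holds (token indices), so no one-hot encoding is needed.
# Optional: inspect layer shapes and parameter counts before training.
# model.summary()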
# In[42]:
model.fit(X_train, y_train, epochs=15, verbose=2, validation_data=(X_val, y_val), callbacks=[early_stopping])
# In[43]:
model.save('ngram_model_20k.keras')
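# The model can later be reloaded with tf.keras.models.load_model('ngram_model_20k.keras');
# depending on the Keras version, the Lambda layer may require passing safe_mode=False.
# The fitted Tokenizer is not part of the saved file, so it would need to be persisted
# separately (e.g. with pickle) for standalone inference.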
# In[44]:
def predict_next_words(model, tokenizer, text, top_n=1):
    # Use the last 5 tokens of the given context as the model input window.
    sequence = tokenizer.texts_to_sequences([text])[0]
    padded_sequence = pad_sequences([sequence], maxlen=5, padding='pre')
    predictions = model.predict(padded_sequence, verbose=0)[0]
    top_indices = np.argsort(predictions)[-top_n:][::-1]
    # Index 0 is the padding index and has no entry in index_word, hence .get().
    top_words = [tokenizer.index_word.get(index, "") for index in top_indices]
    top_probabilities = [predictions[index] for index in top_indices]
    return list(zip(top_words, top_probabilities))
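# Usage sketch (left commented out; the return value is a list of
# (word, probability) pairs sorted by descending probability):
# print(predict_next_words(model, tokenizer, "united states of america is a", top_n=3))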
# In[45]:
data = read_xz_file("dev-0\\in.tsv.xz")
corpus_before = []
corpus_after = []
for i in range(len(data)):
    corpus_before.append(str(data[i].split("\t")[6]))
    corpus_after.append(str(data[i].split("\t")[7]))
with open("dev-0\\out.tsv", "w", encoding="utf-8") as output:
    for text in corpus_before:
        predictions = predict_next_words(model, tokenizer, text)
        result = " ".join([f"{word}:{round(probability,5)}" for word, probability in predictions])
        output.write(result.replace("\n", "").strip() + "\n")