ium_434780/train-tensorflow.py

86 lines
2.6 KiB
Python
Raw Permalink Normal View History

2021-05-08 22:32:28 +02:00
import pandas as pd
2021-05-06 22:50:47 +02:00
from silence_tensorflow import silence_tensorflow
2021-05-08 22:32:28 +02:00
from tensorflow import keras
2021-05-06 22:50:47 +02:00
silence_tensorflow()
2021-05-08 22:32:28 +02:00
from tensorflow.keras.preprocessing.text import Tokenizer
from collections import Counter
from tensorflow.keras.preprocessing.sequence import pad_sequences
2021-05-06 22:50:47 +02:00
from tensorflow.keras import layers
2021-05-08 22:32:28 +02:00
def counter_word(text_col):
    """Count whitespace-separated token frequencies across a text column.

    Parameters
    ----------
    text_col : pandas.Series of str
        Column of documents; each value is split on whitespace.

    Returns
    -------
    collections.Counter
        Mapping token -> total number of occurrences over all rows.
    """
    count = Counter()
    for text in text_col.values:
        # Counter.update consumes the token iterable in one pass instead of
        # incrementing one key at a time in a Python-level inner loop.
        count.update(text.split())
    return count
2021-05-06 22:50:47 +02:00
2021-05-08 22:32:28 +02:00
# Load the full corpus plus the pre-made train/dev/test splits.
df, train_df, val_df, test_df = (
    pd.read_csv(name) for name in ('data.csv', 'train.csv', 'dev.csv', 'test.csv')
)
2021-05-06 22:50:47 +02:00
2021-05-08 22:32:28 +02:00
# Rows without review text are useless for both vocabulary counting and
# training, so drop them from every frame (same order as before).
for frame in (df, val_df, test_df, train_df):
    frame.dropna(subset=['reviews.text'], inplace=True)
2021-05-06 22:50:47 +02:00
2021-05-08 22:32:28 +02:00
def _columns(frame):
    # Pull one split's (text, label) pair out as numpy arrays.
    return frame['reviews.text'].to_numpy(), frame['reviews.doRecommend'].to_numpy()

train_sentences, train_labels = _columns(train_df)
val_sentences, val_labels = _columns(val_df)
test_sentences, test_labels = _columns(test_df)
2021-05-06 22:50:47 +02:00
2021-05-08 22:32:28 +02:00
# print(train_labels.shape)
# print(train_sentences.shape)
2021-05-06 22:50:47 +02:00
2021-05-08 22:32:28 +02:00
# Vocabulary size is taken from the FULL corpus (data.csv), not just the
# train split — NOTE(review): this leaks dev/test vocabulary counts into
# the tokenizer's num_words cap; confirm that is intended.
counter = counter_word(df['reviews.text'])
num_unique_words = len(counter)

# The word index itself, however, is fit on training sentences only.
tokenizer = Tokenizer(num_words=num_unique_words)
tokenizer.fit_on_texts(train_sentences)

# word -> integer id mapping learned from the training text.
word_index = tokenizer.word_index
2021-04-25 22:14:32 +02:00
2021-05-08 22:32:28 +02:00
# Integer-encode each split with the train-fitted vocabulary.
train_sequences = tokenizer.texts_to_sequences(train_sentences)
val_sequences = tokenizer.texts_to_sequences(val_sentences)
test_sequences = tokenizer.texts_to_sequences(test_sentences)

# Fixed review length so batches are rectangular.
max_length = 30

def _pad(sequences):
    # Post-padding/truncation keeps the beginning of each review intact.
    return pad_sequences(sequences, maxlen=max_length, padding="post", truncating="post")

train_padded = _pad(train_sequences)
val_padded = _pad(val_sequences)
test_padded = _pad(test_sequences)
2021-04-25 22:14:32 +02:00
2021-05-16 20:10:28 +02:00
# Persist the encoded test set for later inspection.
# NOTE(review): assigning the 2-D padded array (n_rows, max_length) into a
# single DataFrame column looks fragile — recent pandas versions reject
# multi-column arrays here; confirm this runs on the pinned pandas version.
test_df['reviews.text'] = test_padded
test_df.to_csv('test.csv')
2021-04-25 22:14:32 +02:00
2021-05-08 22:32:28 +02:00
# Embedding -> LSTM -> sigmoid head: binary "recommend" classifier.
model = keras.models.Sequential([
    layers.Embedding(num_unique_words, 32, input_length=max_length),
    layers.LSTM(64, dropout=0.1),
    layers.Dense(1, activation="sigmoid"),
])

model.summary()
2021-04-25 22:14:32 +02:00
2021-05-08 22:32:28 +02:00
# Sigmoid output layer -> probabilities, so from_logits stays False.
loss = keras.losses.BinaryCrossentropy(from_logits=False)
# `lr` is a deprecated alias in tf.keras; `learning_rate` is the
# supported keyword for Adam.
optim = keras.optimizers.Adam(learning_rate=0.001)
metrics = ["accuracy"]

model.compile(loss=loss, optimizer=optim, metrics=metrics)
model.fit(train_padded, train_labels, epochs=20, validation_data=(val_padded, val_labels), verbose=2)
2021-04-25 22:14:32 +02:00
2021-05-08 22:32:28 +02:00
# Raw sigmoid scores for the held-out test set.
raw_scores = model.predict(test_padded)

# Threshold at 0.5 to turn probabilities into hard 0/1 labels.
predictions = [1 if score > 0.5 else 0 for score in raw_scores]
2021-04-25 22:14:32 +02:00
2021-05-16 20:10:28 +02:00
# Persist the trained model in TensorFlow SavedModel format.
model.save('trained_model')

# Context manager guarantees the handle is closed even if the write fails
# (the original open()/close() pair leaked the file on error).
with open('results.txt', 'w') as result_file:
    result_file.write(str(predictions))
2021-04-25 22:14:32 +02:00