# ium_434765/neural_network.py
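"""Train a dense Keras network that predicts the number of likes of a
trending YouTube video from its view count, evaluate it on a dev split,
and save the trained model.

Usage: python neural_network.py <epochs>
"""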
import sys

import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from tensorflow import keras


def normalize_data(data):
    return (data - np.min(data)) / (np.max(data) - np.min(data))
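
# Load the training split and drop rows with missing values.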
data = pd.read_csv("data_train", sep=',', skip_blank_lines=True, nrows=1087, error_bad_lines=False,
                   names=["video_id", "last_trending_date", "publish_date", "publish_hour", "category_id",
                          "channel_title", "views", "likes", "dislikes", "comment_count", "comments_disabled",
                          "ratings_disabled", "tag_appeared_in_title_count", "tag_appeared_in_title", "title",
                          "tags", "description", "trend_day_count", "trend_publish_diff", "trend_tag_highest",
                          "trend_tag_total", "tags_count", "subscriber"]).dropna()
X = []
for datum in data[["views"]].values:
    try:
        X.append(int(datum))
    except (ValueError, TypeError):
        print(datum)
X = pd.DataFrame(X)

y = []
for datum in data[["likes"]].values:
    try:
        y.append(int(datum))
    except (ValueError, TypeError):
        print(datum)
y = pd.DataFrame(y)
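
# Min-max normalize the input feature (views) and print the scaling bounds.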
min_val_sub = np.min(X)
max_val_sub = np.max(X)
X = (X - min_val_sub) / (max_val_sub - min_val_sub)
print(min_val_sub)
print(max_val_sub)
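
# Min-max normalize the target (likes) the same way.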
min_val_like = np.min(y)
max_val_like = np.max(y)
y = (y - min_val_like) / (max_val_like - min_val_like)
print(min_val_like)
print(max_val_like)
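
# Fully connected regression network: normalized views in, predicted likes out.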
model = keras.Sequential([
    keras.layers.Dense(512, input_dim=X.shape[1], activation='relu'),
    keras.layers.Dense(256, activation='relu'),
    keras.layers.Dense(256, activation='relu'),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(1, activation='linear'),
])
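
# Train with mean absolute error; the epoch count is read from the command line.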
model.compile(loss='mean_absolute_error', optimizer="Adam", metrics=['mean_absolute_error'])

model.fit(X, y, epochs=int(sys.argv[1]), validation_split=0.3)
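
# Load the dev split for evaluation.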
data = pd.read_csv("data_dev", sep=',', error_bad_lines=False,
                   skip_blank_lines=True, nrows=527,
                   names=["video_id", "last_trending_date", "publish_date", "publish_hour",
                          "category_id", "channel_title", "views", "likes", "dislikes",
                          "comment_count"]).dropna()
X_test = data.loc[:, data.columns == "views"].astype(int)
y_test = data.loc[:, data.columns == "likes"].astype(int)
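
# Normalize the dev data with its own min/max and print the bounds.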
min_val_sub = np.min(X_test)
max_val_sub = np.max(X_test)
X_test = (X_test - min_val_sub) / (max_val_sub - min_val_sub)
print(min_val_sub)
print(max_val_sub)

min_val_like = np.min(y_test)
max_val_like = np.max(y_test)
print(min_val_like)
print(max_val_like)
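
# Predict on the dev inputs and map the normalized predictions back to the
# original "likes" scale.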
prediction = model.predict(X_test)
prediction_denormalized = []
for pred in prediction:
    denorm = pred[0] * (max_val_like[0] - min_val_like[0]) + min_val_like[0]
    prediction_denormalized.append(denorm)
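
# Write predicted vs. expected likes to predictions.txt and report the MSE
# of the de-normalized predictions against the raw like counts.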
with open("predictions.txt", "w") as f:
    for (pred, test) in zip(prediction_denormalized, y_test.values):
        f.write("predicted: %s expected: %s\n" % (str(pred), str(test[0])))

error = mean_squared_error(y_test, prediction_denormalized)
print(error)
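
# Persist the trained model under "model/". With the TF 2.x releases current
# when this script was written, a path without an extension is saved in the
# SavedModel format.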
model.save('model')