# ium_151636/script5_1.py
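"""Train a small Keras feed-forward network to predict a movie's 'Rating' from
multi-hot encodings of its genres, plot keywords, and top cast."""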
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
# Load the dataset from the CSV file
data = pd.read_csv('data.csv')
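# Expected columns (names as spelled in the source CSV, typos included):
# 'movie title', 'Rating', 'User Rating', 'Generes', 'Plot Kyeword',
# 'Director', 'Top 5 Casts', 'Writer'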
# Prepare the data
X = data[['movie title', 'User Rating', 'Director', 'Top 5 Casts', 'Writer']]

# Convert the 'Rating' target to float
y = data['Rating'].astype('float64')

print("Unique values in 'Rating' column:", data['Rating'].unique())
print("Data type of 'Rating' column:", y.dtype)

# Impute missing ratings with the column mean. astype() above returned a copy,
# so the fill must happen on y itself -- filling data['Rating'] afterwards
# would never reach the target vector.
y = y.fillna(y.mean())
# Preprocess the data
# Convert the multi-label text columns into multi-hot numerical representations.
# Each of these columns holds a single string per row, so it must be split into
# a list of labels first -- feeding raw strings to MultiLabelBinarizer would
# binarize individual characters. Comma-separated values are assumed here;
# adjust the split if the CSV stores the lists differently.
def to_label_lists(column):
    return column.astype(str).str.split(',').apply(lambda labels: [l.strip() for l in labels])

mlb_genres = MultiLabelBinarizer()
X_genres = pd.DataFrame(mlb_genres.fit_transform(to_label_lists(data['Generes'])),
                        columns=mlb_genres.classes_)

mlb_keywords = MultiLabelBinarizer()
X_keywords = pd.DataFrame(mlb_keywords.fit_transform(to_label_lists(data['Plot Kyeword'])),
                          columns=mlb_keywords.classes_)

mlb_casts = MultiLabelBinarizer()
X_casts = pd.DataFrame(mlb_casts.fit_transform(to_label_lists(data['Top 5 Casts'])),
                       columns=mlb_casts.classes_)

# Concatenate the encoded columns with the raw features, then keep only the
# numeric columns -- Keras cannot train on the remaining string-valued ones
X = pd.concat([X, X_genres, X_keywords, X_casts], axis=1)
X = X.select_dtypes(include=[np.number]).astype('float32')
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# y was converted to float and NaN-imputed before the split, so the training
# targets need no realignment; just convert the Series to a NumPy array for Keras
y_train = y_train.to_numpy()
# Create the neural network model
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=X.shape[1]))
model.add(Dense(16, activation='relu'))
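# Single linear output unit: the network regresses the rating directly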
model.add(Dense(1))
# Compile the model
model.compile(optimizer=Adam(), loss='mse')
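# Mean squared error is the standard loss for a scalar regression target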
print("Data type of y_train:", y_train.dtype)
print("First few values of y_train:", y_train[:10])
# Train the model
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_test, y_test))
# Evaluate the model
mse = model.evaluate(X_test, y_test)
print("Mean Squared Error:", mse)