%matplotlib inline
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import Markdown, display, HTML
from collections import defaultdict
import torch
import torch.nn as nn
import torch.optim as optim
from livelossplot import PlotLosses
# Fix the dying kernel problem (only a problem in some installations - you can remove it, if it works without it)
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
The autoreload extension is already loaded. To reload it, use: %reload_ext autoreload
Load the dataset for recommenders
data_path = os.path.join("data", "hotel_data")
interactions_df = pd.read_csv(os.path.join(data_path, "hotel_data_interactions_df.csv"), index_col=0)
base_item_features = ['term', 'length_of_stay_bucket', 'rate_plan', 'room_segment', 'n_people_bucket', 'weekend_stay']
column_values_dict = {
    'term': ['WinterVacation', 'Easter', 'OffSeason', 'HighSeason', 'LowSeason', 'MayLongWeekend', 'NewYear', 'Christmas'],
    'length_of_stay_bucket': ['[0-1]', '[2-3]', '[4-7]', '[8-inf]'],
    'rate_plan': ['Standard', 'Nonref'],
    'room_segment': ['[0-160]', '[160-260]', '[260-360]', '[360-500]', '[500-900]'],
    'n_people_bucket': ['[1-1]', '[2-2]', '[3-4]', '[5-inf]'],
    'weekend_stay': ['True', 'False']
}
interactions_df.loc[:, 'term'] = pd.Categorical(
    interactions_df['term'], categories=column_values_dict['term'])
interactions_df.loc[:, 'length_of_stay_bucket'] = pd.Categorical(
    interactions_df['length_of_stay_bucket'], categories=column_values_dict['length_of_stay_bucket'])
interactions_df.loc[:, 'rate_plan'] = pd.Categorical(
    interactions_df['rate_plan'], categories=column_values_dict['rate_plan'])
interactions_df.loc[:, 'room_segment'] = pd.Categorical(
    interactions_df['room_segment'], categories=column_values_dict['room_segment'])
interactions_df.loc[:, 'n_people_bucket'] = pd.Categorical(
    interactions_df['n_people_bucket'], categories=column_values_dict['n_people_bucket'])
interactions_df.loc[:, 'weekend_stay'] = interactions_df['weekend_stay'].astype('str')
interactions_df.loc[:, 'weekend_stay'] = pd.Categorical(
    interactions_df['weekend_stay'], categories=column_values_dict['weekend_stay'])
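# (The repeated assignments above could equally be written as a loop, e.g.
#  for col in ['term', 'length_of_stay_bucket', 'rate_plan', 'room_segment', 'n_people_bucket', 'weekend_stay']:
#      interactions_df[col] = pd.Categorical(interactions_df[col], categories=column_values_dict[col])
#  with weekend_stay cast to str first; this keeps the category order from column_values_dict.)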
display(HTML(interactions_df.head(15).to_html()))
  | user_id | item_id | term | length_of_stay_bucket | rate_plan | room_segment | n_people_bucket | weekend_stay |
---|---|---|---|---|---|---|---|---|
0 | 1 | 0 | WinterVacation | [2-3] | Standard | [260-360] | [5-inf] | True |
1 | 2 | 1 | WinterVacation | [2-3] | Standard | [160-260] | [3-4] | True |
2 | 3 | 2 | WinterVacation | [2-3] | Standard | [160-260] | [2-2] | False |
3 | 4 | 3 | WinterVacation | [4-7] | Standard | [160-260] | [3-4] | True |
4 | 5 | 4 | WinterVacation | [4-7] | Standard | [0-160] | [2-2] | True |
5 | 6 | 5 | Easter | [4-7] | Standard | [260-360] | [5-inf] | True |
6 | 7 | 6 | OffSeason | [2-3] | Standard | [260-360] | [5-inf] | True |
7 | 8 | 7 | HighSeason | [2-3] | Standard | [160-260] | [1-1] | True |
8 | 9 | 8 | HighSeason | [2-3] | Standard | [0-160] | [1-1] | True |
9 | 8 | 7 | HighSeason | [2-3] | Standard | [160-260] | [1-1] | True |
10 | 8 | 7 | HighSeason | [2-3] | Standard | [160-260] | [1-1] | True |
11 | 10 | 9 | HighSeason | [2-3] | Standard | [160-260] | [3-4] | True |
12 | 11 | 9 | HighSeason | [2-3] | Standard | [160-260] | [3-4] | True |
13 | 12 | 10 | HighSeason | [8-inf] | Standard | [160-260] | [3-4] | True |
14 | 14 | 11 | HighSeason | [2-3] | Standard | [0-160] | [3-4] | True |
(Optional) Prepare numerical user features
The method below is left here for convenience if you want to experiment with content-based user features as an input for your neural network.
def n_to_p(l):
    # Normalize a list of counts into a probability distribution
    n = sum(l)
    return [x / n for x in l] if n > 0 else l

def calculate_p(x, values):
    # Count how often each possible value occurs in x, then normalize
    counts = [0] * len(values)
    for v in x:
        counts[values.index(v)] += 1
    return n_to_p(counts)
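# Quick sanity check of the helpers above (hypothetical input): a user with
# three OffSeason stays and one HighSeason stay gets the distribution
#   calculate_p(['OffSeason', 'OffSeason', 'HighSeason', 'OffSeason'], column_values_dict['term'])
#   -> [0.0, 0.0, 0.75, 0.25, 0.0, 0.0, 0.0, 0.0]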
def prepare_users_df(interactions_df):
    users_df = interactions_df.loc[:, ["user_id"]]
    users_df = users_df.groupby("user_id").first().reset_index(drop=False)

    user_features = []
    for column in base_item_features:
        column_values = column_values_dict[column]
        df = interactions_df.loc[:, ['user_id', column]]
        df = df.groupby('user_id').aggregate(lambda x: list(x)).reset_index(drop=False)

        def calc_p(x):
            return calculate_p(x, column_values)

        # Replace each user's list of values with its empirical distribution
        df.loc[:, column] = df[column].apply(lambda x: calc_p(x))

        # Split the distribution into one numerical column per value
        p_columns = []
        for i in range(len(column_values)):
            p_columns.append("user_" + column + "_" + column_values[i])
            df.loc[:, p_columns[i]] = df[column].apply(lambda x: x[i])
            user_features.append(p_columns[i])

        users_df = pd.merge(users_df, df.loc[:, ['user_id'] + p_columns], on=["user_id"])

    return users_df, user_features
users_df, user_features = prepare_users_df(interactions_df)
print(user_features)
display(HTML(users_df.loc[users_df['user_id'].isin([706, 1736, 7779, 96, 1, 50, 115])].head(15).to_html()))
['user_term_WinterVacation', 'user_term_Easter', 'user_term_OffSeason', 'user_term_HighSeason', 'user_term_LowSeason', 'user_term_MayLongWeekend', 'user_term_NewYear', 'user_term_Christmas', 'user_length_of_stay_bucket_[0-1]', 'user_length_of_stay_bucket_[2-3]', 'user_length_of_stay_bucket_[4-7]', 'user_length_of_stay_bucket_[8-inf]', 'user_rate_plan_Standard', 'user_rate_plan_Nonref', 'user_room_segment_[0-160]', 'user_room_segment_[160-260]', 'user_room_segment_[260-360]', 'user_room_segment_[360-500]', 'user_room_segment_[500-900]', 'user_n_people_bucket_[1-1]', 'user_n_people_bucket_[2-2]', 'user_n_people_bucket_[3-4]', 'user_n_people_bucket_[5-inf]', 'user_weekend_stay_True', 'user_weekend_stay_False']
  | user_id | user_term_WinterVacation | user_term_Easter | user_term_OffSeason | user_term_HighSeason | user_term_LowSeason | user_term_MayLongWeekend | user_term_NewYear | user_term_Christmas | user_length_of_stay_bucket_[0-1] | user_length_of_stay_bucket_[2-3] | user_length_of_stay_bucket_[4-7] | user_length_of_stay_bucket_[8-inf] | user_rate_plan_Standard | user_rate_plan_Nonref | user_room_segment_[0-160] | user_room_segment_[160-260] | user_room_segment_[260-360] | user_room_segment_[360-500] | user_room_segment_[500-900] | user_n_people_bucket_[1-1] | user_n_people_bucket_[2-2] | user_n_people_bucket_[3-4] | user_n_people_bucket_[5-inf] | user_weekend_stay_True | user_weekend_stay_False |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | 1 | 0.130435 | 0.0 | 0.652174 | 0.086957 | 0.130435 | 0.000000 | 0.000000 | 0.000000 | 0.000000 | 0.608696 | 0.391304 | 0.000000 | 0.521739 | 0.478261 | 0.000000 | 0.869565 | 0.130435 | 0.000000 | 0.0 | 0.000000 | 0.739130 | 0.173913 | 0.086957 | 0.782609 | 0.217391 |
47 | 50 | 0.043478 | 0.0 | 0.434783 | 0.304348 | 0.217391 | 0.000000 | 0.000000 | 0.000000 | 0.000000 | 0.913043 | 0.086957 | 0.000000 | 0.260870 | 0.739130 | 0.000000 | 0.565217 | 0.434783 | 0.000000 | 0.0 | 0.000000 | 0.173913 | 0.521739 | 0.304348 | 0.782609 | 0.217391 |
92 | 96 | 0.083333 | 0.0 | 0.708333 | 0.125000 | 0.041667 | 0.041667 | 0.000000 | 0.000000 | 0.250000 | 0.666667 | 0.041667 | 0.041667 | 0.291667 | 0.708333 | 0.125000 | 0.791667 | 0.083333 | 0.000000 | 0.0 | 0.041667 | 0.333333 | 0.541667 | 0.083333 | 0.750000 | 0.250000 |
111 | 115 | 0.727273 | 0.0 | 0.272727 | 0.000000 | 0.000000 | 0.000000 | 0.000000 | 0.000000 | 0.500000 | 0.363636 | 0.136364 | 0.000000 | 1.000000 | 0.000000 | 0.000000 | 0.818182 | 0.181818 | 0.000000 | 0.0 | 0.818182 | 0.090909 | 0.045455 | 0.045455 | 0.363636 | 0.636364 |
675 | 706 | 0.091988 | 0.0 | 0.451039 | 0.189911 | 0.207715 | 0.038576 | 0.011869 | 0.008902 | 0.169139 | 0.459941 | 0.272997 | 0.097923 | 0.994065 | 0.005935 | 0.020772 | 0.839763 | 0.130564 | 0.008902 | 0.0 | 0.041543 | 0.094955 | 0.738872 | 0.124629 | 0.676558 | 0.323442 |
1699 | 1736 | 0.034483 | 0.0 | 0.482759 | 0.206897 | 0.275862 | 0.000000 | 0.000000 | 0.000000 | 0.241379 | 0.551724 | 0.206897 | 0.000000 | 0.172414 | 0.827586 | 0.000000 | 0.931034 | 0.068966 | 0.000000 | 0.0 | 0.379310 | 0.413793 | 0.206897 | 0.000000 | 0.448276 | 0.551724 |
7639 | 7779 | 0.037037 | 0.0 | 0.296296 | 0.259259 | 0.370370 | 0.000000 | 0.000000 | 0.037037 | 0.111111 | 0.296296 | 0.481481 | 0.111111 | 1.000000 | 0.000000 | 0.000000 | 0.814815 | 0.185185 | 0.000000 | 0.0 | 0.000000 | 0.037037 | 0.740741 | 0.222222 | 0.814815 | 0.185185 |
(Optional) Prepare numerical item features
The method below is left here for convenience if you want to experiment with content-based item features as an input for your neural network.
def map_items_to_onehot(df):
    # One-hot encode all base item features and append them to the frame
    one_hot = pd.get_dummies(df.loc[:, base_item_features])
    df = df.drop(base_item_features, axis=1)
    df = df.join(one_hot)
    return df, list(one_hot.columns)

def prepare_items_df(interactions_df):
    items_df = interactions_df.loc[:, ["item_id"] + base_item_features].drop_duplicates()
    items_df, item_features = map_items_to_onehot(items_df)
    return items_df, item_features
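As a quick illustration of what map_items_to_onehot produces, here is a minimal sketch with a hypothetical two-item frame (note that pd.get_dummies only creates columns for the values actually present, unlike the categorical-dtype interactions_df, where all categories appear):

```python
demo = pd.DataFrame({
    'item_id': [0, 1],
    'term': ['WinterVacation', 'Easter'],
    'length_of_stay_bucket': ['[2-3]', '[4-7]'],
    'rate_plan': ['Standard', 'Nonref'],
    'room_segment': ['[160-260]', '[260-360]'],
    'n_people_bucket': ['[2-2]', '[5-inf]'],
    'weekend_stay': ['True', 'False'],
})
demo_onehot, demo_features = map_items_to_onehot(demo)
print(demo_features)  # e.g. ['term_Easter', 'term_WinterVacation', ..., 'weekend_stay_True']
print(demo_onehot)    # item_id plus 0/1 indicator columns
```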
items_df, item_features = prepare_items_df(interactions_df)
print(item_features)
display(HTML(items_df.loc[items_df['item_id'].isin([0, 1, 2, 3, 4, 5, 6])].head(15).to_html()))
['term_WinterVacation', 'term_Easter', 'term_OffSeason', 'term_HighSeason', 'term_LowSeason', 'term_MayLongWeekend', 'term_NewYear', 'term_Christmas', 'length_of_stay_bucket_[0-1]', 'length_of_stay_bucket_[2-3]', 'length_of_stay_bucket_[4-7]', 'length_of_stay_bucket_[8-inf]', 'rate_plan_Standard', 'rate_plan_Nonref', 'room_segment_[0-160]', 'room_segment_[160-260]', 'room_segment_[260-360]', 'room_segment_[360-500]', 'room_segment_[500-900]', 'n_people_bucket_[1-1]', 'n_people_bucket_[2-2]', 'n_people_bucket_[3-4]', 'n_people_bucket_[5-inf]', 'weekend_stay_True', 'weekend_stay_False']
  | item_id | term_WinterVacation | term_Easter | term_OffSeason | term_HighSeason | term_LowSeason | term_MayLongWeekend | term_NewYear | term_Christmas | length_of_stay_bucket_[0-1] | length_of_stay_bucket_[2-3] | length_of_stay_bucket_[4-7] | length_of_stay_bucket_[8-inf] | rate_plan_Standard | rate_plan_Nonref | room_segment_[0-160] | room_segment_[160-260] | room_segment_[260-360] | room_segment_[360-500] | room_segment_[500-900] | n_people_bucket_[1-1] | n_people_bucket_[2-2] | n_people_bucket_[3-4] | n_people_bucket_[5-inf] | weekend_stay_True | weekend_stay_False |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 |
1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 |
2 | 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
3 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 |
4 | 4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 |
5 | 5 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 |
6 | 6 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 |
Neural network recommender
Task:
Code a recommender based on a neural network model. You are free to choose any network architecture you find appropriate. The network can use the interaction vectors for users and items, embeddings of users and items, as well as user and item features (you can use the features you developed in the first project).
Remember to keep control over randomness - in the init method add the seed as a parameter and initialize the random seed generator with that seed (both for numpy and pytorch):
self.seed = seed
self.rng = np.random.RandomState(seed=seed)
in the network model:
self.seed = torch.manual_seed(seed)
You are encouraged to experiment with:
- the number of layers in the network, the number of neurons and different activation functions,
- different optimizers and their parameters,
- batch size and the number of epochs,
- embedding layers (see the sketch below this list),
- content-based features of both users and items.
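For example, a minimal sketch of a pointwise network with embedding layers is shown below. It is illustrative only: EmbeddingNet, n_users, n_items and the layer sizes are hypothetical names and values, not the architecture used further down.

```python
class EmbeddingNet(nn.Module):
    def __init__(self, n_users, n_items, embedding_dim=32, seed=6789):
        super().__init__()
        self.seed = torch.manual_seed(seed)
        self.user_embedding = nn.Embedding(n_users, embedding_dim)
        self.item_embedding = nn.Embedding(n_items, embedding_dim)
        self.fc1 = nn.Linear(2 * embedding_dim, 64)
        self.fc2 = nn.Linear(64, 1)

    def forward(self, user_ids, item_ids):
        # Concatenate the learned user and item vectors and score the pair
        x = torch.cat([self.user_embedding(user_ids),
                       self.item_embedding(item_ids)], dim=1)
        x = torch.relu(self.fc1(x))
        return torch.sigmoid(self.fc2(x)).squeeze(-1)  # probability of an interaction
```

Such a network would be trained on (user, item) pairs with binary targets, with negatives sampled according to n_neg_per_pos.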
from recommenders.recommender import Recommender
import torch.nn.functional as F  # needed for F.relu in the networks below
from sklearn.model_selection import train_test_split  # used in NNRecommender.fit

# HR@10 = 0.07
# class Net(nn.Module):
#     def __init__(self, features_len, output_len):
#         super(Net, self).__init__()
#         self.fc1 = nn.Linear(features_len, 150)
#         self.fc2 = nn.Linear(150, 100)
#         self.fc3 = nn.Linear(100, output_len)
#         self.fc4 = nn.Linear(output_len, output_len + 200)
#         self.dropout = nn.Dropout(p=0.5)
#
#     def forward(self, x):
#         x = F.relu(self.fc1(x))
#         x = self.dropout(x)
#         x = F.relu(self.fc2(x))
#         x = self.dropout(x)
#         x = F.relu(self.fc3(x))
#         return self.fc4(x)

# HR@10 = 0.06
# class Net(nn.Module):
#     def __init__(self, features_len, output_len):
#         super(Net, self).__init__()
#         self.fc1 = nn.Linear(features_len, 150)
#         self.fc2 = nn.Linear(150, 100)
#         self.fc3 = nn.Linear(100, output_len)
#         self.fc4 = nn.Linear(output_len, output_len + 150)
#         self.dropout = nn.Dropout(p=0.5)
#
#     def forward(self, x):
#         x = F.relu(self.fc1(x))
#         x = self.dropout(x)
#         x = F.relu(self.fc2(x))
#         x = self.dropout(x)
#         x = F.relu(self.fc3(x))
#         x = self.dropout(x)
#         return self.fc4(x)

# Adding a Softmax before CrossEntropyLoss turned out to be a bad choice for this
# multi-class setup (CrossEntropyLoss already applies log-softmax internally)
# class Net(nn.Module):
#     def __init__(self, features_len, output_len):
#         super(Net, self).__init__()
#         self.fc1 = nn.Linear(features_len, 150)
#         self.fc2 = nn.Linear(150, 100)
#         self.fc3 = nn.Linear(100, output_len)
#         self.fc4 = nn.Linear(output_len, output_len + 200)
#         self.dropout = nn.Dropout(p=0.5)
#         self.softmax = nn.Softmax()
#
#     def forward(self, x):
#         x = F.relu(self.fc1(x))
#         x = self.dropout(x)
#         x = F.relu(self.fc2(x))
#         x = self.dropout(x)
#         x = F.relu(self.fc3(x))
#         x = self.fc4(x)
#         x = self.softmax(x)
#         return x

# HR@10 = 0.083 - the variant used below
class Net(nn.Module):
    def __init__(self, features_len, output_len):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(features_len, 150)
        self.fc2 = nn.Linear(150, 100)
        self.fc3 = nn.Linear(100, output_len)
        # Note: the final layer expands beyond the number of items (output_len + 200),
        # kept as-is from the experiments above
        self.fc4 = nn.Linear(output_len, output_len + 200)
        self.dropout = nn.Dropout(p=0.5)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = F.relu(self.fc2(x))
        x = self.dropout(x)
        x = F.relu(self.fc3(x))
        return self.fc4(x)
class NNRecommender(Recommender):
    """
    Neural network recommender based on user and item features.
    """

    def __init__(self, seed=6789, n_neg_per_pos=5, n_epochs=5000, lr=0.01):
        """
        Initialize base recommender params and variables.
        """
        self.model = None
        self.n_neg_per_pos = n_neg_per_pos
        self.recommender_df = pd.DataFrame(columns=['user_id', 'item_id', 'score'])
        self.users_df = None
        self.user_features = None
        self.seed = seed
        self.rng = np.random.RandomState(seed=seed)
        self.n_epochs = n_epochs
        self.lr = lr

    def calculate_accuracy(self, y_true, y_pred):
        predictions = y_pred.argmax(1)
        return (predictions == y_true).sum().float() / len(y_true)

    def round_tensor(self, t, decimal_places=3):
        return round(t.item(), decimal_places)

    def fit(self, interactions_df, users_df, items_df):
        """
        Training of the recommender.

        :param pd.DataFrame interactions_df: DataFrame with recorded interactions between users and items
            defined by user_id, item_id and features of the interaction.
        :param pd.DataFrame users_df: DataFrame with users and their features defined by user_id
            and the user feature columns.
        :param pd.DataFrame items_df: DataFrame with items and their features defined by item_id
            and the item feature columns.
        """
        interactions_df = interactions_df.copy()

        # Prepare users_df and items_df
        # (optional - use only if you want to train a hybrid model with content-based features)
        users_df, user_features = prepare_users_df(interactions_df)
        self.users_df = users_df
        self.user_features = user_features

        items_df, item_features = prepare_items_df(interactions_df)
        items_df = items_df.loc[:, ['item_id'] + item_features]

        # Item features used as network input (the length_of_stay buckets are left out,
        # matching the user feature vector used in recommend)
        X = items_df[['term_WinterVacation', 'term_Easter', 'term_OffSeason', 'term_HighSeason',
                      'term_LowSeason', 'term_MayLongWeekend', 'term_NewYear', 'term_Christmas',
                      'rate_plan_Standard', 'rate_plan_Nonref',
                      'room_segment_[0-160]', 'room_segment_[160-260]', 'room_segment_[260-360]',
                      'room_segment_[360-500]', 'room_segment_[500-900]',
                      'n_people_bucket_[1-1]', 'n_people_bucket_[2-2]', 'n_people_bucket_[3-4]',
                      'n_people_bucket_[5-inf]', 'weekend_stay_True', 'weekend_stay_False']]
        y = items_df[['item_id']]

        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=self.seed)
        X_train = torch.from_numpy(X_train.to_numpy()).float()
        y_train = torch.squeeze(torch.from_numpy(y_train.to_numpy()).long())
        X_test = torch.from_numpy(X_test.to_numpy()).float()
        y_test = torch.squeeze(torch.from_numpy(y_test.to_numpy()).long())

        self.net = Net(X_train.shape[1], items_df['item_id'].unique().size)
        optimizer = optim.Adam(self.net.parameters(), lr=self.lr)
        criterion = nn.CrossEntropyLoss()

        for epoch in range(self.n_epochs):
            y_pred = self.net(X_train)
            y_pred = torch.squeeze(y_pred)
            train_loss = criterion(y_pred, y_train)
            # if epoch % 100 == 0:
            #     train_acc = self.calculate_accuracy(y_train, y_pred)
            #     y_test_pred = self.net(X_test)
            #     y_test_pred = torch.squeeze(y_test_pred)
            #     test_loss = criterion(y_test_pred, y_test)
            #     test_acc = self.calculate_accuracy(y_test, y_test_pred)
            #     print(
            #         f'''epoch {epoch}
            #         Train set - loss: {self.round_tensor(train_loss)}, accuracy: {self.round_tensor(train_acc)}
            #         Test set - loss: {self.round_tensor(test_loss)}, accuracy: {self.round_tensor(test_acc)}
            #         ''')
            optimizer.zero_grad()
            train_loss.backward()
            optimizer.step()

    def recommend(self, users_df, items_df, n_recommendations=1):
        """
        Serving of recommendations. Scores items in items_df for each user in users_df and returns
        top n_recommendations for each user.

        :param pd.DataFrame users_df: DataFrame with users and their features for which
            recommendations should be generated.
        :param pd.DataFrame items_df: DataFrame with items and their features which should be scored.
        :param int n_recommendations: Number of recommendations to be returned for each user.
        :return: DataFrame with user_id, item_id and score as columns returning n_recommendations
            top recommendations for each user.
        :rtype: pd.DataFrame
        """
        # Clean previous recommendations (iloc could be used alternatively)
        self.recommender_df = self.recommender_df[:0]

        # Prepare users_df and items_df
        # (optional - use only if you want to train a hybrid model with content-based features)
        users_df = users_df.loc[:, ['user_id']]
        users_df = pd.merge(users_df, self.users_df, on=['user_id'], how='left').fillna(0)
        # items_df, item_features = prepare_items_df(items_df)
        # items_df = items_df.loc[:, ['item_id'] + item_features]

        # Score the items: feed each user's feature vector through the network and
        # treat the output units as per-item scores
        for ix, user in users_df.iterrows():
            prep_user = torch.from_numpy(user[[
                'user_term_WinterVacation', 'user_term_Easter', 'user_term_OffSeason',
                'user_term_HighSeason', 'user_term_LowSeason', 'user_term_MayLongWeekend',
                'user_term_NewYear', 'user_term_Christmas',
                'user_rate_plan_Standard', 'user_rate_plan_Nonref',
                'user_room_segment_[0-160]', 'user_room_segment_[160-260]',
                'user_room_segment_[260-360]', 'user_room_segment_[360-500]',
                'user_room_segment_[500-900]',
                'user_n_people_bucket_[1-1]', 'user_n_people_bucket_[2-2]',
                'user_n_people_bucket_[3-4]', 'user_n_people_bucket_[5-inf]',
                'user_weekend_stay_True', 'user_weekend_stay_False']].to_numpy()).float()
            scores = self.net(prep_user).detach().numpy()
            chosen_ids = np.argsort(-scores)[:n_recommendations]
            recommendations = []
            for item_id in chosen_ids:
                recommendations.append(
                    {
                        'user_id': user['user_id'],
                        'item_id': item_id,
                        'score': scores[item_id]
                    }
                )
            user_recommendations = pd.DataFrame(recommendations)
            self.recommender_df = pd.concat([self.recommender_df, user_recommendations])

        return self.recommender_df
# Fit method
# nn_recommender = NNRecommender(n_epochs=10000, lr=0.02)
# nn_recommender.fit(interactions_df.head(1000), None, None)
# nn_recommender.fit(interactions_df, None, None)
Quick test of the recommender
items_df = interactions_df.loc[:, ['item_id'] + base_item_features].drop_duplicates()
# Fit method
nn_recommender = NNRecommender(n_epochs=200, lr=0.01)
nn_recommender.fit(interactions_df.head(1000), None, None)
# nn_recommender.fit(interactions_df, None, None)
epoch 0
Train set - loss: 6.042, accuracy: 0.011
Test set - loss: 6.025, accuracy: 0.0
epoch 100
Train set - loss: 1.162, accuracy: 0.506
Test set - loss: 36.526, accuracy: 0.0
# Recommender method
recommendations = nn_recommender.recommend(pd.DataFrame([[1],[3]], columns=['user_id']), items_df, 3)
recommendations = pd.merge(recommendations, items_df, on='item_id', how='left')
display(HTML(recommendations.to_html()))
  | user_id | item_id | score | term | length_of_stay_bucket | rate_plan | room_segment | n_people_bucket | weekend_stay |
---|---|---|---|---|---|---|---|---|---|
0 | 1.0 | 119 | 5.364058 | Easter | [2-3] | Standard | [160-260] | [2-2] | True |
1 | 1.0 | 88 | 5.033441 | WinterVacation | [0-1] | Standard | [160-260] | [2-2] | True |
2 | 1.0 | 57 | 4.771185 | WinterVacation | [2-3] | Standard | [160-260] | [2-2] | True |
3 | 3.0 | 2 | 11.286193 | WinterVacation | [2-3] | Standard | [160-260] | [2-2] | False |
4 | 3.0 | 74 | 10.848604 | WinterVacation | [4-7] | Standard | [160-260] | [2-2] | False |
5 | 3.0 | 81 | 10.656947 | WinterVacation | [0-1] | Standard | [160-260] | [2-2] | False |
Tuning method
from evaluation_and_testing.testing import evaluate_train_test_split_implicit
seed = 6789
from hyperopt import hp, fmin, tpe, Trials
import traceback
def tune_recommender(recommender_class, interactions_df, items_df,
                     param_space, max_evals=1, show_progressbar=True, seed=6789):
    # Split into train_validation and test sets
    shuffle = np.arange(len(interactions_df))
    rng = np.random.RandomState(seed=seed)
    rng.shuffle(shuffle)
    shuffle = list(shuffle)

    train_test_split = 0.8
    split_index = int(len(interactions_df) * train_test_split)

    train_validation = interactions_df.iloc[shuffle[:split_index]]
    test = interactions_df.iloc[shuffle[split_index:]]

    # Tune
    def loss(tuned_params):
        recommender = recommender_class(seed=seed, **tuned_params)
        hr1, hr3, hr5, hr10, ndcg1, ndcg3, ndcg5, ndcg10 = evaluate_train_test_split_implicit(
            recommender, train_validation, items_df, seed=seed)
        return -hr10

    n_tries = 1
    succeeded = False
    try_id = 0
    while not succeeded and try_id < n_tries:
        try:
            trials = Trials()
            best_param_set = fmin(loss, space=param_space, algo=tpe.suggest,
                                  max_evals=max_evals, show_progressbar=show_progressbar,
                                  trials=trials, verbose=True)
            succeeded = True
        except:
            traceback.print_exc()
            try_id += 1

    if not succeeded:
        return None

    # Validate
    recommender = recommender_class(seed=seed, **best_param_set)
    results = [[recommender_class.__name__] + list(evaluate_train_test_split_implicit(
        recommender, {'train': train_validation, 'test': test}, items_df, seed=seed))]
    results = pd.DataFrame(results,
                           columns=['Recommender', 'HR@1', 'HR@3', 'HR@5', 'HR@10',
                                    'NDCG@1', 'NDCG@3', 'NDCG@5', 'NDCG@10'])
    display(HTML(results.to_html()))

    return best_param_set
Tuning of the recommender
Task:
Tune your model using the code below. You only need to put the class name of your recommender and choose an appropriate parameter space.
param_space = {
    'n_neg_per_pos': hp.quniform('n_neg_per_pos', 1, 10, 1)
}
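The space above only tunes n_neg_per_pos. A broader, still hypothetical space could also cover the training hyperparameters; scope.int makes the integer-valued parameters arrive in the objective as ints (note that fmin itself returns the raw float values, so they should be cast again before re-instantiating the recommender):

```python
from hyperopt.pyll import scope

param_space = {
    'n_neg_per_pos': scope.int(hp.quniform('n_neg_per_pos', 1, 10, 1)),
    'n_epochs': scope.int(hp.quniform('n_epochs', 100, 1000, 100)),
    'lr': hp.loguniform('lr', np.log(0.001), np.log(0.1)),
}
```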
items_df['item_id'].unique().size  # check the number of distinct items
best_param_set = tune_recommender(NNRecommender, interactions_df, items_df,
                                  param_space, max_evals=10, show_progressbar=True, seed=seed)
print("Best parameters:")
print(best_param_set)
0%| | 0/10 [00:01<?, ?trial/s, best loss=?]
Best parameters:
None
Traceback (most recent call last):
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 965, in aggregate
    result = agg_list_like(self, [func], _axis=self.axis)
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/aggregation.py", line 672, in agg_list_like
    raise ValueError("no results")
ValueError: no results

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "<ipython-input-311-164530239fdf>", line 33, in tune_recommender
    best_param_set = fmin(loss, space=param_space, algo=tpe.suggest,
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/fmin.py", line 507, in fmin
    return trials.fmin(
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/base.py", line 682, in fmin
    return fmin(
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/fmin.py", line 553, in fmin
    rval.exhaust()
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/fmin.py", line 356, in exhaust
    self.run(self.max_evals - n_done, block_until_done=self.asynchronous)
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/fmin.py", line 292, in run
    self.serial_evaluate()
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/fmin.py", line 170, in serial_evaluate
    result = self.domain.evaluate(spec, ctrl)
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/base.py", line 907, in evaluate
    rval = self.fn(pyll_rval)
  File "<ipython-input-311-164530239fdf>", line 23, in loss
    hr1, hr3, hr5, hr10, ndcg1, ndcg3, ndcg5, ndcg10 = evaluate_train_test_split_implicit(
  File "/home/jovyan/REK/evaluation_and_testing/testing.py", line 93, in evaluate_train_test_split_implicit
    recommender.fit(interactions_df_train, None, items_df)
  File "<ipython-input-427-e94347308877>", line 131, in fit
    users_df, user_features = prepare_users_df(interactions_df)
  File "<ipython-input-304-79a89ba1fcad>", line 15, in prepare_users_df
    users_df = users_df.groupby("user_id").first().reset_index(drop=False)
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/groupby/groupby.py", line 1698, in first
    return self._agg_general(
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/groupby/groupby.py", line 1044, in _agg_general
    result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 977, in aggregate
    result = self._aggregate_frame(func)
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 1135, in _aggregate_frame
    fres = func(data, *args, **kwargs)
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/groupby/groupby.py", line 1044, in <lambda>
    result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/groupby/groupby.py", line 1692, in first_compat
    return obj.apply(first, axis=axis)
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/frame.py", line 7768, in apply
    return op.get_result()
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/apply.py", line 185, in get_result
    return self.apply_standard()
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/apply.py", line 276, in apply_standard
    results, res_index = self.apply_series_generator()
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/apply.py", line 288, in apply_series_generator
    for i, v in enumerate(series_gen):
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/apply.py", line 330, in <genexpr>
    return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/frame.py", line 2964, in _ixs
    values = self._mgr.iget(i)
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/internals/managers.py", line 1006, in iget
    return SingleBlockManager(
  File "/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/internals/managers.py", line 1555, in __init__
    if fastpath is not lib.no_default:
KeyboardInterrupt
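The run above was interrupted by hand (hence the final KeyboardInterrupt) while prepare_users_df was still inside groupby("user_id").first(). Since that call is only used to collect one row per user_id, a sketch of a cheaper drop-in replacement for that single line (assuming user_id has no missing values) could look like this:

```python
# Equivalent to interactions_df[['user_id']].groupby('user_id').first().reset_index(drop=False),
# but avoids the slow pandas aggregation path visible in the traceback
users_df = (interactions_df.loc[:, ['user_id']]
            .drop_duplicates()
            .sort_values('user_id')
            .reset_index(drop=True))
```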
Final evaluation
Task:
Run the final evaluation of your recommender and present its results against the Amazon and Netflix recommenders' results. You just need to give the class name of your recommender and its tuned parameters below.
nn_recommender = NNRecommender(n_neg_per_pos=6, n_epochs=20000) # Initialize your recommender here
# Give the name of your recommender in the line below
nn_tts_results = [['NNRecommender'] + list(evaluate_train_test_split_implicit(
    nn_recommender, interactions_df, items_df))]
nn_tts_results = pd.DataFrame(
    nn_tts_results, columns=['Recommender', 'HR@1', 'HR@3', 'HR@5', 'HR@10', 'NDCG@1', 'NDCG@3', 'NDCG@5', 'NDCG@10'])
display(HTML(nn_tts_results.to_html()))
  | Recommender | HR@1 | HR@3 | HR@5 | HR@10 | NDCG@1 | NDCG@3 | NDCG@5 | NDCG@10 |
---|---|---|---|---|---|---|---|---|---|
0 | NNRecommender | 0.025008 | 0.035209 | 0.066469 | 0.116815 | 0.025008 | 0.0311 | 0.043697 | 0.059459 |
from recommenders.amazon_recommender import AmazonRecommender
amazon_recommender = AmazonRecommender()
amazon_tts_results = [['AmazonRecommender'] + list(evaluate_train_test_split_implicit(
    amazon_recommender, interactions_df, items_df))]
amazon_tts_results = pd.DataFrame(
    amazon_tts_results, columns=['Recommender', 'HR@1', 'HR@3', 'HR@5', 'HR@10', 'NDCG@1', 'NDCG@3', 'NDCG@5', 'NDCG@10'])
display(HTML(amazon_tts_results.to_html()))
  | Recommender | HR@1 | HR@3 | HR@5 | HR@10 | NDCG@1 | NDCG@3 | NDCG@5 | NDCG@10 |
---|---|---|---|---|---|---|---|---|---|
0 | AmazonRecommender | 0.042119 | 0.10464 | 0.140507 | 0.199408 | 0.042119 | 0.076826 | 0.091797 | 0.110711 |
from recommenders.netflix_recommender import NetflixRecommender
netflix_recommender = NetflixRecommender(n_epochs=30, print_type='live')
netflix_tts_results = [['NetflixRecommender'] + list(evaluate_train_test_split_implicit(
    netflix_recommender, interactions_df, items_df))]
netflix_tts_results = pd.DataFrame(
    netflix_tts_results, columns=['Recommender', 'HR@1', 'HR@3', 'HR@5', 'HR@10', 'NDCG@1', 'NDCG@3', 'NDCG@5', 'NDCG@10'])
display(HTML(netflix_tts_results.to_html()))
Loss
	training   (min: 0.161, max: 0.228, cur: 0.161)
	validation (min: 0.176, max: 0.242, cur: 0.177)
  | Recommender | HR@1 | HR@3 | HR@5 | HR@10 | NDCG@1 | NDCG@3 | NDCG@5 | NDCG@10 |
---|---|---|---|---|---|---|---|---|---|
0 | NetflixRecommender | 0.042777 | 0.106614 | 0.143139 | 0.200395 | 0.042777 | 0.078228 | 0.093483 | 0.111724 |
tts_results = pd.concat([nn_tts_results, amazon_tts_results, netflix_tts_results]).reset_index(drop=True)
display(HTML(tts_results.to_html()))
  | Recommender | HR@1 | HR@3 | HR@5 | HR@10 | NDCG@1 | NDCG@3 | NDCG@5 | NDCG@10 |
---|---|---|---|---|---|---|---|---|---|
0 | NNRecommender | 0.025008 | 0.035209 | 0.066469 | 0.116815 | 0.025008 | 0.031100 | 0.043697 | 0.059459 |
1 | AmazonRecommender | 0.042119 | 0.104640 | 0.140507 | 0.199408 | 0.042119 | 0.076826 | 0.091797 | 0.110711 |
2 | NetflixRecommender | 0.042777 | 0.106614 | 0.143139 | 0.200395 | 0.042777 | 0.078228 | 0.093483 | 0.111724 |
Summary
Task:
Write a summary of your experiments. What worked well and what did not? What are your thoughts on how the model could be further improved?
- At the beginning I thoughtlessly used BCELoss. That was a big mistake, which cost me an hour of searching the internet for why the model kept returning only item_id=0 (see the illustration at the end of this section).
- Higher "accuracy" in the tests does not equal a better result in the predictions.
- Tuning does not always find the best possible parameter value. I had a case where 5 was chosen even though it gave an HR@10 of 0.05, while 6 gave 0.08.
- Adding dropout can significantly improve the results: dropout raised HR@10 from 0.035 to 0.11 (unfortunately, during further attempts at improvement, I lost that solution somewhere).
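To make the BCELoss point concrete, here is a minimal illustration with hypothetical tensors (not the project code): CrossEntropyLoss consumes raw logits and integer class labels, which matches the item_id prediction setup used here, whereas BCELoss expects probabilities and float targets of the same shape as the output.

```python
import torch
import torch.nn as nn

logits = torch.randn(4, 10)          # 4 samples, 10 item classes, raw scores
labels = torch.tensor([2, 7, 0, 9])  # target item ids

# Correct for this setup: CrossEntropyLoss applies log-softmax internally
ce_loss = nn.CrossEntropyLoss()(logits, labels)

# BCELoss would instead need sigmoid probabilities and one-hot float targets
one_hot = nn.functional.one_hot(labels, num_classes=10).float()
bce_loss = nn.BCELoss()(torch.sigmoid(logits), one_hot)

print(ce_loss.item(), bce_loss.item())
```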