Prepare test set
import pandas as pd
import numpy as np
import scipy.sparse as sparse
from collections import defaultdict
from itertools import chain
import random
from tqdm import tqdm
# In evaluation we do not load the train set - it is not needed
test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\t', header=None)
test.columns=['user', 'item', 'rating', 'timestamp']
test['user_code'] = test['user'].astype("category").cat.codes
test['item_code'] = test['item'].astype("category").cat.codes
user_code_id = dict(enumerate(test['user'].astype("category").cat.categories))
user_id_code = dict((v, k) for k, v in user_code_id.items())
item_code_id = dict(enumerate(test['item'].astype("category").cat.categories))
item_id_code = dict((v, k) for k, v in item_code_id.items())
test_ui = sparse.csr_matrix((test['rating'], (test['user_code'], test['item_code'])))
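A quick check of the two-way mappings built above (a minimal sketch; the exact shape depends on the split):
u = test['user'].iloc[0]
assert user_code_id[user_id_code[u]] == u  # id -> code -> id round-trips
print(test_ui.shape)  # (number of distinct test users, number of distinct test items)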
Estimations metrics
estimations_df=pd.read_csv('Recommendations generated/ml-100k/Ready_Baseline_estimations.csv', header=None)
estimations_df.columns=['user', 'item' ,'score']
estimations_df['user_code']=[user_id_code[user] for user in estimations_df['user']]
estimations_df['item_code']=[item_id_code[item] for item in estimations_df['item']]
estimations=sparse.csr_matrix((estimations_df['score'], (estimations_df['user_code'], estimations_df['item_code'])), shape=test_ui.shape)
def estimations_metrics(test_ui, estimations):
    # note: this assumes estimations and test_ui store exactly the same (user, item) entries in the same order,
    # i.e. the model predicted a score for every test pair
    result=[]

    RMSE=(np.sum((estimations.data-test_ui.data)**2)/estimations.nnz)**(1/2)
    result.append(['RMSE', RMSE])

    MAE=np.sum(abs(estimations.data-test_ui.data))/estimations.nnz
    result.append(['MAE', MAE])

    df_result=(pd.DataFrame(list(zip(*result))[1])).T
    df_result.columns=list(zip(*result))[0]
    return df_result
# in case of an error (in the labs) you might have to switch to a different pandas version:
# try !pip3 install pandas==1.0.3 and restart the kernel
estimations_metrics(test_ui, estimations)
| | RMSE | MAE |
|---|---|---|
| 0 | 0.949459 | 0.752487 |
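As a quick sanity check of estimations_metrics, here is a minimal sketch on made-up numbers: with prediction errors [1, -1, 2] we expect RMSE = sqrt(6/3) ≈ 1.414 and MAE = 4/3 ≈ 1.333.
rows, cols = np.zeros(3, dtype=int), np.arange(3)
toy_test = sparse.csr_matrix((np.array([3, 4, 5]), (rows, cols)))
toy_est = sparse.csr_matrix((np.array([4, 3, 7]), (rows, cols)))  # same sparsity pattern as toy_test
estimations_metrics(toy_test, toy_est)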
Ranking metrics
import numpy as np
reco = np.loadtxt('Recommendations generated/ml-100k/Ready_Baseline_reco.csv', delimiter=',')
# Let's ignore scores - they are not used in evaluation:
users=reco[:,:1]
items=reco[:,1::2]
# Let's use inner ids instead of real ones
users=np.vectorize(lambda x: user_id_code.setdefault(x, -1))(users)
items=np.vectorize(lambda x: item_id_code.setdefault(x, -1))(items) # recommended items might not be in the test set - map those to -1
# Let's put them into one array
reco=np.concatenate((users, items), axis=1)
reco
array([[663, 475,  62, ..., 472, 269, 503],
       [ 48, 313, 475, ..., 591, 175, 466],
       [351, 313, 475, ..., 591, 175, 466],
       ...,
       [259, 313, 475, ...,  11, 591, 175],
       [ 33, 313, 475, ...,  11, 591, 175],
       [ 77, 313, 475, ...,  11, 591, 175]])
def ranking_metrics(test_ui, reco, super_reactions=[], topK=10):
    nb_items=test_ui.shape[1]
    relevant_users, super_relevant_users, prec, rec, F_1, F_05, prec_super, rec_super, ndcg, mAP, MRR, LAUC, HR=\
        0,0,0,0,0,0,0,0,0,0,0,0,0

    cg = (1.0 / np.log2(np.arange(2, topK + 2)))
    cg_sum = np.cumsum(cg)

    for (nb_user, user) in tqdm(enumerate(reco[:,0])):
        u_rated_items=test_ui.indices[test_ui.indptr[user]:test_ui.indptr[user+1]]
        nb_u_rated_items=len(u_rated_items)
        if nb_u_rated_items>0: # skip users with no items in the test set (they may still have no super items)
            relevant_users+=1

            u_super_items=u_rated_items[np.vectorize(lambda x: x in super_reactions)\
                                        (test_ui.data[test_ui.indptr[user]:test_ui.indptr[user+1]])]
            # more natural would be u_super_items=[item for item in u_rated_items if test_ui[user,item] in super_reactions]
            # but accessing test_ui[user,item] is expensive - we should avoid it
            if len(u_super_items)>0:
                super_relevant_users+=1

            user_successes=np.zeros(topK)
            nb_user_successes=0
            user_super_successes=np.zeros(topK)
            nb_user_super_successes=0

            # evaluation
            for (item_position,item) in enumerate(reco[nb_user,1:topK+1]):
                if item in u_rated_items:
                    user_successes[item_position]=1
                    nb_user_successes+=1
                    if item in u_super_items:
                        user_super_successes[item_position]=1
                        nb_user_super_successes+=1

            prec_u=nb_user_successes/topK
            prec+=prec_u

            rec_u=nb_user_successes/nb_u_rated_items
            rec+=rec_u

            F_1+=2*(prec_u*rec_u)/(prec_u+rec_u) if prec_u+rec_u>0 else 0
            F_05+=(0.5**2+1)*(prec_u*rec_u)/(0.5**2*prec_u+rec_u) if prec_u+rec_u>0 else 0

            prec_super+=nb_user_super_successes/topK
            rec_super+=nb_user_super_successes/max(len(u_super_items),1) # denominator 1 gives 0 when there are no super items
            ndcg+=np.dot(user_successes,cg)/cg_sum[min(topK, nb_u_rated_items)-1]

            cumsum_successes=np.cumsum(user_successes)
            mAP+=np.dot(cumsum_successes/np.arange(1,topK+1), user_successes)/min(topK, nb_u_rated_items)
            MRR+=1/(user_successes.nonzero()[0][0]+1) if user_successes.nonzero()[0].size>0 else 0
            LAUC+=(np.dot(cumsum_successes, 1-user_successes)+\
                   (nb_user_successes+nb_u_rated_items)/2*((nb_items-nb_u_rated_items)-(topK-nb_user_successes)))/\
                  ((nb_items-nb_u_rated_items)*nb_u_rated_items)

            HR+=nb_user_successes>0

    result=[]
    result.append(('precision', prec/relevant_users))
    result.append(('recall', rec/relevant_users))
    result.append(('F_1', F_1/relevant_users))
    result.append(('F_05', F_05/relevant_users))
    result.append(('precision_super', prec_super/super_relevant_users))
    result.append(('recall_super', rec_super/super_relevant_users))
    result.append(('NDCG', ndcg/relevant_users))
    result.append(('mAP', mAP/relevant_users))
    result.append(('MRR', MRR/relevant_users))
    result.append(('LAUC', LAUC/relevant_users))
    result.append(('HR', HR/relevant_users))

    df_result=(pd.DataFrame(list(zip(*result))[1])).T
    df_result.columns=list(zip(*result))[0]
    return df_result
ranking_metrics(test_ui, reco, super_reactions=[4,5], topK=10)
943it [00:00, 2282.19it/s]
| | precision | recall | F_1 | F_05 | precision_super | recall_super | NDCG | mAP | MRR | LAUC | HR |
|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 0.09141 | 0.037652 | 0.04603 | 0.061286 | 0.079614 | 0.056463 | 0.095957 | 0.043178 | 0.198193 | 0.515501 | 0.437964 |
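To see where these numbers come from, here is a minimal hand-checked sketch on made-up data: user 0 rated items 1 and 3 in the test set and we recommend [1, 5, 3], so precision@3 should come out as 2/3 and recall@3 as 2/2 = 1.
toy_test = sparse.csr_matrix((np.array([5, 4]), (np.array([0, 0]), np.array([1, 3]))), shape=(1, 6))
toy_reco = np.array([[0, 1, 5, 3]])  # column 0 is the user code, the rest are recommended item codes
ranking_metrics(toy_test, toy_reco, super_reactions=[4, 5], topK=3)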
Diversity metrics
def diversity_metrics(test_ui, reco, topK=10):

    frequencies=defaultdict(int)

    # let's assign 0 to all items in the test set
    for item in list(set(test_ui.indices)):
        frequencies[item]=0

    # counting frequencies
    for item in reco[:,1:].flat:
        frequencies[item]+=1

    # recommended items that are not in the test set were coded as -1 above
    nb_reco_outside_test=frequencies[-1]
    del frequencies[-1]

    frequencies=np.array(list(frequencies.values()))

    nb_rec_items=len(frequencies[frequencies>0])
    nb_reco_inside_test=np.sum(frequencies)

    frequencies=frequencies/np.sum(frequencies)
    frequencies=np.sort(frequencies)

    with np.errstate(divide='ignore'): # items with 0 frequency give log(0); zero those out and ignore the division warning
        log_frequencies=np.nan_to_num(np.log(frequencies), posinf=0, neginf=0)

    result=[]
    result.append(('Reco in test', nb_reco_inside_test/(nb_reco_inside_test+nb_reco_outside_test)))
    result.append(('Test coverage', nb_rec_items/test_ui.shape[1]))
    result.append(('Shannon', -np.dot(frequencies, log_frequencies)))
    result.append(('Gini', np.dot(frequencies, np.arange(1-len(frequencies), len(frequencies), 2))/(len(frequencies)-1)))

    df_result=(pd.DataFrame(list(zip(*result))[1])).T
    df_result.columns=list(zip(*result))[0]
    return df_result
# in case of errors try !pip3 install numpy==1.18.4 and restart the kernel
import evaluation_measures as ev
import imp
imp.reload(ev)
x=diversity_metrics(test_ui, reco, topK=10)
x
| | Reco in test | Test coverage | Shannon | Gini |
|---|---|---|---|---|
| 0 | 1.0 | 0.033911 | 2.836513 | 0.991139 |
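The Gini line above is a standard closed form for a sorted, normalized frequency vector (scaled by n-1 so that full concentration gives exactly 1). A quick intuition check with an illustrative helper (not part of the assignment): perfectly even exposure gives 0, while concentrating all recommendations on one item gives 1.
def gini_check(freq):
    freq = np.sort(np.asarray(freq, dtype=float) / np.sum(freq))
    n = len(freq)
    return np.dot(freq, np.arange(1 - n, n, 2)) / (n - 1)
print(gini_check([1, 1, 1, 1]))   # even exposure -> 0.0
print(gini_check([0, 0, 0, 10]))  # everything on one item -> 1.0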
To be used in other notebooks
import evaluation_measures as ev
import imp
imp.reload(ev)
estimations_df=pd.read_csv('Recommendations generated/ml-100k/Ready_Baseline_estimations.csv', header=None)
reco=np.loadtxt('Recommendations generated/ml-100k/Ready_Baseline_reco.csv', delimiter=',')
ev.evaluate(test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\t', header=None),
estimations_df=estimations_df,
reco=reco,
super_reactions=[4,5])
#you can also just type ev.evaluate_all(estimations_df, reco) - the values above are the defaults
943it [00:00, 2668.06it/s]
| | RMSE | MAE | precision | recall | F_1 | F_05 | precision_super | recall_super | NDCG | mAP | MRR | LAUC | HR | F_2 | Whole_average | Reco in test | Test coverage | Shannon | Gini |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 0.949459 | 0.752487 | 0.09141 | 0.037652 | 0.04603 | 0.061286 | 0.079614 | 0.056463 | 0.095957 | 0.043178 | 0.198193 | 0.515501 | 0.437964 | 0.039549 | 0.1419 | 1.0 | 0.033911 | 2.836513 | 0.991139 |
import evaluation_measures as ev
import imp
imp.reload(ev)
dir_path="Recommendations generated/ml-100k/"
super_reactions=[4,5]
test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\t', header=None)
df=ev.evaluate_all(test, dir_path, super_reactions)
#you can also just type ev.evaluate_all() - the values above are the defaults
943it [00:00, 3513.93it/s]
943it [00:00, 5048.26it/s]
943it [00:00, 4530.47it/s]
943it [00:00, 5016.38it/s]
943it [00:00, 3958.29it/s]
943it [00:00, 4004.71it/s]
943it [00:00, 4465.19it/s]
943it [00:00, 4760.13it/s]
943it [00:00, 4948.57it/s]
943it [00:00, 3895.70it/s]
943it [00:00, 4446.36it/s]
943it [00:00, 5322.70it/s]
943it [00:00, 4464.53it/s]
943it [00:00, 5275.54it/s]
943it [00:00, 5161.31it/s]
943it [00:00, 2960.67it/s]
943it [00:00, 4734.14it/s]
943it [00:00, 3319.18it/s]
df.iloc[:,:9]
| | Model | RMSE | MAE | precision | recall | F_1 | F_05 | precision_super | recall_super |
|---|---|---|---|---|---|---|---|---|---|
0 | Self_RP3Beta | 3.702928 | 3.527713 | 0.322694 | 0.216069 | 0.212152 | 0.247538 | 0.245279 | 0.284983 |
0 | Self_P3 | 3.702446 | 3.527273 | 0.282185 | 0.192092 | 0.186749 | 0.216980 | 0.204185 | 0.240096 |
0 | Self_TopPop | 2.508258 | 2.217909 | 0.188865 | 0.116919 | 0.118732 | 0.141584 | 0.130472 | 0.137473 |
0 | Self_SVDBaseline | 3.645666 | 3.480246 | 0.137858 | 0.082398 | 0.084151 | 0.101063 | 0.107940 | 0.109393 |
0 | Ready_SVD | 0.952563 | 0.750158 | 0.094486 | 0.046274 | 0.051389 | 0.065625 | 0.082618 | 0.074150 |
0 | Self_SVD | 0.914890 | 0.717962 | 0.102969 | 0.042325 | 0.052022 | 0.069313 | 0.093562 | 0.074994 |
0 | Ready_Baseline | 0.949459 | 0.752487 | 0.091410 | 0.037652 | 0.046030 | 0.061286 | 0.079614 | 0.056463 |
0 | Self_KNNSurprisetask | 0.946255 | 0.745209 | 0.083457 | 0.032848 | 0.041227 | 0.055493 | 0.074785 | 0.048890 |
0 | Self_TopRated | 2.508258 | 2.217909 | 0.079321 | 0.032667 | 0.039983 | 0.053170 | 0.068884 | 0.048582 |
0 | Ready_SVDBiased | 0.942141 | 0.742760 | 0.081230 | 0.032344 | 0.040302 | 0.053932 | 0.072639 | 0.051126 |
0 | Self_GlobalAvg | 1.125760 | 0.943534 | 0.061188 | 0.025968 | 0.031383 | 0.041343 | 0.040558 | 0.032107 |
0 | Ready_Random | 1.525633 | 1.225714 | 0.047720 | 0.022049 | 0.025494 | 0.032845 | 0.029077 | 0.025015 |
0 | Ready_I-KNN | 1.030386 | 0.813067 | 0.026087 | 0.006908 | 0.010593 | 0.016046 | 0.021137 | 0.009522 |
0 | Ready_I-KNNBaseline | 0.935327 | 0.737424 | 0.002545 | 0.000755 | 0.001105 | 0.001602 | 0.002253 | 0.000930 |
0 | Ready_U-KNN | 1.023495 | 0.807913 | 0.000742 | 0.000205 | 0.000305 | 0.000449 | 0.000536 | 0.000198 |
0 | Self_BaselineIU | 0.958136 | 0.754051 | 0.000954 | 0.000188 | 0.000298 | 0.000481 | 0.000644 | 0.000223 |
0 | Self_BaselineUI | 0.967585 | 0.762740 | 0.000954 | 0.000170 | 0.000278 | 0.000463 | 0.000644 | 0.000189 |
0 | Self_IKNN | 1.018363 | 0.808793 | 0.000318 | 0.000108 | 0.000140 | 0.000189 | 0.000000 | 0.000000 |
df.iloc[:,np.append(0,np.arange(9, df.shape[1]))]
| | Model | NDCG | mAP | MRR | LAUC | HR | F_2 | Whole_average | Reco in test | Test coverage | Shannon | Gini |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | Self_RP3Beta | 0.388271 | 0.248239 | 0.636318 | 0.605683 | 0.910923 | 0.205450 | 0.376967 | 0.999788 | 0.178932 | 4.549663 | 0.950182 |
0 | Self_P3 | 0.339114 | 0.204905 | 0.572157 | 0.593544 | 0.875928 | 0.181702 | 0.340803 | 1.000000 | 0.077201 | 3.875892 | 0.974947 |
0 | Self_TopPop | 0.214651 | 0.111707 | 0.400939 | 0.555546 | 0.765642 | 0.112750 | 0.249607 | 1.000000 | 0.038961 | 3.159079 | 0.987317 |
0 | Self_SVDBaseline | 0.164477 | 0.082973 | 0.342374 | 0.538097 | 0.638388 | 0.079860 | 0.205748 | 0.999894 | 0.279221 | 5.159076 | 0.907220 |
0 | Ready_SVD | 0.109320 | 0.051383 | 0.240693 | 0.519849 | 0.475080 | 0.046237 | 0.154759 | 0.993425 | 0.206349 | 4.442996 | 0.952832 |
0 | Self_SVD | 0.105416 | 0.050278 | 0.191533 | 0.517890 | 0.462354 | 0.044591 | 0.150604 | 0.867656 | 0.141414 | 3.929249 | 0.971112 |
0 | Ready_Baseline | 0.095957 | 0.043178 | 0.198193 | 0.515501 | 0.437964 | 0.039549 | 0.141900 | 1.000000 | 0.033911 | 2.836513 | 0.991139 |
0 | Self_KNNSurprisetask | 0.089577 | 0.040902 | 0.189057 | 0.513076 | 0.417815 | 0.034996 | 0.135177 | 0.888547 | 0.130592 | 3.611806 | 0.978659 |
0 | Self_TopRated | 0.070766 | 0.027602 | 0.114790 | 0.512943 | 0.411453 | 0.034385 | 0.124546 | 1.000000 | 0.024531 | 2.761238 | 0.991660 |
0 | Ready_SVDBiased | 0.087552 | 0.039346 | 0.191285 | 0.512818 | 0.416755 | 0.034405 | 0.134478 | 0.997667 | 0.165224 | 4.147579 | 0.964690 |
0 | Self_GlobalAvg | 0.067695 | 0.027470 | 0.171187 | 0.509546 | 0.384942 | 0.027213 | 0.118383 | 1.000000 | 0.025974 | 2.711772 | 0.992003 |
0 | Ready_Random | 0.051757 | 0.019242 | 0.128181 | 0.507543 | 0.327678 | 0.022628 | 0.103269 | 0.987275 | 0.184704 | 5.105122 | 0.906561 |
0 | Ready_I-KNN | 0.024214 | 0.008958 | 0.048068 | 0.499885 | 0.154825 | 0.008007 | 0.069521 | 0.402333 | 0.434343 | 5.133650 | 0.877999 |
0 | Ready_I-KNNBaseline | 0.003444 | 0.001362 | 0.011760 | 0.496724 | 0.021209 | 0.000862 | 0.045379 | 0.482821 | 0.059885 | 2.232578 | 0.994487 |
0 | Ready_U-KNN | 0.000845 | 0.000274 | 0.002744 | 0.496441 | 0.007423 | 0.000235 | 0.042533 | 0.602121 | 0.010823 | 2.089186 | 0.995706 |
0 | Self_BaselineIU | 0.001043 | 0.000335 | 0.003348 | 0.496433 | 0.009544 | 0.000220 | 0.042809 | 0.699046 | 0.005051 | 1.945910 | 0.995669 |
0 | Self_BaselineUI | 0.000752 | 0.000168 | 0.001677 | 0.496424 | 0.009544 | 0.000201 | 0.042622 | 0.600530 | 0.005051 | 1.803126 | 0.996380 |
0 | Self_IKNN | 0.000214 | 0.000037 | 0.000368 | 0.496391 | 0.003181 | 0.000118 | 0.041755 | 0.392153 | 0.115440 | 4.174741 | 0.965327 |
Check metrics on toy dataset
import evaluation_measures as ev
import imp
import helpers
imp.reload(ev)
dir_path="Recommendations generated/toy-example/"
super_reactions=[4,5]
test=pd.read_csv('./Datasets/toy-example/test.csv', sep='\t', header=None)
display(ev.evaluate_all(test, dir_path, super_reactions, topK=3))
#you can also just type ev.evaluate_all() - the values above are the defaults
toy_train_read=pd.read_csv('./Datasets/toy-example/train.csv', sep='\t', header=None, names=['user', 'item', 'rating', 'timestamp'])
toy_test_read=pd.read_csv('./Datasets/toy-example/test.csv', sep='\t', header=None, names=['user', 'item', 'rating', 'timestamp'])
reco=pd.read_csv('Recommendations generated/toy-example/Self_BaselineUI_reco.csv', header=None)
estimations=pd.read_csv('Recommendations generated/toy-example/Self_BaselineUI_estimations.csv', names=['user', 'item', 'est_score'])
toy_train_ui, toy_test_ui, toy_user_code_id, toy_user_id_code, \
toy_item_code_id, toy_item_id_code = helpers.data_to_csr(toy_train_read, toy_test_read)
print('Training data:')
display(toy_train_ui.todense())
print('Test data:')
display(toy_test_ui.todense())
print('Recommendations:')
display(reco)
print('Estimations:')
display(estimations)
3it [00:00, 1024.67it/s]
3it [00:00, 2922.18it/s]
| | Model | RMSE | MAE | precision | recall | F_1 | F_05 | precision_super | recall_super | NDCG | mAP | MRR | LAUC | HR | F_2 | Whole_average | Reco in test | Test coverage | Shannon | Gini |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | Self_BaselineIU | 1.648337 | 1.575 | 0.444444 | 0.888889 | 0.555556 | 0.478632 | 0.333333 | 0.75 | 0.720550 | 0.629630 | 0.666667 | 0.722222 | 1.0 | 0.698413 | 0.657361 | 0.777778 | 0.8 | 1.351784 | 0.357143 |
0 | Self_BaselineUI | 1.612452 | 1.400 | 0.444444 | 0.888889 | 0.555556 | 0.478632 | 0.333333 | 0.75 | 0.676907 | 0.574074 | 0.611111 | 0.638889 | 1.0 | 0.698413 | 0.637521 | 0.888889 | 0.8 | 1.386294 | 0.250000 |
Training data:
matrix([[3, 4, 0, 0, 5, 0, 0, 4],
        [0, 1, 2, 3, 0, 0, 0, 0],
        [0, 0, 0, 5, 0, 3, 4, 0]], dtype=int64)
Test data:
matrix([[0, 0, 0, 0, 0, 0, 3, 0],
        [0, 0, 0, 0, 5, 0, 0, 0],
        [5, 0, 4, 0, 0, 0, 0, 2]], dtype=int64)
Recommendations:
| | 0 | 1 | 2 | 3 | 4 | 5 | 6 |
|---|---|---|---|---|---|---|---|
0 | 0 | 30 | 5.0 | 20 | 4.0 | 60 | 4.0 |
1 | 10 | 40 | 3.0 | 60 | 2.0 | 70 | 2.0 |
2 | 20 | 40 | 5.0 | 20 | 4.0 | 70 | 4.0 |
Estimations:
| | user | item | est_score |
|---|---|---|---|
0 | 0 | 60 | 4.0 |
1 | 10 | 40 | 3.0 |
2 | 20 | 0 | 3.0 |
3 | 20 | 20 | 4.0 |
4 | 20 | 70 | 4.0 |
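A hand check against the table above: user 0 is recommended items 30, 20, 60 and has only item 60 in the test set (1/3 hits), user 10 is recommended 40, 60, 70 and has item 40 (1/3), and user 20 is recommended 40, 20, 70 and has items 0, 20 and 70 in the test set (2/3). Precision therefore averages to (1/3 + 1/3 + 2/3)/3 = 4/9 ≈ 0.4444 and recall to (1 + 1 + 2/3)/3 = 8/9 ≈ 0.8889, matching the Self_BaselineUI row.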
Sample recommendations
train=pd.read_csv('./Datasets/ml-100k/train.csv', sep='\t', header=None, names=['user', 'item', 'rating', 'timestamp'])
items=pd.read_csv('./Datasets/ml-100k/movies.csv')
user=random.choice(list(set(train['user'])))
train_content=pd.merge(train, items, left_on='item', right_on='id')
print('Here is what user rated high:')
display(train_content[train_content['user']==user][['user', 'rating', 'title', 'genres']]\
.sort_values(by='rating', ascending=False)[:15])
reco = np.loadtxt('Recommendations generated/ml-100k/Self_BaselineUI_reco.csv', delimiter=',')
items=pd.read_csv('./Datasets/ml-100k/movies.csv')
# Let's ignore scores - they are not used in evaluation:
reco_users=reco[:,:1]
reco_items=reco[:,1::2]
# Let's put them into one array
reco=np.concatenate((reco_users, reco_items), axis=1)
# Let's rebuild it as a user-item dataframe
recommended=[]
for row in reco:
    for rec_nb, entry in enumerate(row[1:]):
        recommended.append((row[0], rec_nb+1, entry))
recommended=pd.DataFrame(recommended, columns=['user','rec_nb', 'item'])
recommended_content=pd.merge(recommended, items, left_on='item', right_on='id')
print('Here is what we recommend:')
recommended_content[recommended_content['user']==user][['user', 'rec_nb', 'title', 'genres']].sort_values(by='rec_nb')
Here is what user rated high:
| | user | rating | title | genres |
|---|---|---|---|---|
332 | 614 | 5 | Toy Story (1995) | Animation, Children's, Comedy |
19024 | 614 | 5 | Mars Attacks! (1996) | Action, Comedy, Sci-Fi, War |
55124 | 614 | 5 | Long Kiss Goodnight, The (1996) | Action, Thriller |
34978 | 614 | 5 | My Best Friend's Wedding (1997) | Comedy, Romance |
17149 | 614 | 4 | Leaving Las Vegas (1995) | Drama, Romance |
60860 | 614 | 4 | Juror, The (1996) | Drama, Thriller |
24486 | 614 | 4 | People vs. Larry Flynt, The (1996) | Drama |
33831 | 614 | 4 | Independence Day (ID4) (1996) | Action, Sci-Fi, War |
38565 | 614 | 4 | Spitfire Grill, The (1996) | Drama |
44201 | 614 | 4 | Father of the Bride Part II (1995) | Comedy |
41492 | 614 | 3 | Donnie Brasco (1997) | Crime, Drama |
66393 | 614 | 3 | Postino, Il (1994) | Drama, Romance |
66231 | 614 | 3 | Stealing Beauty (1996) | Drama |
64564 | 614 | 3 | Once Upon a Time... When We Were Colored (1995) | Drama |
51125 | 614 | 3 | Dragonheart (1996) | Action, Adventure, Fantasy |
Here is what we recommend:
| | user | rec_nb | title | genres |
|---|---|---|---|---|
612 | 614.0 | 1 | Great Day in Harlem, A (1994) | Documentary |
1554 | 614.0 | 2 | Tough and Deadly (1995) | Action, Drama, Thriller |
2496 | 614.0 | 3 | Aiqing wansui (1994) | Drama |
3438 | 614.0 | 4 | Delta of Venus (1994) | Drama |
4380 | 614.0 | 5 | Someone Else's America (1995) | Drama |
5322 | 614.0 | 6 | Saint of Fort Washington, The (1993) | Drama |
6264 | 614.0 | 7 | Celestial Clockwork (1994) | Comedy |
7207 | 614.0 | 8 | Some Mother's Son (1996) | Drama |
9101 | 614.0 | 9 | Maya Lin: A Strong Clear Vision (1994) | Documentary |
8147 | 614.0 | 10 | Prefontaine (1997) | Drama |
Project task 3: implement some other evaluation measure
# it may be your own idea, a modification of what we have already implemented
# (for example a Hit2 rate, which would count as successes users who received at least 2 relevant recommendations)
# or something well-known
# expected output: a modification of evaluation_measures.py such that evaluate_all also displays your measure
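# For instance, a minimal sketch of the suggested Hit2 rate (a hypothetical helper, not part of evaluation_measures.py):
# a user counts as a success only if at least two of the topK recommendations occur among their test items
def hit2_rate(test_ui, reco, topK=10):
    hits, relevant_users = 0, 0
    for nb_user, user in enumerate(reco[:, 0].astype(int)):  # assumes reco holds inner codes, as in ranking_metrics
        u_rated_items = test_ui.indices[test_ui.indptr[user]:test_ui.indptr[user + 1]]
        if len(u_rated_items) > 0:  # skip users with no items in the test set
            relevant_users += 1
            nb_successes = sum(item in u_rated_items for item in reco[nb_user, 1:topK + 1])
            hits += nb_successes >= 2
    return hits / relevant_users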
dir_path="Recommendations generated/ml-100k/"
super_reactions=[4,5]
test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\t', header=None)
ev.evaluate_all(test, dir_path, super_reactions)
943it [00:00, 1901.35it/s] 943it [00:00, 4856.92it/s] 943it [00:00, 4595.27it/s] 943it [00:00, 2703.28it/s] 943it [00:00, 4351.40it/s] 943it [00:00, 4062.22it/s] 943it [00:00, 4997.62it/s] 943it [00:00, 4371.27it/s] 943it [00:00, 4910.24it/s] 943it [00:00, 4240.88it/s] 943it [00:00, 4037.69it/s] 943it [00:00, 3703.04it/s] 943it [00:00, 2715.94it/s] 943it [00:00, 5319.09it/s] 943it [00:00, 3988.17it/s] 943it [00:00, 4858.36it/s] 943it [00:00, 5096.80it/s] 943it [00:00, 4678.05it/s]
| | Model | RMSE | MAE | precision | recall | F_1 | F_05 | precision_super | recall_super | NDCG | mAP | MRR | LAUC | HR | F_2 | Whole_average | Reco in test | Test coverage | Shannon | Gini |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | Self_RP3Beta | 3.702928 | 3.527713 | 0.322694 | 0.216069 | 0.212152 | 0.247538 | 0.245279 | 0.284983 | 0.388271 | 0.248239 | 0.636318 | 0.605683 | 0.910923 | 0.205450 | 0.376967 | 0.999788 | 0.178932 | 4.549663 | 0.950182 |
0 | Self_P3 | 3.702446 | 3.527273 | 0.282185 | 0.192092 | 0.186749 | 0.216980 | 0.204185 | 0.240096 | 0.339114 | 0.204905 | 0.572157 | 0.593544 | 0.875928 | 0.181702 | 0.340803 | 1.000000 | 0.077201 | 3.875892 | 0.974947 |
0 | Self_TopPop | 2.508258 | 2.217909 | 0.188865 | 0.116919 | 0.118732 | 0.141584 | 0.130472 | 0.137473 | 0.214651 | 0.111707 | 0.400939 | 0.555546 | 0.765642 | 0.112750 | 0.249607 | 1.000000 | 0.038961 | 3.159079 | 0.987317 |
0 | Self_SVDBaseline | 3.645666 | 3.480246 | 0.137858 | 0.082398 | 0.084151 | 0.101063 | 0.107940 | 0.109393 | 0.164477 | 0.082973 | 0.342374 | 0.538097 | 0.638388 | 0.079860 | 0.205748 | 0.999894 | 0.279221 | 5.159076 | 0.907220 |
0 | Ready_SVD | 0.952563 | 0.750158 | 0.094486 | 0.046274 | 0.051389 | 0.065625 | 0.082618 | 0.074150 | 0.109320 | 0.051383 | 0.240693 | 0.519849 | 0.475080 | 0.046237 | 0.154759 | 0.993425 | 0.206349 | 4.442996 | 0.952832 |
0 | Self_SVD | 0.914890 | 0.717962 | 0.102969 | 0.042325 | 0.052022 | 0.069313 | 0.093562 | 0.074994 | 0.105416 | 0.050278 | 0.191533 | 0.517890 | 0.462354 | 0.044591 | 0.150604 | 0.867656 | 0.141414 | 3.929249 | 0.971112 |
0 | Ready_Baseline | 0.949459 | 0.752487 | 0.091410 | 0.037652 | 0.046030 | 0.061286 | 0.079614 | 0.056463 | 0.095957 | 0.043178 | 0.198193 | 0.515501 | 0.437964 | 0.039549 | 0.141900 | 1.000000 | 0.033911 | 2.836513 | 0.991139 |
0 | Self_KNNSurprisetask | 0.946255 | 0.745209 | 0.083457 | 0.032848 | 0.041227 | 0.055493 | 0.074785 | 0.048890 | 0.089577 | 0.040902 | 0.189057 | 0.513076 | 0.417815 | 0.034996 | 0.135177 | 0.888547 | 0.130592 | 3.611806 | 0.978659 |
0 | Self_TopRated | 2.508258 | 2.217909 | 0.079321 | 0.032667 | 0.039983 | 0.053170 | 0.068884 | 0.048582 | 0.070766 | 0.027602 | 0.114790 | 0.512943 | 0.411453 | 0.034385 | 0.124546 | 1.000000 | 0.024531 | 2.761238 | 0.991660 |
0 | Ready_SVDBiased | 0.942141 | 0.742760 | 0.081230 | 0.032344 | 0.040302 | 0.053932 | 0.072639 | 0.051126 | 0.087552 | 0.039346 | 0.191285 | 0.512818 | 0.416755 | 0.034405 | 0.134478 | 0.997667 | 0.165224 | 4.147579 | 0.964690 |
0 | Self_GlobalAvg | 1.125760 | 0.943534 | 0.061188 | 0.025968 | 0.031383 | 0.041343 | 0.040558 | 0.032107 | 0.067695 | 0.027470 | 0.171187 | 0.509546 | 0.384942 | 0.027213 | 0.118383 | 1.000000 | 0.025974 | 2.711772 | 0.992003 |
0 | Ready_Random | 1.525633 | 1.225714 | 0.047720 | 0.022049 | 0.025494 | 0.032845 | 0.029077 | 0.025015 | 0.051757 | 0.019242 | 0.128181 | 0.507543 | 0.327678 | 0.022628 | 0.103269 | 0.987275 | 0.184704 | 5.105122 | 0.906561 |
0 | Ready_I-KNN | 1.030386 | 0.813067 | 0.026087 | 0.006908 | 0.010593 | 0.016046 | 0.021137 | 0.009522 | 0.024214 | 0.008958 | 0.048068 | 0.499885 | 0.154825 | 0.008007 | 0.069521 | 0.402333 | 0.434343 | 5.133650 | 0.877999 |
0 | Ready_I-KNNBaseline | 0.935327 | 0.737424 | 0.002545 | 0.000755 | 0.001105 | 0.001602 | 0.002253 | 0.000930 | 0.003444 | 0.001362 | 0.011760 | 0.496724 | 0.021209 | 0.000862 | 0.045379 | 0.482821 | 0.059885 | 2.232578 | 0.994487 |
0 | Ready_U-KNN | 1.023495 | 0.807913 | 0.000742 | 0.000205 | 0.000305 | 0.000449 | 0.000536 | 0.000198 | 0.000845 | 0.000274 | 0.002744 | 0.496441 | 0.007423 | 0.000235 | 0.042533 | 0.602121 | 0.010823 | 2.089186 | 0.995706 |
0 | Self_BaselineIU | 0.958136 | 0.754051 | 0.000954 | 0.000188 | 0.000298 | 0.000481 | 0.000644 | 0.000223 | 0.001043 | 0.000335 | 0.003348 | 0.496433 | 0.009544 | 0.000220 | 0.042809 | 0.699046 | 0.005051 | 1.945910 | 0.995669 |
0 | Self_BaselineUI | 0.967585 | 0.762740 | 0.000954 | 0.000170 | 0.000278 | 0.000463 | 0.000644 | 0.000189 | 0.000752 | 0.000168 | 0.001677 | 0.496424 | 0.009544 | 0.000201 | 0.042622 | 0.600530 | 0.005051 | 1.803126 | 0.996380 |
0 | Self_IKNN | 1.018363 | 0.808793 | 0.000318 | 0.000108 | 0.000140 | 0.000189 | 0.000000 | 0.000000 | 0.000214 | 0.000037 | 0.000368 | 0.496391 | 0.003181 | 0.000118 | 0.041755 | 0.392153 | 0.115440 | 4.174741 | 0.965327 |