systemy_rekomendacyjne/P2. Evaluation.ipynb

Prepare test set

import pandas as pd
import numpy as np
import scipy.sparse as sparse
from collections import defaultdict
from itertools import chain
import random
from tqdm import tqdm

# In evaluation we do not load the train set - it is not needed
test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\t', header=None)
test.columns=['user', 'item', 'rating', 'timestamp']

test['user_code'] = test['user'].astype("category").cat.codes
test['item_code'] = test['item'].astype("category").cat.codes

user_code_id = dict(enumerate(test['user'].astype("category").cat.categories))
user_id_code = dict((v, k) for k, v in user_code_id.items())
item_code_id = dict(enumerate(test['item'].astype("category").cat.categories))
item_id_code = dict((v, k) for k, v in item_code_id.items())

test_ui = sparse.csr_matrix((test['rating'], (test['user_code'], test['item_code'])))
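
To confirm that the id-to-code mappings and the CSR matrix line up, here is a quick sanity check (a minimal sketch using the variables above; it assumes each (user, item) pair occurs at most once in the test set, as in ml-100k):

# the matrix entry addressed via the code dictionaries should equal
# the rating stored in the dataframe
u, i = test['user'].iloc[0], test['item'].iloc[0]
assert test_ui[user_id_code[u], item_id_code[i]] == test['rating'].iloc[0]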

Estimation metrics

estimations_df=pd.read_csv('Recommendations generated/ml-100k/Ready_Baseline_estimations.csv', header=None)
estimations_df.columns=['user', 'item' ,'score']

estimations_df['user_code']=[user_id_code[user] for user in estimations_df['user']]
estimations_df['item_code']=[item_id_code[item] for item in estimations_df['item']]
estimations=sparse.csr_matrix((estimations_df['score'], (estimations_df['user_code'], estimations_df['item_code'])), shape=test_ui.shape)
def estimations_metrics(test_ui, estimations):
    result=[]

    RMSE=(np.sum((estimations.data-test_ui.data)**2)/estimations.nnz)**(1/2)
    result.append(['RMSE', RMSE])

    MAE=np.sum(abs(estimations.data-test_ui.data))/estimations.nnz
    result.append(['MAE', MAE])
    
    df_result=(pd.DataFrame(list(zip(*result))[1])).T
    df_result.columns=list(zip(*result))[0]
    return df_result
# in case of an error (in the labs) you might have to switch to another version of pandas
# try !pip3 install pandas=='1.0.3' (or pip if you use Python 2) and restart the kernel

estimations_metrics(test_ui, estimations)
RMSE MAE
0 0.949459 0.752487
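
Note that both metrics compare estimations.data with test_ui.data element-wise, i.e. RMSE = sqrt(sum((est - actual)^2) / n) and MAE = sum(|est - actual|) / n over the n stored entries. This is only valid when the two sparse matrices store exactly the same (user, item) pairs in the same order; a hedged sanity check:

# both CSR matrices should index the same pairs in the same order,
# otherwise the element-wise difference above compares unrelated entries
assert np.array_equal(estimations.indptr, test_ui.indptr)
assert np.array_equal(estimations.indices, test_ui.indices)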

Ranking metrics

import numpy as np
reco = np.loadtxt('Recommendations generated/ml-100k/Ready_Baseline_reco.csv', delimiter=',')
# Each row of the reco file is: user, item1, score1, item2, score2, ...
# Let's ignore the scores - they are not used in evaluation:
users=reco[:,:1]
items=reco[:,1::2]
# Let's use inner ids instead of real ones
# note: setdefault mutates the dicts in place - any id absent from the test set
# gets the sentinel code -1 (diversity_metrics below relies on this)
users=np.vectorize(lambda x: user_id_code.setdefault(x, -1))(users)
items=np.vectorize(lambda x: item_id_code.setdefault(x, -1))(items) # items we recommend may be missing from the test set
# Let's put them into one array
reco=np.concatenate((users, items), axis=1)
reco
array([[663, 475,  62, ..., 472, 269, 503],
       [ 48, 313, 475, ..., 591, 175, 466],
       [351, 313, 475, ..., 591, 175, 466],
       ...,
       [259, 313, 475, ...,  11, 591, 175],
       [ 33, 313, 475, ...,  11, 591, 175],
       [ 77, 313, 475, ...,  11, 591, 175]])
def ranking_metrics(test_ui, reco, super_reactions=[], topK=10):
    
    nb_items=test_ui.shape[1]
    relevant_users, super_relevant_users, prec, rec, F_1, F_05, prec_super, rec_super, ndcg, mAP, MRR, LAUC, HR=\
    0,0,0,0,0,0,0,0,0,0,0,0,0
    
    # cg[i] is the discounted gain of a hit at list position i+1;
    # cg_sum[k-1] is the ideal DCG for a user with k relevant test items
    cg = (1.0 / np.log2(np.arange(2, topK + 2)))
    cg_sum = np.cumsum(cg)
    
    for (nb_user, user) in tqdm(enumerate(reco[:,0])):
        u_rated_items=test_ui.indices[test_ui.indptr[user]:test_ui.indptr[user+1]]
        nb_u_rated_items=len(u_rated_items)
        if nb_u_rated_items>0: # skip users with no items in the test set (a user may still have no super items)
            relevant_users+=1
            
            u_super_items=u_rated_items[np.vectorize(lambda x: x in super_reactions)\
            (test_ui.data[test_ui.indptr[user]:test_ui.indptr[user+1]])]
            # a more natural way would be u_super_items=[item for item in u_rated_items if test_ui[user,item] in super_reactions]
            # but accessing test_ui[user,item] is expensive - we should avoid it
            if len(u_super_items)>0:
                super_relevant_users+=1
            
            user_successes=np.zeros(topK)
            nb_user_successes=0
            user_super_successes=np.zeros(topK)
            nb_user_super_successes=0
            
            # evaluation
            for (item_position,item) in enumerate(reco[nb_user,1:topK+1]):
                if item in u_rated_items:
                    user_successes[item_position]=1
                    nb_user_successes+=1
                    if item in u_super_items:
                        user_super_successes[item_position]=1
                        nb_user_super_successes+=1
                        
            prec_u=nb_user_successes/topK 
            prec+=prec_u
            
            rec_u=nb_user_successes/nb_u_rated_items
            rec+=rec_u
            
            F_1+=2*(prec_u*rec_u)/(prec_u+rec_u) if prec_u+rec_u>0 else 0
            F_05+=(0.5**2+1)*(prec_u*rec_u)/(0.5**2*prec_u+rec_u) if prec_u+rec_u>0 else 0
            
            prec_super+=nb_user_super_successes/topK
            rec_super+=nb_user_super_successes/max(len(u_super_items),1) # max(...,1) yields 0 instead of a division by zero when the user has no super items
            ndcg+=np.dot(user_successes,cg)/cg_sum[min(topK, nb_u_rated_items)-1]
            
            cumsum_successes=np.cumsum(user_successes)
            mAP+=np.dot(cumsum_successes/np.arange(1,topK+1), user_successes)/min(topK, nb_u_rated_items)
            MRR+=1/(user_successes.nonzero()[0][0]+1) if user_successes.nonzero()[0].size>0 else 0
            # localized AUC: correctly ordered (relevant, irrelevant) pairs within
            # the topK list, plus the expected count for items outside the list
            # (assumed randomly ordered), divided by the total number of pairs
            LAUC+=(np.dot(cumsum_successes, 1-user_successes)+\
            (nb_user_successes+nb_u_rated_items)/2*((nb_items-nb_u_rated_items)-(topK-nb_user_successes)))/\
            ((nb_items-nb_u_rated_items)*nb_u_rated_items)
            
            HR+=nb_user_successes>0
            
            
    result=[]
    result.append(('precision', prec/relevant_users))
    result.append(('recall', rec/relevant_users))
    result.append(('F_1', F_1/relevant_users))
    result.append(('F_05', F_05/relevant_users))
    result.append(('precision_super', prec_super/super_relevant_users))
    result.append(('recall_super', rec_super/super_relevant_users))
    result.append(('NDCG', ndcg/relevant_users))
    result.append(('mAP', mAP/relevant_users))
    result.append(('MRR', MRR/relevant_users))
    result.append(('LAUC', LAUC/relevant_users))
    result.append(('HR', HR/relevant_users))

    df_result=(pd.DataFrame(list(zip(*result))[1])).T
    df_result.columns=list(zip(*result))[0]
    return df_result
ranking_metrics(test_ui, reco, super_reactions=[4,5], topK=10)
943it [00:00, 6210.87it/s]
precision recall F_1 F_05 precision_super recall_super NDCG mAP MRR LAUC HR
0 0.09141 0.037652 0.04603 0.061286 0.079614 0.056463 0.095957 0.043178 0.198193 0.515501 0.437964
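
To make the NDCG normalization concrete, a small worked example (hypothetical success vector; assumes topK=10 and a user with 3 relevant items in the test set):

cg = 1.0 / np.log2(np.arange(2, 12))   # discounted gain for positions 1..10
hits = np.zeros(10)
hits[[0, 3]] = 1                       # hits at positions 1 and 4
dcg = np.dot(hits, cg)                 # 1/log2(2) + 1/log2(5) ≈ 1.431
idcg = np.cumsum(cg)[min(10, 3) - 1]   # best case: 3 hits at the top ≈ 2.131
print(dcg / idcg)                      # per-user NDCG ≈ 0.671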

Diversity metrics

def diversity_metrics(test_ui, reco, topK=10):
    
    frequencies=defaultdict(int)
    
    # let's assign 0 to all items in test set
    for item in list(set(test_ui.indices)):
        frequencies[item]=0
        
    # counting frequencies
    for item in reco[:,1:].flat:
        frequencies[item]+=1
        
    nb_reco_outside_test=frequencies[-1]
    del frequencies[-1]
    
    frequencies=np.array(list(frequencies.values()))
                         
    nb_rec_items=len(frequencies[frequencies>0])
    nb_reco_inside_test=np.sum(frequencies)
                         
    frequencies=frequencies/np.sum(frequencies)
    frequencies=np.sort(frequencies)
    
    with np.errstate(divide='ignore'): # let's map items with 0 frequency to zero and ignore the division warning
        log_frequencies=np.nan_to_num(np.log(frequencies), posinf=0, neginf=0)
                         
    result=[]
    result.append(('Reco in test', nb_reco_inside_test/(nb_reco_inside_test+nb_reco_outside_test)))
    result.append(('Test coverage', nb_rec_items/test_ui.shape[1]))
    result.append(('Shannon', -np.dot(frequencies, log_frequencies)))
    result.append(('Gini', np.dot(frequencies, np.arange(1-len(frequencies), len(frequencies), 2))/(len(frequencies)-1)))
    
    df_result=(pd.DataFrame(list(zip(*result))[1])).T
    df_result.columns=list(zip(*result))[0]
    return df_result
# in case of errors try !pip3 install numpy==1.18.4 (or pip if you use Python 2) and restart the kernel

import evaluation_measures as ev
import imp
imp.reload(ev)

x=diversity_metrics(test_ui, reco, topK=10)
x
Reco in test Test coverage Shannon Gini
0 1.0 0.033911 2.836513 0.991139
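
The Gini line may look cryptic, but for frequencies sorted ascending and normalized to sum to 1 it is the standard Gini coefficient, scaled so that a single dominating item gives exactly 1. A tiny worked example (hypothetical frequency vector):

f = np.sort(np.array([0.0, 0.5, 0.5]))   # normalized frequencies, ascending
w = np.arange(1 - len(f), len(f), 2)     # rank weights: [-2, 0, 2]
print(np.dot(f, w) / (len(f) - 1))       # 0.5 (0 = uniform, 1 = one item only)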

To be used in other notebooks

import evaluation_measures as ev
import imp
imp.reload(ev)

estimations_df=pd.read_csv('Recommendations generated/ml-100k/Ready_Baseline_estimations.csv', header=None)
reco=np.loadtxt('Recommendations generated/ml-100k/Ready_Baseline_reco.csv', delimiter=',')

ev.evaluate(test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\t', header=None),
            estimations_df=estimations_df, 
            reco=reco,
            super_reactions=[4,5])
# you can also just call ev.evaluate(estimations_df=estimations_df, reco=reco) - the remaining arguments above are the defaults
943it [00:00, 6361.52it/s]
RMSE MAE precision recall F_1 F_05 precision_super recall_super NDCG mAP MRR LAUC HR H2R Reco in test Test coverage Shannon Gini
0 0.949459 0.752487 0.09141 0.037652 0.04603 0.061286 0.079614 0.056463 0.095957 0.043178 0.198193 0.515501 0.437964 0.239661 1.0 0.033911 2.836513 0.991139
import evaluation_measures as ev
import imp
imp.reload(ev)

dir_path="Recommendations generated/ml-100k/"
super_reactions=[4,5]
test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\t', header=None)

df=ev.evaluate_all(test, dir_path, super_reactions)
# you can also just call ev.evaluate_all() - the values above are the defaults
943it [00:00, 6381.68it/s]
943it [00:00, 5946.17it/s]
943it [00:00, 6749.72it/s]
943it [00:00, 6013.22it/s]
943it [00:00, 6124.74it/s]
943it [00:00, 5717.96it/s]
df.iloc[:,:9]
Model RMSE MAE precision recall F_1 F_05 precision_super recall_super
0 Self_TopPop 2.508258 2.217909 0.188865 0.116919 0.118732 0.141584 0.130472 0.137473
0 Ready_Baseline 0.949459 0.752487 0.091410 0.037652 0.046030 0.061286 0.079614 0.056463
0 Self_GlobalAvg 1.125760 0.943534 0.061188 0.025968 0.031383 0.041343 0.040558 0.032107
0 Ready_Random 1.524954 1.223352 0.045599 0.021181 0.024585 0.031518 0.027897 0.021931
0 Self_TopRated NaN NaN 0.032025 0.012674 0.015714 0.021183 0.028433 0.018573
0 Self_BaselineUI 0.967585 0.762740 0.000954 0.000170 0.000278 0.000463 0.000644 0.000189
df.iloc[:,np.append(0,np.arange(9, df.shape[1]))]
Model NDCG mAP MRR LAUC HR H2R Reco in test Test coverage Shannon Gini
0 Self_TopPop 0.214651 0.111707 0.400939 0.555546 0.765642 0.492047 1.000000 0.038961 3.159079 0.987317
0 Ready_Baseline 0.095957 0.043178 0.198193 0.515501 0.437964 0.239661 1.000000 0.033911 2.836513 0.991139
0 Self_GlobalAvg 0.067695 0.027470 0.171187 0.509546 0.384942 0.142100 1.000000 0.025974 2.711772 0.992003
0 Ready_Random 0.048111 0.017381 0.119005 0.507096 0.330859 0.091198 0.988123 0.181818 5.100792 0.906866
0 Self_TopRated 0.022741 0.005328 0.031602 0.502764 0.237540 0.065748 0.697031 0.014430 2.220811 0.995173
0 Self_BaselineUI 0.000752 0.000168 0.001677 0.496424 0.009544 0.000000 0.600530 0.005051 1.803126 0.996380

Check metrics on the toy dataset

import evaluation_measures as ev
import imp
import helpers
imp.reload(ev)

dir_path="Recommendations generated/toy-example/"
super_reactions=[4,5]
test=pd.read_csv('./Datasets/toy-example/test.csv', sep='\t', header=None)

display(ev.evaluate_all(test, dir_path, super_reactions, topK=3))
# you can also just call ev.evaluate_all() - the values above are the defaults

toy_train_read=pd.read_csv('./Datasets/toy-example/train.csv', sep='\t', header=None, names=['user', 'item', 'rating', 'timestamp'])
toy_test_read=pd.read_csv('./Datasets/toy-example/test.csv', sep='\t', header=None, names=['user', 'item', 'rating', 'timestamp'])
reco=pd.read_csv('Recommendations generated/toy-example/Self_BaselineUI_reco.csv', header=None)
estimations=pd.read_csv('Recommendations generated/toy-example/Self_BaselineUI_estimations.csv', names=['user', 'item', 'est_score'])
toy_train_ui, toy_test_ui, toy_user_code_id, toy_user_id_code, \
toy_item_code_id, toy_item_id_code = helpers.data_to_csr(toy_train_read, toy_test_read)

print('Training data:')
display(toy_train_ui.todense())

print('Test data:')
display(toy_test_ui.todense())

print('Recommendations:')
display(reco)

print('Estimations:')
display(estimations)
3it [00:00, 2328.44it/s]
Model RMSE MAE precision recall F_1 F_05 precision_super recall_super NDCG mAP MRR LAUC HR H2R Reco in test Test coverage Shannon Gini
0 Self_BaselineUI 1.612452 1.4 0.444444 0.888889 0.555556 0.478632 0.333333 0.75 0.676907 0.574074 0.611111 0.638889 1.0 0.333333 0.888889 0.8 1.386294 0.25
Training data:
matrix([[3, 4, 0, 0, 5, 0, 0, 4],
        [0, 1, 2, 3, 0, 0, 0, 0],
        [0, 0, 0, 5, 0, 3, 4, 0]], dtype=int64)
Test data:
matrix([[0, 0, 0, 0, 0, 0, 3, 0],
        [0, 0, 0, 0, 5, 0, 0, 0],
        [5, 0, 4, 0, 0, 0, 0, 2]], dtype=int64)
Recommendations:
0 1 2 3 4 5 6
0 0 30 5.0 20 4.0 60 4.0
1 10 40 3.0 60 2.0 70 2.0
2 20 40 5.0 20 4.0 70 4.0
Estimations:
user item est_score
0 0 60 4.0
1 10 40 3.0
2 20 0 3.0
3 20 20 4.0
4 20 70 4.0
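
The toy numbers can be verified by hand. Reading the matrices above (column j corresponds to item 10*j): user 0 is recommended [30, 20, 60] and has one test hit (60), user 10 gets [40, 60, 70] with one hit (40), and user 20 gets [40, 20, 70] with two hits (20 and 70). Hence precision@3 = (1/3 + 1/3 + 2/3)/3 = 4/9 ≈ 0.4444 and recall = (1/1 + 1/1 + 2/3)/3 = 8/9 ≈ 0.8889, matching the table; every user has at least one hit (HR = 1.0), and only user 20 has at least two, so H2R = 1/3 ≈ 0.3333.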

Sample recommendations

train=pd.read_csv('./Datasets/ml-100k/train.csv', sep='\t', header=None, names=['user', 'item', 'rating', 'timestamp'])
items=pd.read_csv('./Datasets/ml-100k/movies.csv')

user=random.choice(list(set(train['user'])))

train_content=pd.merge(train, items, left_on='item', right_on='id')

print('Here is what user rated high:')
display(train_content[train_content['user']==user][['user', 'rating', 'title', 'genres']]\
        .sort_values(by='rating', ascending=False)[:15])

reco = np.loadtxt('Recommendations generated/ml-100k/Self_BaselineUI_reco.csv', delimiter=',')
items=pd.read_csv('./Datasets/ml-100k/movies.csv')

# Let's ignore scores - they are not used in evaluation: 
reco_users=reco[:,:1]
reco_items=reco[:,1::2]
# Let's put them into one array
reco=np.concatenate((reco_users, reco_items), axis=1)

# Let's rebuild it as a user-item dataframe
recommended=[]
for row in reco:
    for rec_nb, entry in enumerate(row[1:]):
        recommended.append((row[0], rec_nb+1, entry))
recommended=pd.DataFrame(recommended, columns=['user','rec_nb', 'item'])

recommended_content=pd.merge(recommended, items, left_on='item', right_on='id')

print('Here is what we recommend:')
recommended_content[recommended_content['user']==user][['user', 'rec_nb', 'title', 'genres']].sort_values(by='rec_nb')
Here is what user rated high:
user rating title genres
3054 4 5 Star Wars (1977) Action, Adventure, Romance, Sci-Fi, War
26266 4 5 Air Force One (1997) Action, Thriller
69162 4 5 Desperate Measures (1998) Crime, Drama, Thriller
58881 4 5 Wedding Singer, The (1998) Comedy, Romance
56335 4 5 Ulee's Gold (1997) Drama
48926 4 5 Lost Highway (1997) Mystery
46942 4 5 Cop Land (1997) Crime, Drama, Mystery
28827 4 5 Blues Brothers 2000 (1998) Action, Comedy, Musical
21562 4 5 In & Out (1997) Comedy
19981 4 5 Liar Liar (1997) Comedy
71647 4 5 Assignment, The (1997) Thriller
24585 4 4 One Flew Over the Cuckoo's Nest (1975) Drama
7469 4 4 Starship Troopers (1997) Action, Adventure, Sci-Fi, War
31546 4 4 Seven (Se7en) (1995) Crime, Thriller
32233 4 4 Event Horizon (1997) Action, Mystery, Sci-Fi, Thriller
Here is what we recommend:
user rec_nb title genres
3 4.0 1 Great Day in Harlem, A (1994) Documentary
945 4.0 2 Tough and Deadly (1995) Action, Drama, Thriller
1887 4.0 3 Aiqing wansui (1994) Drama
2829 4.0 4 Delta of Venus (1994) Drama
3771 4.0 5 Someone Else's America (1995) Drama
4713 4.0 6 Saint of Fort Washington, The (1993) Drama
5655 4.0 7 Celestial Clockwork (1994) Comedy
6597 4.0 8 Some Mother's Son (1996) Drama
8491 4.0 9 Maya Lin: A Strong Clear Vision (1994) Documentary
7538 4.0 10 Prefontaine (1997) Drama

Project task 3: implement some other evaluation measure

# it may be your own idea, a modification of what we have already implemented
# (for example a Hit2 rate, which would count as successes users who received at least 2 relevant recommendations)
# or something well-known
# expected output: a modification of evaluation_measures.py such that evaluate_all also displays your measure
dir_path="Recommendations generated/ml-100k/"
super_reactions=[4,5]
test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\t', header=None)

display(ev.evaluate_all(test, dir_path, super_reactions))
# H2R - too easy not to do this one (see the sketch below the results)
943it [00:00, 6201.84it/s]
943it [00:00, 6111.62it/s]
943it [00:00, 6907.71it/s]
943it [00:00, 5865.19it/s]
943it [00:00, 5220.21it/s]
943it [00:00, 5230.33it/s]
Model RMSE MAE precision recall F_1 F_05 precision_super recall_super NDCG mAP MRR LAUC HR H2R Reco in test Test coverage Shannon Gini
0 Self_TopPop 2.508258 2.217909 0.188865 0.116919 0.118732 0.141584 0.130472 0.137473 0.214651 0.111707 0.400939 0.555546 0.765642 0.492047 1.000000 0.038961 3.159079 0.987317
0 Ready_Baseline 0.949459 0.752487 0.091410 0.037652 0.046030 0.061286 0.079614 0.056463 0.095957 0.043178 0.198193 0.515501 0.437964 0.239661 1.000000 0.033911 2.836513 0.991139
0 Self_GlobalAvg 1.125760 0.943534 0.061188 0.025968 0.031383 0.041343 0.040558 0.032107 0.067695 0.027470 0.171187 0.509546 0.384942 0.142100 1.000000 0.025974 2.711772 0.992003
0 Ready_Random 1.524954 1.223352 0.045599 0.021181 0.024585 0.031518 0.027897 0.021931 0.048111 0.017381 0.119005 0.507096 0.330859 0.091198 0.988123 0.181818 5.100792 0.906866
0 Self_TopRated NaN NaN 0.032025 0.012674 0.015714 0.021183 0.028433 0.018573 0.022741 0.005328 0.031602 0.502764 0.237540 0.065748 0.697031 0.014430 2.220811 0.995173
0 Self_BaselineUI 0.967585 0.762740 0.000954 0.000170 0.000278 0.000463 0.000644 0.000189 0.000752 0.000168 0.001677 0.496424 0.009544 0.000000 0.600530 0.005051 1.803126 0.996380
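
For reference, a minimal sketch of the kind of measure the task asks for - a standalone Hit2 rate (a hypothetical helper, not the code in evaluation_measures.py; the graded version should live there so that evaluate_all reports it, and reco is assumed to contain inner ids as in the Ranking metrics section):

def hit2_rate(test_ui, reco, topK=10):
    # a user counts as a success only if at least 2 of their topK
    # recommendations appear among their test-set items
    successes, relevant_users = 0, 0
    for nb_user, user in enumerate(reco[:, 0].astype(int)):
        u_rated_items = test_ui.indices[test_ui.indptr[user]:test_ui.indptr[user + 1]]
        if len(u_rated_items) > 0:
            relevant_users += 1
            hits = sum(item in u_rated_items for item in reco[nb_user, 1:topK + 1])
            successes += (hits >= 2)
    return successes / relevant_users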