Systemy-rekomedacyjne-praca.../P2. Evaluation.ipynb


Prepare test set

import pandas as pd
import numpy as np
import scipy.sparse as sparse
from collections import defaultdict
from itertools import chain
import random
from tqdm import tqdm

# For evaluation we do not load the train set - it is not needed here
test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\t', header=None)
test.columns=['user', 'item', 'rating', 'timestamp']

# Map raw user and item ids to consecutive integer codes
test['user_code'] = test['user'].astype("category").cat.codes
test['item_code'] = test['item'].astype("category").cat.codes

# Dictionaries translating between inner codes (matrix indices) and original ids
user_code_id = dict(enumerate(test['user'].astype("category").cat.categories))
user_id_code = dict((v, k) for k, v in user_code_id.items())
item_code_id = dict(enumerate(test['item'].astype("category").cat.categories))
item_id_code = dict((v, k) for k, v in item_code_id.items())

# Sparse user-item matrix of test ratings
test_ui = sparse.csr_matrix((test['rating'], (test['user_code'], test['item_code'])))
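
The ranking metrics below read a user's test items straight from the CSR arrays (test_ui.indptr and test_ui.indices), so it is worth seeing that access pattern on its own. A minimal sketch, with user code 0 chosen purely as an example:

u = 0  # example user code; any valid code works the same way
start, stop = test_ui.indptr[u], test_ui.indptr[u + 1]
rated_items = test_ui.indices[start:stop]  # item codes rated by user u in the test set
ratings = test_ui.data[start:stop]         # the corresponding ratings
print(dict(zip(rated_items, ratings)))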

Estimation metrics

estimations_df=pd.read_csv('Recommendations generated/ml-100k/Ready_Baseline_estimations.csv', header=None)
estimations_df.columns=['user', 'item', 'score']

# Translate raw ids into the inner codes used by test_ui
estimations_df['user_code']=[user_id_code[user] for user in estimations_df['user']]
estimations_df['item_code']=[item_id_code[item] for item in estimations_df['item']]
# Sparse matrix of predicted scores, aligned with the shape of the test matrix
estimations=sparse.csr_matrix((estimations_df['score'], (estimations_df['user_code'], estimations_df['item_code'])), shape=test_ui.shape)
def estimations_metrics(test_ui, estimations):
    result=[]

    RMSE=(np.sum((estimations.data-test_ui.data)**2)/estimations.nnz)**(1/2)
    result.append(['RMSE', RMSE])

    MAE=np.sum(abs(estimations.data-test_ui.data))/estimations.nnz
    result.append(['MAE', MAE])
    
    df_result=(pd.DataFrame(list(zip(*result))[1])).T
    df_result.columns=list(zip(*result))[0]
    return df_result
# in case of an error (in the laboratories) you might have to switch to another version of pandas
# try !pip3 install pandas==1.0.3 (or pip instead of pip3, depending on your setup) and restart the kernel

estimations_metrics(test_ui, estimations)
RMSE MAE
0 0.949459 0.752487
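
Note that estimations_metrics subtracts the raw .data arrays of the two sparse matrices, which only lines up when the estimations file contains exactly the test-set (user, item) pairs. As a quick sanity check of the formulas themselves, on made-up numbers (not from ml-100k):

true_r = np.array([4, 3, 5])                     # hypothetical true ratings
pred_r = np.array([3.5, 3, 4])                   # hypothetical predictions
rmse = np.sqrt(np.mean((pred_r - true_r) ** 2))  # sqrt((0.25 + 0 + 1) / 3) ≈ 0.645
mae = np.mean(np.abs(pred_r - true_r))           # (0.5 + 0 + 1) / 3 = 0.5
print(rmse, mae)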

Ranking metrics

import numpy as np
reco = np.loadtxt('Recommendations generated/ml-100k/Ready_Baseline_reco.csv', delimiter=',')
# Each row of the reco file has the layout: user, item1, score1, item2, score2, ...
# Let's ignore the scores - they are not used in evaluation:
users=reco[:,:1]
items=reco[:,1::2]
# Let's use inner ids instead of real ones
users=np.vectorize(lambda x: user_id_code.setdefault(x, -1))(users)
items=np.vectorize(lambda x: item_id_code.setdefault(x, -1))(items) # recommended items may be absent from the test set - map them to -1
# Let's put them into one array
reco=np.concatenate((users, items), axis=1)
reco
array([[663, 475,  62, ..., 472, 269, 503],
       [ 48, 313, 475, ..., 591, 175, 466],
       [351, 313, 475, ..., 591, 175, 466],
       ...,
       [259, 313, 475, ...,  11, 591, 175],
       [ 33, 313, 475, ...,  11, 591, 175],
       [ 77, 313, 475, ...,  11, 591, 175]])
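
A quick illustration of the slicing above on one made-up row (layout: user, item1, score1, item2, score2, ...; the numbers are hypothetical, not from the generated file):

row = np.array([[1., 50., 5.0, 32., 4.9, 7., 4.8]])
print(row[:, :1])    # [[1.]]            - the user column
print(row[:, 1::2])  # [[50. 32.  7.]]   - recommended items (every second column starting from index 1)
print(row[:, 2::2])  # [[5.  4.9 4.8]]   - the scores, which the evaluation ignores
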
def ranking_metrics(test_ui, reco, super_reactions=[], topK=10):
    
    nb_items=test_ui.shape[1]
    relevant_users, super_relevant_users, prec, rec, F_1, F_05, prec_super, rec_super, ndcg, mAP, MRR, LAUC, HR=\
    0,0,0,0,0,0,0,0,0,0,0,0,0
    
    cg = (1.0 / np.log2(np.arange(2, topK + 2)))
    cg_sum = np.cumsum(cg)
    
    for (nb_user, user) in tqdm(enumerate(reco[:,0])):
        u_rated_items=test_ui.indices[test_ui.indptr[user]:test_ui.indptr[user+1]]
        nb_u_rated_items=len(u_rated_items)
        if nb_u_rated_items>0: # skip users with no items in the test set (they may still turn out to have no super items)
            relevant_users+=1
            
            u_super_items=u_rated_items[np.vectorize(lambda x: x in super_reactions)\
            (test_ui.data[test_ui.indptr[user]:test_ui.indptr[user+1]])]
            # more natural would be u_super_items=[item for item in u_rated_items if test_ui[user,item] in super_reactions]
            # but accessing test_ui[user,item] is expensive - we should avoid doing it
            if len(u_super_items)>0:
                super_relevant_users+=1
            
            user_successes=np.zeros(topK)
            nb_user_successes=0
            user_super_successes=np.zeros(topK)
            nb_user_super_successes=0
            
            # evaluation
            for (item_position,item) in enumerate(reco[nb_user,1:topK+1]):
                if item in u_rated_items:
                    user_successes[item_position]=1
                    nb_user_successes+=1
                    if item in u_super_items:
                        user_super_successes[item_position]=1
                        nb_user_super_successes+=1
                        
            prec_u=nb_user_successes/topK 
            prec+=prec_u
            
            rec_u=nb_user_successes/nb_u_rated_items
            rec+=rec_u
            
            F_1+=2*(prec_u*rec_u)/(prec_u+rec_u) if prec_u+rec_u>0 else 0
            F_05+=(0.5**2+1)*(prec_u*rec_u)/(0.5**2*prec_u+rec_u) if prec_u+rec_u>0 else 0
            
            prec_super+=nb_user_super_successes/topK
            rec_super+=nb_user_super_successes/max(len(u_super_items),1) # max(...,1) keeps this at 0 when the user has no super items
            ndcg+=np.dot(user_successes,cg)/cg_sum[min(topK, nb_u_rated_items)-1]
            
            cumsum_successes=np.cumsum(user_successes)
            mAP+=np.dot(cumsum_successes/np.arange(1,topK+1), user_successes)/min(topK, nb_u_rated_items)
            MRR+=1/(user_successes.nonzero()[0][0]+1) if user_successes.nonzero()[0].size>0 else 0
            LAUC+=(np.dot(cumsum_successes, 1-user_successes)+\
            (nb_user_successes+nb_u_rated_items)/2*((nb_items-nb_u_rated_items)-(topK-nb_user_successes)))/\
            ((nb_items-nb_u_rated_items)*nb_u_rated_items)
            
            HR+=nb_user_successes>0
            
            
    result=[]
    result.append(('precision', prec/relevant_users))
    result.append(('recall', rec/relevant_users))
    result.append(('F_1', F_1/relevant_users))
    result.append(('F_05', F_05/relevant_users))
    result.append(('precision_super', prec_super/super_relevant_users))
    result.append(('recall_super', rec_super/super_relevant_users))
    result.append(('NDCG', ndcg/relevant_users))
    result.append(('mAP', mAP/relevant_users))
    result.append(('MRR', MRR/relevant_users))
    result.append(('LAUC', LAUC/relevant_users))
    result.append(('HR', HR/relevant_users))

    df_result=(pd.DataFrame(list(zip(*result))[1])).T
    df_result.columns=list(zip(*result))[0]
    return df_result
ranking_metrics(test_ui, reco, super_reactions=[4,5], topK=10)
943it [00:00, 11776.77it/s]
precision recall F_1 F_05 precision_super recall_super NDCG mAP MRR LAUC HR
0 0.09141 0.037652 0.04603 0.061286 0.079614 0.056463 0.095957 0.043178 0.198193 0.515501 0.437964
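
To see what a single user contributes to these averages, here is a hand-worked toy case (a made-up hit pattern, not taken from ml-100k): topK=10, three relevant items in the test set, hits at ranks 2 and 5. The per-user values follow the same formulas as ranking_metrics.

topK = 10
user_successes = np.array([0, 1, 0, 0, 1, 0, 0, 0, 0, 0], dtype=float)  # hits at ranks 2 and 5
nb_u_rated_items = 3                                                    # relevant items in the test set

prec_u = user_successes.sum() / topK                      # 0.2
rec_u = user_successes.sum() / nb_u_rated_items           # 0.667
cg = 1.0 / np.log2(np.arange(2, topK + 2))
cg_sum = np.cumsum(cg)
ndcg_u = np.dot(user_successes, cg) / cg_sum[min(topK, nb_u_rated_items) - 1]  # ≈ 0.478
mrr_u = 1 / (user_successes.nonzero()[0][0] + 1)          # first hit at rank 2 -> 0.5
print(prec_u, rec_u, ndcg_u, mrr_u)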

Diversity metrics

def diversity_metrics(test_ui, reco, topK=10):
    
    frequencies=defaultdict(int)
    
    # let's assign 0 to all items in test set
    for item in list(set(test_ui.indices)):
        frequencies[item]=0
        
    # counting frequencies
    for item in reco[:,1:].flat:
        frequencies[item]+=1
        
    # recommendations mapped to -1 earlier are items that do not appear in the test set
    nb_reco_outside_test=frequencies[-1]
    del frequencies[-1]
    
    frequencies=np.array(list(frequencies.values()))
                         
    nb_rec_items=len(frequencies[frequencies>0])
    nb_reco_inside_test=np.sum(frequencies)
                         
    frequencies=frequencies/np.sum(frequencies)
    frequencies=np.sort(frequencies)
    
    with np.errstate(divide='ignore'): # let's put zeros for items with 0 frequency and ignore the division warning
        log_frequencies=np.nan_to_num(np.log(frequencies), posinf=0, neginf=0)
                         
    result=[]
    result.append(('Reco in test', nb_reco_inside_test/(nb_reco_inside_test+nb_reco_outside_test)))
    result.append(('Test coverage', nb_rec_items/test_ui.shape[1]))
    result.append(('Shannon', -np.dot(frequencies, log_frequencies)))
    result.append(('Gini', np.dot(frequencies, np.arange(1-len(frequencies), len(frequencies), 2))/(len(frequencies)-1)))
    
    df_result=(pd.DataFrame(list(zip(*result))[1])).T
    df_result.columns=list(zip(*result))[0]
    return df_result
# in case of errors try !pip3 install numpy==1.18.4 (or pip instead of pip3, depending on your setup) and restart the kernel

import evaluation_measures as ev
import imp
imp.reload(ev)

x=diversity_metrics(test_ui, reco, topK=10)
x
Reco in test Test coverage Shannon Gini
0 1.0 0.033911 2.836513 0.991139
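
The two concentration measures are easiest to read on a tiny made-up frequency vector: Shannon entropy is maximal (the log of the number of items) when recommendations are spread evenly, while the Gini index, computed as above on sorted frequencies, approaches 1 when a few items dominate. A small sketch with hypothetical frequencies:

freq = np.sort(np.array([8, 1, 1, 0]) / 10)      # one item receives 80% of all recommendations
with np.errstate(divide='ignore'):
    log_freq = np.nan_to_num(np.log(freq), posinf=0, neginf=0)
shannon = -np.dot(freq, log_freq)                # ≈ 0.64, far below np.log(4) ≈ 1.39 for a uniform spread
gini = np.dot(freq, np.arange(1 - len(freq), len(freq), 2)) / (len(freq) - 1)  # 0.8
print(shannon, gini)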

To be used in other notebooks

import evaluation_measures as ev
import imp
imp.reload(ev)

estimations_df=pd.read_csv('Recommendations generated/ml-100k/Ready_Baseline_estimations.csv', header=None)
reco=np.loadtxt('Recommendations generated/ml-100k/Ready_Baseline_reco.csv', delimiter=',')

ev.evaluate(test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\t', header=None),
            estimations_df=estimations_df, 
            reco=reco,
            super_reactions=[4,5])
# alternatively you can just call ev.evaluate_all(estimations_df, reco) - the values above are set as the defaults
943it [00:00, 11489.51it/s]
RMSE MAE precision recall F_1 F_05 precision_super recall_super NDCG mAP MRR LAUC HR F_2 Whole_average Reco in test Test coverage Shannon Gini
0 0.949459 0.752487 0.09141 0.037652 0.04603 0.061286 0.079614 0.056463 0.095957 0.043178 0.198193 0.515501 0.437964 0.039549 0.1419 1.0 0.033911 2.836513 0.991139
import evaluation_measures as ev
import imp
imp.reload(ev)

dir_path="Recommendations generated/ml-100k/"
super_reactions=[4,5]
test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\t', header=None)

df=ev.evaluate_all(test, dir_path, super_reactions)
# alternatively you can just call ev.evaluate_all() - the values above are set as the defaults
[18 × tqdm progress bars: 943it each, ~10,700-12,600it/s]
df.iloc[:,:9]
Model RMSE MAE precision recall F_1 F_05 precision_super recall_super
0 Self_RP3Beta 3.702928 3.527713 0.322694 0.216069 0.212152 0.247538 0.245279 0.284983
0 Self_P3 3.702446 3.527273 0.282185 0.192092 0.186749 0.216980 0.204185 0.240096
0 Self_TopPop 2.508258 2.217909 0.188865 0.116919 0.118732 0.141584 0.130472 0.137473
0 Self_SVDBaseline 3.645871 3.480308 0.135949 0.078868 0.082011 0.099188 0.106974 0.103767
0 Ready_SVD 0.950835 0.748676 0.097879 0.048335 0.053780 0.068420 0.086159 0.080289
0 Self_SVD 0.913966 0.717846 0.105514 0.044566 0.054152 0.071575 0.095386 0.075767
0 Ready_Baseline 0.949459 0.752487 0.091410 0.037652 0.046030 0.061286 0.079614 0.056463
0 Ready_SVDBiased 0.943277 0.743628 0.080912 0.033048 0.040445 0.053881 0.070815 0.049631
0 Self_KNNSurprisetask 0.946255 0.745209 0.083457 0.032848 0.041227 0.055493 0.074785 0.048890
0 Self_TopRated 2.508258 2.217909 0.079321 0.032667 0.039983 0.053170 0.068884 0.048582
0 Self_GlobalAvg 1.125760 0.943534 0.061188 0.025968 0.031383 0.041343 0.040558 0.032107
0 Ready_Random 1.514265 1.215956 0.048780 0.021007 0.024667 0.032495 0.031867 0.023414
0 Ready_I-KNN 1.030386 0.813067 0.026087 0.006908 0.010593 0.016046 0.021137 0.009522
0 Ready_I-KNNBaseline 0.935327 0.737424 0.002545 0.000755 0.001105 0.001602 0.002253 0.000930
0 Ready_U-KNN 1.023495 0.807913 0.000742 0.000205 0.000305 0.000449 0.000536 0.000198
0 Self_BaselineIU 0.958136 0.754051 0.000954 0.000188 0.000298 0.000481 0.000644 0.000223
0 Self_BaselineUI 0.967585 0.762740 0.000954 0.000170 0.000278 0.000463 0.000644 0.000189
0 Self_IKNN 1.018363 0.808793 0.000318 0.000108 0.000140 0.000189 0.000000 0.000000
df.iloc[:,np.append(0,np.arange(9, df.shape[1]))]
Model NDCG mAP MRR LAUC HR F_2 Whole_average Reco in test Test coverage Shannon Gini
0 Self_RP3Beta 0.388271 0.248239 0.636318 0.605683 0.910923 0.205450 0.376967 0.999788 0.178932 4.549663 0.950182
0 Self_P3 0.339114 0.204905 0.572157 0.593544 0.875928 0.181702 0.340803 1.000000 0.077201 3.875892 0.974947
0 Self_TopPop 0.214651 0.111707 0.400939 0.555546 0.765642 0.112750 0.249607 1.000000 0.038961 3.159079 0.987317
0 Self_SVDBaseline 0.159486 0.079783 0.328576 0.536311 0.632025 0.077145 0.201674 0.999894 0.281385 5.140721 0.909056
0 Ready_SVD 0.113553 0.054094 0.249037 0.520893 0.498409 0.048439 0.159941 0.997985 0.204906 4.395721 0.954872
0 Self_SVD 0.108802 0.051730 0.200919 0.519021 0.482503 0.046741 0.154723 0.861612 0.142136 3.845461 0.973440
0 Ready_Baseline 0.095957 0.043178 0.198193 0.515501 0.437964 0.039549 0.141900 1.000000 0.033911 2.836513 0.991139
0 Ready_SVDBiased 0.090496 0.041928 0.200192 0.513176 0.411453 0.034776 0.135063 0.998727 0.168110 4.165618 0.964211
0 Self_KNNSurprisetask 0.089577 0.040902 0.189057 0.513076 0.417815 0.034996 0.135177 0.888547 0.130592 3.611806 0.978659
0 Self_TopRated 0.070766 0.027602 0.114790 0.512943 0.411453 0.034385 0.124546 1.000000 0.024531 2.761238 0.991660
0 Self_GlobalAvg 0.067695 0.027470 0.171187 0.509546 0.384942 0.027213 0.118383 1.000000 0.025974 2.711772 0.992003
0 Ready_Random 0.052904 0.020511 0.126790 0.507024 0.322375 0.021635 0.102789 0.988017 0.183983 5.100443 0.906900
0 Ready_I-KNN 0.024214 0.008958 0.048068 0.499885 0.154825 0.008007 0.069521 0.402333 0.434343 5.133650 0.877999
0 Ready_I-KNNBaseline 0.003444 0.001362 0.011760 0.496724 0.021209 0.000862 0.045379 0.482821 0.059885 2.232578 0.994487
0 Ready_U-KNN 0.000845 0.000274 0.002744 0.496441 0.007423 0.000235 0.042533 0.602121 0.010823 2.089186 0.995706
0 Self_BaselineIU 0.001043 0.000335 0.003348 0.496433 0.009544 0.000220 0.042809 0.699046 0.005051 1.945910 0.995669
0 Self_BaselineUI 0.000752 0.000168 0.001677 0.496424 0.009544 0.000201 0.042622 0.600530 0.005051 1.803126 0.996380
0 Self_IKNN 0.000214 0.000037 0.000368 0.496391 0.003181 0.000118 0.041755 0.392153 0.115440 4.174741 0.965327
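
Two columns above are not defined in this notebook: F_2 presumably uses the same F-beta formula as F_05, only with beta=2 (so recall is weighted more heavily), and Whole_average is an aggregate column produced by evaluation_measures.py. A hedged per-user sketch of F-beta, mirroring the F_05 line of ranking_metrics:

# Per-user F-beta, mirroring the F_05 computation in ranking_metrics
# (a sketch of the idea, not the exact code from evaluation_measures.py).
def f_beta(prec_u, rec_u, beta):
    if prec_u + rec_u == 0:
        return 0
    return (beta**2 + 1) * prec_u * rec_u / (beta**2 * prec_u + rec_u)

print(f_beta(0.2, 0.5, beta=2))    # F_2 favours recall
print(f_beta(0.2, 0.5, beta=0.5))  # F_05 favours precision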

Check metrics on toy dataset

import evaluation_measures as ev
import imp
import helpers
imp.reload(ev)

dir_path="Recommendations generated/toy-example/"
super_reactions=[4,5]
test=pd.read_csv('./Datasets/toy-example/test.csv', sep='\t', header=None)

display(ev.evaluate_all(test, dir_path, super_reactions, topK=3))
# alternatively you can just call ev.evaluate_all() - the values above are set as the defaults

toy_train_read=pd.read_csv('./Datasets/toy-example/train.csv', sep='\t', header=None, names=['user', 'item', 'rating', 'timestamp'])
toy_test_read=pd.read_csv('./Datasets/toy-example/test.csv', sep='\t', header=None, names=['user', 'item', 'rating', 'timestamp'])
reco=pd.read_csv('Recommendations generated/toy-example/Self_BaselineUI_reco.csv', header=None)
estimations=pd.read_csv('Recommendations generated/toy-example/Self_BaselineUI_estimations.csv', names=['user', 'item', 'est_score'])
toy_train_ui, toy_test_ui, toy_user_code_id, toy_user_id_code, \
toy_item_code_id, toy_item_id_code = helpers.data_to_csr(toy_train_read, toy_test_read)

print('Training data:')
display(toy_train_ui.todense())

print('Test data:')
display(toy_test_ui.todense())

print('Recommendations:')
display(reco)

print('Estimations:')
display(estimations)
3it [00:00, ?it/s]
3it [00:00, ?it/s]
Model RMSE MAE precision recall F_1 F_05 precision_super recall_super NDCG mAP MRR LAUC HR F_2 Whole_average Reco in test Test coverage Shannon Gini
0 Self_BaselineUI 1.612452 1.400 0.444444 0.888889 0.555556 0.478632 0.333333 0.75 0.676907 0.574074 0.611111 0.638889 1.0 0.698413 0.637521 0.888889 0.8 1.386294 0.250000
0 Self_BaselineIU 1.648337 1.575 0.444444 0.888889 0.555556 0.478632 0.333333 0.75 0.720550 0.629630 0.666667 0.722222 1.0 0.698413 0.657361 0.777778 0.8 1.351784 0.357143
Training data:
matrix([[3, 4, 0, 0, 5, 0, 0, 4],
        [0, 1, 2, 3, 0, 0, 0, 0],
        [0, 0, 0, 5, 0, 3, 4, 0]], dtype=int64)
Test data:
matrix([[0, 0, 0, 0, 0, 0, 3, 0],
        [0, 0, 0, 0, 5, 0, 0, 0],
        [5, 0, 4, 0, 0, 0, 0, 2]], dtype=int64)
Recommendations:
0 1 2 3 4 5 6
0 0 30 5.0 20 4.0 60 4.0
1 10 40 3.0 60 2.0 70 2.0
2 20 40 5.0 20 4.0 70 4.0
Estimations:
user item est_score
0 0 60 4.0
1 10 40 3.0
2 20 0 3.0
3 20 20 4.0
4 20 70 4.0
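
Reading the test matrix and the recommendation table together, the Self_BaselineUI precision and recall reported above can be verified by hand, assuming item ids 0, 10, ..., 70 map to matrix columns 0..7: user 0 hits item 60, user 10 hits item 40, and user 20 hits items 20 and 70 out of its three test items.

# Hand check of precision@3 and recall@3 for Self_BaselineUI on the toy data
# (assuming item ids 0, 10, ..., 70 correspond to matrix columns 0..7).
hits = np.array([1, 1, 2])        # hits per user: {60}, {40}, {20, 70}
relevant = np.array([1, 1, 3])    # number of test items per user
print((hits / 3).mean())          # precision@3 -> 0.4444...
print((hits / relevant).mean())   # recall@3    -> 0.8888...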

Sample recommendations

train=pd.read_csv('./Datasets/ml-100k/train.csv', sep='\t', header=None, names=['user', 'item', 'rating', 'timestamp'])
items=pd.read_csv('./Datasets/ml-100k/movies.csv')

user=random.choice(list(set(train['user'])))

train_content=pd.merge(train, items, left_on='item', right_on='id')

print('Here is what the user rated highly:')
display(train_content[train_content['user']==user][['user', 'rating', 'title', 'genres']]\
        .sort_values(by='rating', ascending=False)[:15])

reco = np.loadtxt('Recommendations generated/ml-100k/Self_BaselineUI_reco.csv', delimiter=',')
items=pd.read_csv('./Datasets/ml-100k/movies.csv')

# Let's ignore scores - they are not used in evaluation: 
reco_users=reco[:,:1]
reco_items=reco[:,1::2]
# Let's put them into one array
reco=np.concatenate((reco_users, reco_items), axis=1)

# Let's rebuild it into a user-item dataframe
recommended=[]
for row in reco:
    for rec_nb, entry in enumerate(row[1:]):
        recommended.append((row[0], rec_nb+1, entry))
recommended=pd.DataFrame(recommended, columns=['user','rec_nb', 'item'])

recommended_content=pd.merge(recommended, items, left_on='item', right_on='id')

print('Here is what we recommend:')
recommended_content[recommended_content['user']==user][['user', 'rec_nb', 'title', 'genres']].sort_values(by='rec_nb')
Here is what the user rated highly:
user rating title genres
22969 757 5 Rob Roy (1995) Drama, Romance, War
3557 757 5 Aliens (1986) Action, Sci-Fi, Thriller, War
37294 757 5 Contact (1997) Drama, Sci-Fi
31932 757 5 Shawshank Redemption, The (1994) Drama
6545 757 5 Unforgiven (1992) Western
6261 757 5 Raiders of the Lost Ark (1981) Action, Adventure
53730 757 5 Get Shorty (1995) Action, Comedy, Drama
25135 757 4 Twelve Monkeys (1995) Drama, Sci-Fi
26741 757 4 Star Trek IV: The Voyage Home (1986) Action, Adventure, Sci-Fi
27269 757 4 Empire Strikes Back, The (1980) Action, Adventure, Drama, Romance, Sci-Fi, War
51365 757 4 Interview with the Vampire (1994) Drama, Horror
54800 757 4 Scream 2 (1997) Horror, Thriller
28015 757 4 Face/Off (1997) Action, Sci-Fi, Thriller
53448 757 4 Nightmare Before Christmas, The (1993) Children's, Comedy, Musical
28534 757 4 Right Stuff, The (1983) Drama
Here is what we recommend:
user rec_nb title genres
755 757.0 1 Great Day in Harlem, A (1994) Documentary
1697 757.0 2 Tough and Deadly (1995) Action, Drama, Thriller
2639 757.0 3 Aiqing wansui (1994) Drama
3581 757.0 4 Delta of Venus (1994) Drama
4523 757.0 5 Someone Else's America (1995) Drama
5465 757.0 6 Saint of Fort Washington, The (1993) Drama
6407 757.0 7 Celestial Clockwork (1994) Comedy
7348 757.0 8 Some Mother's Son (1996) Drama
9244 757.0 9 Maya Lin: A Strong Clear Vision (1994) Documentary
8290 757.0 10 Prefontaine (1997) Drama

Project task 3: implement some other evaluation measure

# It may be your own idea, a modification of what we have already implemented
# (for example a Hit2 rate, which would count as successes only users who received at least 2 relevant recommendations)
# or something well-known.
# Expected output: a modification of evaluation_measures.py such that evaluate_all will also display your measure.
# A hedged sketch of the Hit2-rate idea is shown after the results table below.
dir_path="Recommendations generated/ml-100k/"
super_reactions=[4,5]
test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\t', header=None)

ev.evaluate_all(test, dir_path, super_reactions)
[18 × tqdm progress bars: 943it each, ~10,600-12,600it/s]
Model RMSE MAE precision recall F_1 F_05 precision_super recall_super NDCG mAP MRR LAUC HR F_2 Whole_average Reco in test Test coverage Shannon Gini
0 Self_RP3Beta 3.702928 3.527713 0.322694 0.216069 0.212152 0.247538 0.245279 0.284983 0.388271 0.248239 0.636318 0.605683 0.910923 0.205450 0.376967 0.999788 0.178932 4.549663 0.950182
0 Self_P3 3.702446 3.527273 0.282185 0.192092 0.186749 0.216980 0.204185 0.240096 0.339114 0.204905 0.572157 0.593544 0.875928 0.181702 0.340803 1.000000 0.077201 3.875892 0.974947
0 Self_TopPop 2.508258 2.217909 0.188865 0.116919 0.118732 0.141584 0.130472 0.137473 0.214651 0.111707 0.400939 0.555546 0.765642 0.112750 0.249607 1.000000 0.038961 3.159079 0.987317
0 Self_SVDBaseline 3.645871 3.480308 0.135949 0.078868 0.082011 0.099188 0.106974 0.103767 0.159486 0.079783 0.328576 0.536311 0.632025 0.077145 0.201674 0.999894 0.281385 5.140721 0.909056
0 Ready_SVD 0.950835 0.748676 0.097879 0.048335 0.053780 0.068420 0.086159 0.080289 0.113553 0.054094 0.249037 0.520893 0.498409 0.048439 0.159941 0.997985 0.204906 4.395721 0.954872
0 Self_SVD 0.913966 0.717846 0.105514 0.044566 0.054152 0.071575 0.095386 0.075767 0.108802 0.051730 0.200919 0.519021 0.482503 0.046741 0.154723 0.861612 0.142136 3.845461 0.973440
0 Ready_Baseline 0.949459 0.752487 0.091410 0.037652 0.046030 0.061286 0.079614 0.056463 0.095957 0.043178 0.198193 0.515501 0.437964 0.039549 0.141900 1.000000 0.033911 2.836513 0.991139
0 Ready_SVDBiased 0.943277 0.743628 0.080912 0.033048 0.040445 0.053881 0.070815 0.049631 0.090496 0.041928 0.200192 0.513176 0.411453 0.034776 0.135063 0.998727 0.168110 4.165618 0.964211
0 Self_KNNSurprisetask 0.946255 0.745209 0.083457 0.032848 0.041227 0.055493 0.074785 0.048890 0.089577 0.040902 0.189057 0.513076 0.417815 0.034996 0.135177 0.888547 0.130592 3.611806 0.978659
0 Self_TopRated 2.508258 2.217909 0.079321 0.032667 0.039983 0.053170 0.068884 0.048582 0.070766 0.027602 0.114790 0.512943 0.411453 0.034385 0.124546 1.000000 0.024531 2.761238 0.991660
0 Self_GlobalAvg 1.125760 0.943534 0.061188 0.025968 0.031383 0.041343 0.040558 0.032107 0.067695 0.027470 0.171187 0.509546 0.384942 0.027213 0.118383 1.000000 0.025974 2.711772 0.992003
0 Ready_Random 1.514265 1.215956 0.048780 0.021007 0.024667 0.032495 0.031867 0.023414 0.052904 0.020511 0.126790 0.507024 0.322375 0.021635 0.102789 0.988017 0.183983 5.100443 0.906900
0 Ready_I-KNN 1.030386 0.813067 0.026087 0.006908 0.010593 0.016046 0.021137 0.009522 0.024214 0.008958 0.048068 0.499885 0.154825 0.008007 0.069521 0.402333 0.434343 5.133650 0.877999
0 Ready_I-KNNBaseline 0.935327 0.737424 0.002545 0.000755 0.001105 0.001602 0.002253 0.000930 0.003444 0.001362 0.011760 0.496724 0.021209 0.000862 0.045379 0.482821 0.059885 2.232578 0.994487
0 Ready_U-KNN 1.023495 0.807913 0.000742 0.000205 0.000305 0.000449 0.000536 0.000198 0.000845 0.000274 0.002744 0.496441 0.007423 0.000235 0.042533 0.602121 0.010823 2.089186 0.995706
0 Self_BaselineIU 0.958136 0.754051 0.000954 0.000188 0.000298 0.000481 0.000644 0.000223 0.001043 0.000335 0.003348 0.496433 0.009544 0.000220 0.042809 0.699046 0.005051 1.945910 0.995669
0 Self_BaselineUI 0.967585 0.762740 0.000954 0.000170 0.000278 0.000463 0.000644 0.000189 0.000752 0.000168 0.001677 0.496424 0.009544 0.000201 0.042622 0.600530 0.005051 1.803126 0.996380
0 Self_IKNN 1.018363 0.808793 0.000318 0.000108 0.000140 0.000189 0.000000 0.000000 0.000214 0.000037 0.000368 0.496391 0.003181 0.000118 0.041755 0.392153 0.115440 4.174741 0.965327
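
As a starting point for the task above, here is a hedged sketch of the suggested Hit2 rate: a user counts as a success only when at least two of their top-K recommendations are relevant. It mirrors the HR accumulation inside ranking_metrics and expects the inner-id reco array built in the Ranking metrics section (the reco variable was later overwritten with raw ids in the Sample recommendations section); evaluation_measures.py would still need an analogous accumulator for evaluate_all to report the new column.

# A sketch of the suggested Hit2 rate: a user counts only with >= 2 relevant items in the top K.
def hit2_rate(test_ui, reco, topK=10):
    relevant_users, hits2 = 0, 0
    for nb_user, user in enumerate(reco[:, 0]):
        u_rated_items = test_ui.indices[test_ui.indptr[user]:test_ui.indptr[user + 1]]
        if len(u_rated_items) > 0:  # same skip rule as in ranking_metrics
            relevant_users += 1
            nb_successes = sum(item in u_rated_items for item in reco[nb_user, 1:topK + 1])
            hits2 += nb_successes >= 2
    return hits2 / relevant_users

# usage, with the inner-id reco array from the Ranking metrics section:
# hit2_rate(test_ui, reco, topK=10)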