{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prepare test set"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"slideshow": {
"slide_type": "-"
}
},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"import scipy.sparse as sparse\n",
"from collections import defaultdict\n",
"from itertools import chain\n",
"import random\n",
"from tqdm import tqdm\n",
"\n",
"# In evaluation we do not load train set - it is not needed\n",
"test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None)\n",
"test.columns=['user', 'item', 'rating', 'timestamp']\n",
"\n",
"test['user_code'] = test['user'].astype(\"category\").cat.codes\n",
"test['item_code'] = test['item'].astype(\"category\").cat.codes\n",
"\n",
"user_code_id = dict(enumerate(test['user'].astype(\"category\").cat.categories))\n",
"user_id_code = dict((v, k) for k, v in user_code_id.items())\n",
"item_code_id = dict(enumerate(test['item'].astype(\"category\").cat.categories))\n",
"item_id_code = dict((v, k) for k, v in item_code_id.items())\n",
"\n",
"test_ui = sparse.csr_matrix((test['rating'], (test['user_code'], test['item_code'])))"
]
},
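{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check (an added sketch, not part of the original pipeline): the code-to-id dictionaries should round-trip, and `test_ui` should have one row per distinct test user and one column per distinct test item."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sanity check: the mappings round-trip and the matrix shape matches the data\n",
"assert all(user_id_code[user_code_id[code]] == code for code in user_code_id)\n",
"assert test_ui.shape == (test['user'].nunique(), test['item'].nunique())\n",
"print('test_ui shape:', test_ui.shape, 'non-zeros:', test_ui.nnz)"
]
},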
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Estimations metrics"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"estimations_df=pd.read_csv('Recommendations generated/ml-100k/Ready_Baseline_estimations.csv', header=None)\n",
"estimations_df.columns=['user', 'item' ,'score']\n",
"\n",
"estimations_df['user_code']=[user_id_code[user] for user in estimations_df['user']]\n",
"estimations_df['item_code']=[item_id_code[item] for item in estimations_df['item']]\n",
"estimations=sparse.csr_matrix((estimations_df['score'], (estimations_df['user_code'], estimations_df['item_code'])), shape=test_ui.shape)"
]
},
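{
"cell_type": "markdown",
"metadata": {},
"source": [
"A hedged aside (an added check, not part of the original pipeline): `estimations_metrics` below compares `estimations.data` with `test_ui.data` element-wise, which is only meaningful when both matrices have entries in exactly the same (user, item) cells. If the estimations file covers exactly the test pairs, the following should hold."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Check that the estimations cover exactly the test-set cells\n",
"assert estimations.nnz == test_ui.nnz\n",
"assert (estimations.indptr == test_ui.indptr).all()\n",
"assert (estimations.indices == test_ui.indices).all()"
]
},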
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def estimations_metrics(test_ui, estimations):\n",
" result=[]\n",
"\n",
" RMSE=(np.sum((estimations.data-test_ui.data)**2)/estimations.nnz)**(1/2)\n",
" result.append(['RMSE', RMSE])\n",
"\n",
" MAE=np.sum(abs(estimations.data-test_ui.data))/estimations.nnz\n",
" result.append(['MAE', MAE])\n",
" \n",
" df_result=(pd.DataFrame(list(zip(*result))[1])).T\n",
" df_result.columns=list(zip(*result))[0]\n",
" return df_result"
]
},
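{
"cell_type": "markdown",
"metadata": {},
"source": [
"A tiny hand-checked example (an added sketch) to see that `estimations_metrics` matches the textbook RMSE/MAE definitions. Note that the function assumes `estimations` and `test_ui` share the same sparsity pattern, so that `estimations.data` and `test_ui.data` line up entry by entry."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Toy check: the errors are (1, -2), so RMSE = sqrt((1+4)/2) ~ 1.5811 and MAE = 3/2\n",
"toy_true = sparse.csr_matrix(np.array([[3, 0], [0, 5]]))\n",
"toy_est = sparse.csr_matrix(np.array([[4, 0], [0, 3]]))\n",
"estimations_metrics(toy_true, toy_est)"
]
},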
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" RMSE | \n",
" MAE | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" 0.949459 | \n",
" 0.752487 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" RMSE MAE\n",
"0 0.949459 0.752487"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# in case of error (in the laboratories) you might have to switch to the other version of pandas\n",
"# try !pip3 install pandas=='1.0.3' (or pip if you use python 2) and restart the kernel\n",
"\n",
"estimations_metrics(test_ui, estimations)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Ranking metrics"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[663, 475, 62, ..., 472, 269, 503],\n",
" [ 48, 313, 475, ..., 591, 175, 466],\n",
" [351, 313, 475, ..., 591, 175, 466],\n",
" ...,\n",
" [259, 313, 475, ..., 11, 591, 175],\n",
" [ 33, 313, 475, ..., 11, 591, 175],\n",
" [ 77, 313, 475, ..., 11, 591, 175]])"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import numpy as np\n",
"reco = np.loadtxt('Recommendations generated/ml-100k/Ready_Baseline_reco.csv', delimiter=',')\n",
"# Let's ignore scores - they are not used in evaluation: \n",
"users=reco[:,:1]\n",
"items=reco[:,1::2]\n",
"# Let's use inner ids instead of real ones\n",
"users=np.vectorize(lambda x: user_id_code.setdefault(x, -1))(users)\n",
"items=np.vectorize(lambda x: item_id_code.setdefault(x, -1))(items) # maybe items we recommend are not in test set\n",
"# Let's put them into one array\n",
"reco=np.concatenate((users, items), axis=1)\n",
"reco"
]
},
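{
"cell_type": "markdown",
"metadata": {},
"source": [
"The slicing above relies on the layout of the reco file: each row is `user, item1, score1, item2, score2, ...`, so `[:, :1]` keeps the user column and `[:, 1::2]` picks out the items while skipping the scores. A tiny illustration with made-up values (an added sketch):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# One made-up reco row: user 7 with items (10, 20, 30) and scores (5.0, 4.5, 4.0)\n",
"demo_row = np.array([[7, 10, 5.0, 20, 4.5, 30, 4.0]])\n",
"print('user:  ', demo_row[:, :1].ravel())    # -> [7.]\n",
"print('items: ', demo_row[:, 1::2].ravel())  # -> [10. 20. 30.]\n",
"print('scores:', demo_row[:, 2::2].ravel())  # dropped in evaluation"
]
},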
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def ranking_metrics(test_ui, reco, super_reactions=[], topK=10):\n",
" \n",
" nb_items=test_ui.shape[1]\n",
" relevant_users, super_relevant_users, prec, rec, F_1, F_05, prec_super, rec_super, ndcg, mAP, MRR, LAUC, HR=\\\n",
" 0,0,0,0,0,0,0,0,0,0,0,0,0\n",
" \n",
" cg = (1.0 / np.log2(np.arange(2, topK + 2)))\n",
" cg_sum = np.cumsum(cg)\n",
" \n",
" for (nb_user, user) in tqdm(enumerate(reco[:,0])):\n",
" u_rated_items=test_ui.indices[test_ui.indptr[user]:test_ui.indptr[user+1]]\n",
" nb_u_rated_items=len(u_rated_items)\n",
" if nb_u_rated_items>0: # skip users with no items in test set (still possible that there will be no super items)\n",
" relevant_users+=1\n",
" \n",
" u_super_items=u_rated_items[np.vectorize(lambda x: x in super_reactions)\\\n",
" (test_ui.data[test_ui.indptr[user]:test_ui.indptr[user+1]])]\n",
" # more natural seems u_super_items=[item for item in u_rated_items if test_ui[user,item] in super_reactions]\n",
" # but accesing test_ui[user,item] is expensive -we should avoid doing it\n",
" if len(u_super_items)>0:\n",
" super_relevant_users+=1\n",
" \n",
" user_successes=np.zeros(topK)\n",
" nb_user_successes=0\n",
" user_super_successes=np.zeros(topK)\n",
" nb_user_super_successes=0\n",
" \n",
" # evaluation\n",
" for (item_position,item) in enumerate(reco[nb_user,1:topK+1]):\n",
" if item in u_rated_items:\n",
" user_successes[item_position]=1\n",
" nb_user_successes+=1\n",
" if item in u_super_items:\n",
" user_super_successes[item_position]=1\n",
" nb_user_super_successes+=1\n",
" \n",
" prec_u=nb_user_successes/topK \n",
" prec+=prec_u\n",
" \n",
" rec_u=nb_user_successes/nb_u_rated_items\n",
" rec+=rec_u\n",
" \n",
" F_1+=2*(prec_u*rec_u)/(prec_u+rec_u) if prec_u+rec_u>0 else 0\n",
" F_05+=(0.5**2+1)*(prec_u*rec_u)/(0.5**2*prec_u+rec_u) if prec_u+rec_u>0 else 0\n",
" \n",
" prec_super+=nb_user_super_successes/topK\n",
" rec_super+=nb_user_super_successes/max(len(u_super_items),1) # to set 0 if no super items\n",
" ndcg+=np.dot(user_successes,cg)/cg_sum[min(topK, nb_u_rated_items)-1]\n",
" \n",
" cumsum_successes=np.cumsum(user_successes)\n",
" mAP+=np.dot(cumsum_successes/np.arange(1,topK+1), user_successes)/min(topK, nb_u_rated_items)\n",
" MRR+=1/(user_successes.nonzero()[0][0]+1) if user_successes.nonzero()[0].size>0 else 0\n",
" LAUC+=(np.dot(cumsum_successes, 1-user_successes)+\\\n",
" (nb_user_successes+nb_u_rated_items)/2*((nb_items-nb_u_rated_items)-(topK-nb_user_successes)))/\\\n",
" ((nb_items-nb_u_rated_items)*nb_u_rated_items)\n",
" \n",
" HR+=nb_user_successes>0\n",
" \n",
" \n",
" result=[]\n",
" result.append(('precision', prec/relevant_users))\n",
" result.append(('recall', rec/relevant_users))\n",
" result.append(('F_1', F_1/relevant_users))\n",
" result.append(('F_05', F_05/relevant_users))\n",
" result.append(('precision_super', prec_super/super_relevant_users))\n",
" result.append(('recall_super', rec_super/super_relevant_users))\n",
" result.append(('NDCG', ndcg/relevant_users))\n",
" result.append(('mAP', mAP/relevant_users))\n",
" result.append(('MRR', MRR/relevant_users))\n",
" result.append(('LAUC', LAUC/relevant_users))\n",
" result.append(('HR', HR/relevant_users))\n",
"\n",
" df_result=(pd.DataFrame(list(zip(*result))[1])).T\n",
" df_result.columns=list(zip(*result))[0]\n",
" return df_result"
]
},
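{
"cell_type": "markdown",
"metadata": {},
"source": [
"To make the `cg`/`cg_sum` bookkeeping concrete, here is a one-user worked example (an added sketch): with hits at ranks 1 and 3 and only 2 relevant items, DCG = 1/log2(2) + 1/log2(4) = 1.5, the ideal DCG is `cg_sum[1]` = 1 + 1/log2(3), and MRR is the reciprocal of the first hit position."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hand-check of the per-user NDCG and MRR computed inside ranking_metrics\n",
"topK = 10\n",
"cg = 1.0 / np.log2(np.arange(2, topK + 2))\n",
"cg_sum = np.cumsum(cg)\n",
"user_successes = np.zeros(topK)\n",
"user_successes[[0, 2]] = 1   # hits at ranks 1 and 3\n",
"nb_u_rated_items = 2         # the user has only 2 relevant items\n",
"ndcg_u = np.dot(user_successes, cg) / cg_sum[min(topK, nb_u_rated_items) - 1]\n",
"mrr_u = 1 / (user_successes.nonzero()[0][0] + 1)\n",
"print(f'NDCG = {ndcg_u:.4f}, MRR = {mrr_u}')  # NDCG = 1.5 / (1 + 1/log2(3)) ~ 0.9197"
]
},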
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"943it [00:00, 6497.15it/s]\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" precision | \n",
" recall | \n",
" F_1 | \n",
" F_05 | \n",
" precision_super | \n",
" recall_super | \n",
" NDCG | \n",
" mAP | \n",
" MRR | \n",
" LAUC | \n",
" HR | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" 0.09141 | \n",
" 0.037652 | \n",
" 0.04603 | \n",
" 0.061286 | \n",
" 0.079614 | \n",
" 0.056463 | \n",
" 0.095957 | \n",
" 0.043178 | \n",
" 0.198193 | \n",
" 0.515501 | \n",
" 0.437964 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" precision recall F_1 F_05 precision_super recall_super \\\n",
"0 0.09141 0.037652 0.04603 0.061286 0.079614 0.056463 \n",
"\n",
" NDCG mAP MRR LAUC HR \n",
"0 0.095957 0.043178 0.198193 0.515501 0.437964 "
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ranking_metrics(test_ui, reco, super_reactions=[4,5], topK=10)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Diversity metrics"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"def diversity_metrics(test_ui, reco, topK=10):\n",
" \n",
" frequencies=defaultdict(int)\n",
" \n",
" # let's assign 0 to all items in test set\n",
" for item in list(set(test_ui.indices)):\n",
" frequencies[item]=0\n",
" \n",
" # counting frequencies\n",
" for item in reco[:,1:].flat:\n",
" frequencies[item]+=1\n",
" \n",
" nb_reco_outside_test=frequencies[-1]\n",
" del frequencies[-1]\n",
" \n",
" frequencies=np.array(list(frequencies.values()))\n",
" \n",
" nb_rec_items=len(frequencies[frequencies>0])\n",
" nb_reco_inside_test=np.sum(frequencies)\n",
" \n",
" frequencies=frequencies/np.sum(frequencies)\n",
" frequencies=np.sort(frequencies)\n",
" \n",
" with np.errstate(divide='ignore'): # let's put zeros put items with 0 frequency and ignore division warning\n",
" log_frequencies=np.nan_to_num(np.log(frequencies), posinf=0, neginf=0)\n",
" \n",
" result=[]\n",
" result.append(('Reco in test', nb_reco_inside_test/(nb_reco_inside_test+nb_reco_outside_test)))\n",
" result.append(('Test coverage', nb_rec_items/test_ui.shape[1]))\n",
" result.append(('Shannon', -np.dot(frequencies, log_frequencies)))\n",
" result.append(('Gini', np.dot(frequencies, np.arange(1-len(frequencies), len(frequencies), 2))/(len(frequencies)-1)))\n",
" \n",
" df_result=(pd.DataFrame(list(zip(*result))[1])).T\n",
" df_result.columns=list(zip(*result))[0]\n",
" return df_result"
]
},
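{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small hand-check of the Shannon and Gini formulas above (an added sketch): a uniform distribution over n items should give entropy log(n) and Gini 0, while putting all mass on a single item should give Gini 1."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Verify the Gini formula used in diversity_metrics on two extreme distributions\n",
"def gini(freq):\n",
"    freq = np.sort(freq / freq.sum())\n",
"    n = len(freq)\n",
"    return np.dot(freq, np.arange(1 - n, n, 2)) / (n - 1)\n",
"\n",
"uniform = np.ones(4) / 4\n",
"peaked = np.array([0., 0., 0., 1.])\n",
"print('uniform: Shannon =', -np.sum(uniform * np.log(uniform)), '(= log 4), Gini =', gini(uniform))\n",
"print('peaked : Gini =', gini(peaked))  # its entropy is 0 by the 0*log(0)=0 convention"
]
},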
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" Reco in test | \n",
" Test coverage | \n",
" Shannon | \n",
" Gini | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" 1.0 | \n",
" 0.033911 | \n",
" 2.836513 | \n",
" 0.991139 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" Reco in test Test coverage Shannon Gini\n",
"0 1.0 0.033911 2.836513 0.991139"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# in case of errors try !pip3 install numpy==1.18.4 (or pip if you use python 2) and restart the kernel\n",
"\n",
"import evaluation_measures as ev\n",
"import imp\n",
"imp.reload(ev)\n",
"\n",
"x=diversity_metrics(test_ui, reco, topK=10)\n",
"x"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# To be used in other notebooks"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"943it [00:00, 5143.71it/s]\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" RMSE | \n",
" MAE | \n",
" precision | \n",
" recall | \n",
" F_1 | \n",
" F_05 | \n",
" precision_super | \n",
" recall_super | \n",
" NDCG | \n",
" mAP | \n",
" MRR | \n",
" LAUC | \n",
" HR | \n",
" Reco in test | \n",
" Test coverage | \n",
" Shannon | \n",
" Gini | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" 0.949459 | \n",
" 0.752487 | \n",
" 0.09141 | \n",
" 0.037652 | \n",
" 0.04603 | \n",
" 0.061286 | \n",
" 0.079614 | \n",
" 0.056463 | \n",
" 0.095957 | \n",
" 0.043178 | \n",
" 0.198193 | \n",
" 0.515501 | \n",
" 0.437964 | \n",
" 1.0 | \n",
" 0.033911 | \n",
" 2.836513 | \n",
" 0.991139 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" RMSE MAE precision recall F_1 F_05 \\\n",
"0 0.949459 0.752487 0.09141 0.037652 0.04603 0.061286 \n",
"\n",
" precision_super recall_super NDCG mAP MRR LAUC \\\n",
"0 0.079614 0.056463 0.095957 0.043178 0.198193 0.515501 \n",
"\n",
" HR Reco in test Test coverage Shannon Gini \n",
"0 0.437964 1.0 0.033911 2.836513 0.991139 "
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import evaluation_measures as ev\n",
"import imp\n",
"imp.reload(ev)\n",
"\n",
"estimations_df=pd.read_csv('Recommendations generated/ml-100k/Ready_Baseline_estimations.csv', header=None)\n",
"reco=np.loadtxt('Recommendations generated/ml-100k/Ready_Baseline_reco.csv', delimiter=',')\n",
"\n",
"ev.evaluate(test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None),\n",
" estimations_df=estimations_df, \n",
" reco=reco,\n",
" super_reactions=[4,5])\n",
"#also you can just type ev.evaluate_all(estimations_df, reco) - I put above values as default"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"943it [00:00, 3573.64it/s]\n",
"943it [00:00, 5141.54it/s]\n",
"943it [00:00, 2827.19it/s]\n",
"943it [00:00, 2513.13it/s]\n",
"943it [00:00, 3555.67it/s]\n"
]
}
],
"source": [
"import evaluation_measures as ev\n",
"import imp\n",
"imp.reload(ev)\n",
"\n",
"dir_path=\"Recommendations generated/ml-100k/\"\n",
"super_reactions=[4,5]\n",
"test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None)\n",
"\n",
"df=ev.evaluate_all(test, dir_path, super_reactions)\n",
"#also you can just type ev.evaluate_all() - I put above values as default"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" Model | \n",
" RMSE | \n",
" MAE | \n",
" precision | \n",
" recall | \n",
" F_1 | \n",
" F_05 | \n",
" precision_super | \n",
" recall_super | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" Self_TopPop | \n",
" 2.508258 | \n",
" 2.217909 | \n",
" 0.188865 | \n",
" 0.116919 | \n",
" 0.118732 | \n",
" 0.141584 | \n",
" 0.130472 | \n",
" 0.137473 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_Baseline | \n",
" 0.949459 | \n",
" 0.752487 | \n",
" 0.091410 | \n",
" 0.037652 | \n",
" 0.046030 | \n",
" 0.061286 | \n",
" 0.079614 | \n",
" 0.056463 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_Random | \n",
" 1.525959 | \n",
" 1.225122 | \n",
" 0.047402 | \n",
" 0.020629 | \n",
" 0.024471 | \n",
" 0.032042 | \n",
" 0.027682 | \n",
" 0.019353 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_TopRated | \n",
" 1.030712 | \n",
" 0.820904 | \n",
" 0.000954 | \n",
" 0.000188 | \n",
" 0.000298 | \n",
" 0.000481 | \n",
" 0.000644 | \n",
" 0.000223 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_BaselineUI | \n",
" 0.967585 | \n",
" 0.762740 | \n",
" 0.000954 | \n",
" 0.000170 | \n",
" 0.000278 | \n",
" 0.000463 | \n",
" 0.000644 | \n",
" 0.000189 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" Model RMSE MAE precision recall F_1 \\\n",
"0 Self_TopPop 2.508258 2.217909 0.188865 0.116919 0.118732 \n",
"0 Ready_Baseline 0.949459 0.752487 0.091410 0.037652 0.046030 \n",
"0 Ready_Random 1.525959 1.225122 0.047402 0.020629 0.024471 \n",
"0 Self_TopRated 1.030712 0.820904 0.000954 0.000188 0.000298 \n",
"0 Self_BaselineUI 0.967585 0.762740 0.000954 0.000170 0.000278 \n",
"\n",
" F_05 precision_super recall_super \n",
"0 0.141584 0.130472 0.137473 \n",
"0 0.061286 0.079614 0.056463 \n",
"0 0.032042 0.027682 0.019353 \n",
"0 0.000481 0.000644 0.000223 \n",
"0 0.000463 0.000644 0.000189 "
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df.iloc[:,:9]"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" Model | \n",
" NDCG | \n",
" mAP | \n",
" MRR | \n",
" LAUC | \n",
" HR | \n",
" Reco in test | \n",
" Test coverage | \n",
" Shannon | \n",
" Gini | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" Self_TopPop | \n",
" 0.214651 | \n",
" 0.111707 | \n",
" 0.400939 | \n",
" 0.555546 | \n",
" 0.765642 | \n",
" 1.000000 | \n",
" 0.038961 | \n",
" 3.159079 | \n",
" 0.987317 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_Baseline | \n",
" 0.095957 | \n",
" 0.043178 | \n",
" 0.198193 | \n",
" 0.515501 | \n",
" 0.437964 | \n",
" 1.000000 | \n",
" 0.033911 | \n",
" 2.836513 | \n",
" 0.991139 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_Random | \n",
" 0.051593 | \n",
" 0.019428 | \n",
" 0.129062 | \n",
" 0.506826 | \n",
" 0.336161 | \n",
" 0.987593 | \n",
" 0.175325 | \n",
" 5.087656 | \n",
" 0.908118 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_TopRated | \n",
" 0.001043 | \n",
" 0.000335 | \n",
" 0.003348 | \n",
" 0.496433 | \n",
" 0.009544 | \n",
" 0.699046 | \n",
" 0.005051 | \n",
" 1.945910 | \n",
" 0.995669 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_BaselineUI | \n",
" 0.000752 | \n",
" 0.000168 | \n",
" 0.001677 | \n",
" 0.496424 | \n",
" 0.009544 | \n",
" 0.600530 | \n",
" 0.005051 | \n",
" 1.803126 | \n",
" 0.996380 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" Model NDCG mAP MRR LAUC HR \\\n",
"0 Self_TopPop 0.214651 0.111707 0.400939 0.555546 0.765642 \n",
"0 Ready_Baseline 0.095957 0.043178 0.198193 0.515501 0.437964 \n",
"0 Ready_Random 0.051593 0.019428 0.129062 0.506826 0.336161 \n",
"0 Self_TopRated 0.001043 0.000335 0.003348 0.496433 0.009544 \n",
"0 Self_BaselineUI 0.000752 0.000168 0.001677 0.496424 0.009544 \n",
"\n",
" Reco in test Test coverage Shannon Gini \n",
"0 1.000000 0.038961 3.159079 0.987317 \n",
"0 1.000000 0.033911 2.836513 0.991139 \n",
"0 0.987593 0.175325 5.087656 0.908118 \n",
"0 0.699046 0.005051 1.945910 0.995669 \n",
"0 0.600530 0.005051 1.803126 0.996380 "
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df.iloc[:,np.append(0,np.arange(9, df.shape[1]))]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Check metrics on toy dataset"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"3it [00:00, 1191.68it/s]\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" Model | \n",
" RMSE | \n",
" MAE | \n",
" precision | \n",
" recall | \n",
" F_1 | \n",
" F_05 | \n",
" precision_super | \n",
" recall_super | \n",
" NDCG | \n",
" mAP | \n",
" MRR | \n",
" LAUC | \n",
" HR | \n",
" Reco in test | \n",
" Test coverage | \n",
" Shannon | \n",
" Gini | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" Self_BaselineUI | \n",
" 1.612452 | \n",
" 1.4 | \n",
" 0.444444 | \n",
" 0.888889 | \n",
" 0.555556 | \n",
" 0.478632 | \n",
" 0.333333 | \n",
" 0.75 | \n",
" 0.676907 | \n",
" 0.574074 | \n",
" 0.611111 | \n",
" 0.638889 | \n",
" 1.0 | \n",
" 0.888889 | \n",
" 0.8 | \n",
" 1.386294 | \n",
" 0.25 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" Model RMSE MAE precision recall F_1 F_05 \\\n",
"0 Self_BaselineUI 1.612452 1.4 0.444444 0.888889 0.555556 0.478632 \n",
"\n",
" precision_super recall_super NDCG mAP MRR LAUC HR \\\n",
"0 0.333333 0.75 0.676907 0.574074 0.611111 0.638889 1.0 \n",
"\n",
" Reco in test Test coverage Shannon Gini \n",
"0 0.888889 0.8 1.386294 0.25 "
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Training data:\n"
]
},
{
"data": {
"text/plain": [
"matrix([[3, 4, 0, 0, 5, 0, 0, 4],\n",
" [0, 1, 2, 3, 0, 0, 0, 0],\n",
" [0, 0, 0, 5, 0, 3, 4, 0]])"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Test data:\n"
]
},
{
"data": {
"text/plain": [
"matrix([[0, 0, 0, 0, 0, 0, 3, 0],\n",
" [0, 0, 0, 0, 5, 0, 0, 0],\n",
" [5, 0, 4, 0, 0, 0, 0, 2]])"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Recommendations:\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" 0 | \n",
" 1 | \n",
" 2 | \n",
" 3 | \n",
" 4 | \n",
" 5 | \n",
" 6 | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" 0 | \n",
" 30 | \n",
" 5.0 | \n",
" 20 | \n",
" 4.0 | \n",
" 60 | \n",
" 4.0 | \n",
"
\n",
" \n",
" 1 | \n",
" 10 | \n",
" 40 | \n",
" 3.0 | \n",
" 60 | \n",
" 2.0 | \n",
" 70 | \n",
" 2.0 | \n",
"
\n",
" \n",
" 2 | \n",
" 20 | \n",
" 40 | \n",
" 5.0 | \n",
" 20 | \n",
" 4.0 | \n",
" 70 | \n",
" 4.0 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" 0 1 2 3 4 5 6\n",
"0 0 30 5.0 20 4.0 60 4.0\n",
"1 10 40 3.0 60 2.0 70 2.0\n",
"2 20 40 5.0 20 4.0 70 4.0"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Estimations:\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" user | \n",
" item | \n",
" est_score | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" 0 | \n",
" 60 | \n",
" 4.0 | \n",
"
\n",
" \n",
" 1 | \n",
" 10 | \n",
" 40 | \n",
" 3.0 | \n",
"
\n",
" \n",
" 2 | \n",
" 20 | \n",
" 0 | \n",
" 3.0 | \n",
"
\n",
" \n",
" 3 | \n",
" 20 | \n",
" 20 | \n",
" 4.0 | \n",
"
\n",
" \n",
" 4 | \n",
" 20 | \n",
" 70 | \n",
" 4.0 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" user item est_score\n",
"0 0 60 4.0\n",
"1 10 40 3.0\n",
"2 20 0 3.0\n",
"3 20 20 4.0\n",
"4 20 70 4.0"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import evaluation_measures as ev\n",
"import imp\n",
"import helpers\n",
"imp.reload(ev)\n",
"\n",
"dir_path=\"Recommendations generated/toy-example/\"\n",
"super_reactions=[4,5]\n",
"test=pd.read_csv('./Datasets/toy-example/test.csv', sep='\\t', header=None)\n",
"\n",
"display(ev.evaluate_all(test, dir_path, super_reactions, topK=3))\n",
"#also you can just type ev.evaluate_all() - I put above values as default\n",
"\n",
"toy_train_read=pd.read_csv('./Datasets/toy-example/train.csv', sep='\\t', header=None, names=['user', 'item', 'rating', 'timestamp'])\n",
"toy_test_read=pd.read_csv('./Datasets/toy-example/test.csv', sep='\\t', header=None, names=['user', 'item', 'rating', 'timestamp'])\n",
"reco=pd.read_csv('Recommendations generated/toy-example/Self_BaselineUI_reco.csv', header=None)\n",
"estimations=pd.read_csv('Recommendations generated/toy-example/Self_BaselineUI_estimations.csv', names=['user', 'item', 'est_score'])\n",
"toy_train_ui, toy_test_ui, toy_user_code_id, toy_user_id_code, \\\n",
"toy_item_code_id, toy_item_id_code = helpers.data_to_csr(toy_train_read, toy_test_read)\n",
"\n",
"print('Training data:')\n",
"display(toy_train_ui.todense())\n",
"\n",
"print('Test data:')\n",
"display(toy_test_ui.todense())\n",
"\n",
"print('Recommendations:')\n",
"display(reco)\n",
"\n",
"print('Estimations:')\n",
"display(estimations)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Sample recommendations"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Here is what user rated high:\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" user | \n",
" rating | \n",
" title | \n",
" genres | \n",
"
\n",
" \n",
" \n",
" \n",
" 50941 | \n",
" 661 | \n",
" 5 | \n",
" It's a Wonderful Life (1946) | \n",
" Drama | \n",
"
\n",
" \n",
" 9531 | \n",
" 661 | \n",
" 5 | \n",
" Wizard of Oz, The (1939) | \n",
" Adventure, Children's, Drama, Musical | \n",
"
\n",
" \n",
" 27182 | \n",
" 661 | \n",
" 5 | \n",
" Empire Strikes Back, The (1980) | \n",
" Action, Adventure, Drama, Romance, Sci-Fi, War | \n",
"
\n",
" \n",
" 23944 | \n",
" 661 | \n",
" 5 | \n",
" Apocalypse Now (1979) | \n",
" Drama, War | \n",
"
\n",
" \n",
" 20285 | \n",
" 661 | \n",
" 5 | \n",
" Return of the Jedi (1983) | \n",
" Action, Adventure, Romance, Sci-Fi, War | \n",
"
\n",
" \n",
" 37504 | \n",
" 661 | \n",
" 5 | \n",
" Aladdin (1992) | \n",
" Animation, Children's, Comedy, Musical | \n",
"
\n",
" \n",
" 68312 | \n",
" 661 | \n",
" 5 | \n",
" Babe (1995) | \n",
" Children's, Comedy, Drama | \n",
"
\n",
" \n",
" 16362 | \n",
" 661 | \n",
" 5 | \n",
" Apollo 13 (1995) | \n",
" Action, Drama, Thriller | \n",
"
\n",
" \n",
" 15168 | \n",
" 661 | \n",
" 5 | \n",
" Indiana Jones and the Last Crusade (1989) | \n",
" Action, Adventure | \n",
"
\n",
" \n",
" 29402 | \n",
" 661 | \n",
" 5 | \n",
" Psycho (1960) | \n",
" Horror, Romance, Thriller | \n",
"
\n",
" \n",
" 40755 | \n",
" 661 | \n",
" 5 | \n",
" Jean de Florette (1986) | \n",
" Drama | \n",
"
\n",
" \n",
" 41950 | \n",
" 661 | \n",
" 5 | \n",
" Die Hard (1988) | \n",
" Action, Thriller | \n",
"
\n",
" \n",
" 58932 | \n",
" 661 | \n",
" 5 | \n",
" Enchanted April (1991) | \n",
" Drama | \n",
"
\n",
" \n",
" 43013 | \n",
" 661 | \n",
" 5 | \n",
" 2001: A Space Odyssey (1968) | \n",
" Drama, Mystery, Sci-Fi, Thriller | \n",
"
\n",
" \n",
" 65664 | \n",
" 661 | \n",
" 5 | \n",
" Star Trek: The Wrath of Khan (1982) | \n",
" Action, Adventure, Sci-Fi | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" user rating title \\\n",
"50941 661 5 It's a Wonderful Life (1946) \n",
"9531 661 5 Wizard of Oz, The (1939) \n",
"27182 661 5 Empire Strikes Back, The (1980) \n",
"23944 661 5 Apocalypse Now (1979) \n",
"20285 661 5 Return of the Jedi (1983) \n",
"37504 661 5 Aladdin (1992) \n",
"68312 661 5 Babe (1995) \n",
"16362 661 5 Apollo 13 (1995) \n",
"15168 661 5 Indiana Jones and the Last Crusade (1989) \n",
"29402 661 5 Psycho (1960) \n",
"40755 661 5 Jean de Florette (1986) \n",
"41950 661 5 Die Hard (1988) \n",
"58932 661 5 Enchanted April (1991) \n",
"43013 661 5 2001: A Space Odyssey (1968) \n",
"65664 661 5 Star Trek: The Wrath of Khan (1982) \n",
"\n",
" genres \n",
"50941 Drama \n",
"9531 Adventure, Children's, Drama, Musical \n",
"27182 Action, Adventure, Drama, Romance, Sci-Fi, War \n",
"23944 Drama, War \n",
"20285 Action, Adventure, Romance, Sci-Fi, War \n",
"37504 Animation, Children's, Comedy, Musical \n",
"68312 Children's, Comedy, Drama \n",
"16362 Action, Drama, Thriller \n",
"15168 Action, Adventure \n",
"29402 Horror, Romance, Thriller \n",
"40755 Drama \n",
"41950 Action, Thriller \n",
"58932 Drama \n",
"43013 Drama, Mystery, Sci-Fi, Thriller \n",
"65664 Action, Adventure, Sci-Fi "
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Here is what we recommend:\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" user | \n",
" rec_nb | \n",
" title | \n",
" genres | \n",
"
\n",
" \n",
" \n",
" \n",
" 659 | \n",
" 661.0 | \n",
" 1 | \n",
" Great Day in Harlem, A (1994) | \n",
" Documentary | \n",
"
\n",
" \n",
" 1601 | \n",
" 661.0 | \n",
" 2 | \n",
" Tough and Deadly (1995) | \n",
" Action, Drama, Thriller | \n",
"
\n",
" \n",
" 2543 | \n",
" 661.0 | \n",
" 3 | \n",
" Aiqing wansui (1994) | \n",
" Drama | \n",
"
\n",
" \n",
" 3485 | \n",
" 661.0 | \n",
" 4 | \n",
" Delta of Venus (1994) | \n",
" Drama | \n",
"
\n",
" \n",
" 4427 | \n",
" 661.0 | \n",
" 5 | \n",
" Someone Else's America (1995) | \n",
" Drama | \n",
"
\n",
" \n",
" 5369 | \n",
" 661.0 | \n",
" 6 | \n",
" Saint of Fort Washington, The (1993) | \n",
" Drama | \n",
"
\n",
" \n",
" 6311 | \n",
" 661.0 | \n",
" 7 | \n",
" Celestial Clockwork (1994) | \n",
" Comedy | \n",
"
\n",
" \n",
" 7253 | \n",
" 661.0 | \n",
" 8 | \n",
" Some Mother's Son (1996) | \n",
" Drama | \n",
"
\n",
" \n",
" 9148 | \n",
" 661.0 | \n",
" 9 | \n",
" Maya Lin: A Strong Clear Vision (1994) | \n",
" Documentary | \n",
"
\n",
" \n",
" 8194 | \n",
" 661.0 | \n",
" 10 | \n",
" Prefontaine (1997) | \n",
" Drama | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" user rec_nb title \\\n",
"659 661.0 1 Great Day in Harlem, A (1994) \n",
"1601 661.0 2 Tough and Deadly (1995) \n",
"2543 661.0 3 Aiqing wansui (1994) \n",
"3485 661.0 4 Delta of Venus (1994) \n",
"4427 661.0 5 Someone Else's America (1995) \n",
"5369 661.0 6 Saint of Fort Washington, The (1993) \n",
"6311 661.0 7 Celestial Clockwork (1994) \n",
"7253 661.0 8 Some Mother's Son (1996) \n",
"9148 661.0 9 Maya Lin: A Strong Clear Vision (1994) \n",
"8194 661.0 10 Prefontaine (1997) \n",
"\n",
" genres \n",
"659 Documentary \n",
"1601 Action, Drama, Thriller \n",
"2543 Drama \n",
"3485 Drama \n",
"4427 Drama \n",
"5369 Drama \n",
"6311 Comedy \n",
"7253 Drama \n",
"9148 Documentary \n",
"8194 Drama "
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train=pd.read_csv('./Datasets/ml-100k/train.csv', sep='\\t', header=None, names=['user', 'item', 'rating', 'timestamp'])\n",
"items=pd.read_csv('./Datasets/ml-100k/movies.csv')\n",
"\n",
"user=random.choice(list(set(train['user'])))\n",
"\n",
"train_content=pd.merge(train, items, left_on='item', right_on='id')\n",
"\n",
"print('Here is what user rated high:')\n",
"display(train_content[train_content['user']==user][['user', 'rating', 'title', 'genres']]\\\n",
" .sort_values(by='rating', ascending=False)[:15])\n",
"\n",
"reco = np.loadtxt('Recommendations generated/ml-100k/Self_BaselineUI_reco.csv', delimiter=',')\n",
"items=pd.read_csv('./Datasets/ml-100k/movies.csv')\n",
"\n",
"# Let's ignore scores - they are not used in evaluation: \n",
"reco_users=reco[:,:1]\n",
"reco_items=reco[:,1::2]\n",
"# Let's put them into one array\n",
"reco=np.concatenate((reco_users, reco_items), axis=1)\n",
"\n",
"# Let's rebuild it user-item dataframe\n",
"recommended=[]\n",
"for row in reco:\n",
" for rec_nb, entry in enumerate(row[1:]):\n",
" recommended.append((row[0], rec_nb+1, entry))\n",
"recommended=pd.DataFrame(recommended, columns=['user','rec_nb', 'item'])\n",
"\n",
"recommended_content=pd.merge(recommended, items, left_on='item', right_on='id')\n",
"\n",
"print('Here is what we recommend:')\n",
"recommended_content[recommended_content['user']==user][['user', 'rec_nb', 'title', 'genres']].sort_values(by='rec_nb')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# project task 3: implement some other evaluation measure"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"# it may be your idea, modification of what we have already implemented \n",
"# (for example Hit2 rate which would count as a success users whoreceived at least 2 relevant recommendations) \n",
"# or something well-known\n",
"# expected output: modification of evaluation_measures.py such that evaluate_all will also display your measure"
]
},
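{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of the Hit2 rate idea mentioned above, written as a standalone function rather than the required `evaluation_measures.py` modification. It assumes `reco` holds inner user/item codes aligned with `test_ui`, as in the Ranking metrics section; the actual task is to fold the same counting into `ranking_metrics` in `evaluation_measures.py`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def hit_rates(test_ui, reco, topK=10):\n",
"    # HR@K and HR2@K: share of test users with at least 1 (resp. 2) hits in the top K\n",
"    hr, hr2, relevant_users = 0, 0, 0\n",
"    for nb_user, user in enumerate(reco[:, 0].astype(int)):\n",
"        u_rated_items = test_ui.indices[test_ui.indptr[user]:test_ui.indptr[user + 1]]\n",
"        if len(u_rated_items) == 0:  # skip users with no items in the test set\n",
"            continue\n",
"        relevant_users += 1\n",
"        nb_hits = sum(item in u_rated_items for item in reco[nb_user, 1:topK + 1])\n",
"        hr += nb_hits > 0\n",
"        hr2 += nb_hits > 1\n",
"    return pd.DataFrame([[hr / relevant_users, hr2 / relevant_users]], columns=['HR', 'HR2'])"
]
},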
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"943it [00:00, 4220.01it/s]\n",
"943it [00:00, 3015.35it/s]\n",
"943it [00:00, 2308.31it/s]\n",
"943it [00:00, 3461.11it/s]\n",
"943it [00:00, 3442.41it/s]\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" Model | \n",
" RMSE | \n",
" MAE | \n",
" precision | \n",
" recall | \n",
" F_1 | \n",
" F_05 | \n",
" precision_super | \n",
" recall_super | \n",
" NDCG | \n",
" mAP | \n",
" MRR | \n",
" LAUC | \n",
" HR | \n",
" Reco in test | \n",
" Test coverage | \n",
" Shannon | \n",
" Gini | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" Self_TopPop | \n",
" 2.508258 | \n",
" 2.217909 | \n",
" 0.188865 | \n",
" 0.116919 | \n",
" 0.118732 | \n",
" 0.141584 | \n",
" 0.130472 | \n",
" 0.137473 | \n",
" 0.214651 | \n",
" 0.111707 | \n",
" 0.400939 | \n",
" 0.555546 | \n",
" 0.765642 | \n",
" 1.000000 | \n",
" 0.038961 | \n",
" 3.159079 | \n",
" 0.987317 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_Baseline | \n",
" 0.949459 | \n",
" 0.752487 | \n",
" 0.091410 | \n",
" 0.037652 | \n",
" 0.046030 | \n",
" 0.061286 | \n",
" 0.079614 | \n",
" 0.056463 | \n",
" 0.095957 | \n",
" 0.043178 | \n",
" 0.198193 | \n",
" 0.515501 | \n",
" 0.437964 | \n",
" 1.000000 | \n",
" 0.033911 | \n",
" 2.836513 | \n",
" 0.991139 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_Random | \n",
" 1.525959 | \n",
" 1.225122 | \n",
" 0.047402 | \n",
" 0.020629 | \n",
" 0.024471 | \n",
" 0.032042 | \n",
" 0.027682 | \n",
" 0.019353 | \n",
" 0.051593 | \n",
" 0.019428 | \n",
" 0.129062 | \n",
" 0.506826 | \n",
" 0.336161 | \n",
" 0.987593 | \n",
" 0.175325 | \n",
" 5.087656 | \n",
" 0.908118 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_TopRated | \n",
" 1.030712 | \n",
" 0.820904 | \n",
" 0.000954 | \n",
" 0.000188 | \n",
" 0.000298 | \n",
" 0.000481 | \n",
" 0.000644 | \n",
" 0.000223 | \n",
" 0.001043 | \n",
" 0.000335 | \n",
" 0.003348 | \n",
" 0.496433 | \n",
" 0.009544 | \n",
" 0.699046 | \n",
" 0.005051 | \n",
" 1.945910 | \n",
" 0.995669 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_BaselineUI | \n",
" 0.967585 | \n",
" 0.762740 | \n",
" 0.000954 | \n",
" 0.000170 | \n",
" 0.000278 | \n",
" 0.000463 | \n",
" 0.000644 | \n",
" 0.000189 | \n",
" 0.000752 | \n",
" 0.000168 | \n",
" 0.001677 | \n",
" 0.496424 | \n",
" 0.009544 | \n",
" 0.600530 | \n",
" 0.005051 | \n",
" 1.803126 | \n",
" 0.996380 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" Model RMSE MAE precision recall F_1 \\\n",
"0 Self_TopPop 2.508258 2.217909 0.188865 0.116919 0.118732 \n",
"0 Ready_Baseline 0.949459 0.752487 0.091410 0.037652 0.046030 \n",
"0 Ready_Random 1.525959 1.225122 0.047402 0.020629 0.024471 \n",
"0 Self_TopRated 1.030712 0.820904 0.000954 0.000188 0.000298 \n",
"0 Self_BaselineUI 0.967585 0.762740 0.000954 0.000170 0.000278 \n",
"\n",
" F_05 precision_super recall_super NDCG mAP MRR \\\n",
"0 0.141584 0.130472 0.137473 0.214651 0.111707 0.400939 \n",
"0 0.061286 0.079614 0.056463 0.095957 0.043178 0.198193 \n",
"0 0.032042 0.027682 0.019353 0.051593 0.019428 0.129062 \n",
"0 0.000481 0.000644 0.000223 0.001043 0.000335 0.003348 \n",
"0 0.000463 0.000644 0.000189 0.000752 0.000168 0.001677 \n",
"\n",
" LAUC HR Reco in test Test coverage Shannon Gini \n",
"0 0.555546 0.765642 1.000000 0.038961 3.159079 0.987317 \n",
"0 0.515501 0.437964 1.000000 0.033911 2.836513 0.991139 \n",
"0 0.506826 0.336161 0.987593 0.175325 5.087656 0.908118 \n",
"0 0.496433 0.009544 0.699046 0.005051 1.945910 0.995669 \n",
"0 0.496424 0.009544 0.600530 0.005051 1.803126 0.996380 "
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"dir_path=\"Recommendations generated/ml-100k/\"\n",
"super_reactions=[4,5]\n",
"test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None)\n",
"\n",
"ev.evaluate_all(test, dir_path, super_reactions)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}