{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prepare test set"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {
"slideshow": {
"slide_type": "-"
}
},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"import scipy.sparse as sparse\n",
"from collections import defaultdict\n",
"from itertools import chain\n",
"import random\n",
"from tqdm import tqdm\n",
"\n",
"# In evaluation we do not load train set - it is not needed\n",
"test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None)\n",
"test.columns=['user', 'item', 'rating', 'timestamp']\n",
"\n",
"test['user_code'] = test['user'].astype(\"category\").cat.codes\n",
"test['item_code'] = test['item'].astype(\"category\").cat.codes\n",
"\n",
"user_code_id = dict(enumerate(test['user'].astype(\"category\").cat.categories))\n",
"user_id_code = dict((v, k) for k, v in user_code_id.items())\n",
"item_code_id = dict(enumerate(test['item'].astype(\"category\").cat.categories))\n",
"item_id_code = dict((v, k) for k, v in item_code_id.items())\n",
"\n",
"test_ui = sparse.csr_matrix((test['rating'], (test['user_code'], test['item_code'])))"
]
},
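{
  "cell_type": "markdown",
  "metadata": {},
  "source": [
   "A quick sanity check (a minimal sketch, not part of the original pipeline): the `user_id_code`/`user_code_id` dictionaries built above should round-trip, and the shape of `test_ui` should equal the number of distinct users and items in the test set."
  ]
},
{
  "cell_type": "code",
  "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "# Sketch: verify that the id <-> code mappings round-trip and that the matrix shape is as expected\n",
   "some_user = test['user'].iloc[0]\n",
   "assert user_code_id[user_id_code[some_user]] == some_user\n",
   "\n",
   "print('test_ui shape:', test_ui.shape)\n",
   "print('distinct users:', test['user'].nunique(), ' distinct items:', test['item'].nunique())"
  ]
},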
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Estimations metrics"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"outputs": [],
"source": [
"estimations_df=pd.read_csv('Recommendations generated/ml-100k/Ready_Baseline_estimations.csv', header=None)\n",
"estimations_df.columns=['user', 'item' ,'score']\n",
"\n",
"estimations_df['user_code']=[user_id_code[user] for user in estimations_df['user']]\n",
"estimations_df['item_code']=[item_id_code[item] for item in estimations_df['item']]\n",
"estimations=sparse.csr_matrix((estimations_df['score'], (estimations_df['user_code'], estimations_df['item_code'])), shape=test_ui.shape)"
]
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [],
"source": [
"def estimations_metrics(test_ui, estimations):\n",
" result=[]\n",
"\n",
" RMSE=(np.sum((estimations.data-test_ui.data)**2)/estimations.nnz)**(1/2)\n",
" result.append(['RMSE', RMSE])\n",
"\n",
" MAE=np.sum(abs(estimations.data-test_ui.data))/estimations.nnz\n",
" result.append(['MAE', MAE])\n",
" \n",
" df_result=(pd.DataFrame(list(zip(*result))[1])).T\n",
" df_result.columns=list(zip(*result))[0]\n",
" return df_result"
]
},
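{
  "cell_type": "markdown",
  "metadata": {},
  "source": [
   "Note that `estimations_metrics` subtracts `estimations.data` from `test_ui.data` element-wise, so it implicitly assumes both sparse matrices share exactly the same nonzero pattern (one estimated score per test pair). Below is a minimal sketch on hypothetical toy matrices showing how RMSE and MAE come out of that difference; it is only an illustration, not part of the evaluation pipeline."
  ]
},
{
  "cell_type": "code",
  "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "# Sketch on hypothetical toy data: both matrices have the same nonzero pattern, so the .data arrays align\n",
   "toy_test = sparse.csr_matrix(np.array([[5, 0, 3],\n",
   "                                        [0, 4, 0]]))\n",
   "toy_est = sparse.csr_matrix(np.array([[4.5, 0, 2.0],\n",
   "                                       [0, 4.5, 0]]))\n",
   "\n",
   "# differences on the stored entries are -0.5, -1.0 and 0.5, so RMSE ~= 0.707 and MAE ~= 0.667\n",
   "estimations_metrics(toy_test, toy_est)"
  ]
},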
{
"cell_type": "code",
"execution_count": 36,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" RMSE | \n",
" MAE | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" 0.949459 | \n",
" 0.752487 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" RMSE MAE\n",
"0 0.949459 0.752487"
]
},
"execution_count": 36,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"estimations_metrics(test_ui, estimations)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Ranking metrics"
]
},
{
"cell_type": "code",
"execution_count": 39,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[663, 475, 62, ..., 472, 269, 503],\n",
" [ 48, 313, 475, ..., 591, 175, 466],\n",
" [351, 313, 475, ..., 591, 175, 466],\n",
" ...,\n",
" [259, 313, 475, ..., 11, 591, 175],\n",
" [ 33, 313, 475, ..., 11, 591, 175],\n",
" [ 77, 313, 475, ..., 11, 591, 175]])"
]
},
"execution_count": 39,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import numpy as np\n",
"reco = np.loadtxt('Recommendations generated/ml-100k/Ready_Baseline_reco.csv', delimiter=',')\n",
"# Let's ignore scores - they are not used in evaluation: \n",
"users=reco[:,:1]\n",
"items=reco[:,1::2]\n",
"# Let's use inner ids instead of real ones\n",
"users=np.vectorize(lambda x: user_id_code.setdefault(x, -1))(users)\n",
"items=np.vectorize(lambda x: item_id_code.setdefault(x, -1))(items) # maybe items we recommend are not in test set\n",
"# Let's put them into one array\n",
"reco=np.concatenate((users, items), axis=1)\n",
"reco"
]
},
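{
  "cell_type": "markdown",
  "metadata": {},
  "source": [
   "Items that we recommend but that never appear in the test set are mapped to the code -1 above. A small sketch (just a sanity check, not required by the pipeline) counting how often that happens:"
  ]
},
{
  "cell_type": "code",
  "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "# Sketch: how many recommended item slots fall outside the test set (mapped to -1)?\n",
   "nb_outside = np.sum(reco[:, 1:] == -1)\n",
   "print(nb_outside, 'of', reco[:, 1:].size, 'recommended item slots are not in the test set')"
  ]
},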
{
"cell_type": "code",
"execution_count": 40,
"metadata": {},
"outputs": [],
"source": [
"def ranking_metrics(test_ui, reco, super_reactions=[], topK=10):\n",
" \n",
" nb_items=test_ui.shape[1]\n",
" relevant_users, super_relevant_users, prec, rec, F_1, F_05, prec_super, rec_super, ndcg, mAP, MRR, LAUC, HR=\\\n",
" 0,0,0,0,0,0,0,0,0,0,0,0,0\n",
" \n",
" cg = (1.0 / np.log2(np.arange(2, topK + 2)))\n",
" cg_sum = np.cumsum(cg)\n",
" \n",
" for (nb_user, user) in tqdm(enumerate(reco[:,0])):\n",
" u_rated_items=test_ui.indices[test_ui.indptr[user]:test_ui.indptr[user+1]]\n",
" nb_u_rated_items=len(u_rated_items)\n",
" if nb_u_rated_items>0: # skip users with no items in test set (still possible that there will be no super items)\n",
" relevant_users+=1\n",
" \n",
" u_super_items=u_rated_items[np.vectorize(lambda x: x in super_reactions)\\\n",
" (test_ui.data[test_ui.indptr[user]:test_ui.indptr[user+1]])]\n",
" # more natural seems u_super_items=[item for item in u_rated_items if test_ui[user,item] in super_reactions]\n",
" # but accesing test_ui[user,item] is expensive -we should avoid doing it\n",
" if len(u_super_items)>0:\n",
" super_relevant_users+=1\n",
" \n",
" user_successes=np.zeros(topK)\n",
" nb_user_successes=0\n",
" user_super_successes=np.zeros(topK)\n",
" nb_user_super_successes=0\n",
" \n",
" # evaluation\n",
" for (item_position,item) in enumerate(reco[nb_user,1:topK+1]):\n",
" if item in u_rated_items:\n",
" user_successes[item_position]=1\n",
" nb_user_successes+=1\n",
" if item in u_super_items:\n",
" user_super_successes[item_position]=1\n",
" nb_user_super_successes+=1\n",
" \n",
" prec_u=nb_user_successes/topK \n",
" prec+=prec_u\n",
" \n",
" rec_u=nb_user_successes/nb_u_rated_items\n",
" rec+=rec_u\n",
" \n",
" F_1+=2*(prec_u*rec_u)/(prec_u+rec_u) if prec_u+rec_u>0 else 0\n",
" F_05+=(0.5**2+1)*(prec_u*rec_u)/(0.5**2*prec_u+rec_u) if prec_u+rec_u>0 else 0\n",
" \n",
" prec_super+=nb_user_super_successes/topK\n",
" rec_super+=nb_user_super_successes/max(len(u_super_items),1) # to set 0 if no super items\n",
" ndcg+=np.dot(user_successes,cg)/cg_sum[min(topK, nb_u_rated_items)-1]\n",
" \n",
" cumsum_successes=np.cumsum(user_successes)\n",
" mAP+=np.dot(cumsum_successes/np.arange(1,topK+1), user_successes)/min(topK, nb_u_rated_items)\n",
" MRR+=1/(user_successes.nonzero()[0][0]+1) if user_successes.nonzero()[0].size>0 else 0\n",
" LAUC+=(np.dot(cumsum_successes, 1-user_successes)+\\\n",
" (nb_user_successes+nb_u_rated_items)/2*((nb_items-nb_u_rated_items)-(topK-nb_user_successes)))/\\\n",
" ((nb_items-nb_u_rated_items)*nb_u_rated_items)\n",
" \n",
" HR+=nb_user_successes>0\n",
" \n",
" \n",
" result=[]\n",
" result.append(('precision', prec/relevant_users))\n",
" result.append(('recall', rec/relevant_users))\n",
" result.append(('F_1', F_1/relevant_users))\n",
" result.append(('F_05', F_05/relevant_users))\n",
" result.append(('precision_super', prec_super/super_relevant_users))\n",
" result.append(('recall_super', rec_super/super_relevant_users))\n",
" result.append(('NDCG', ndcg/relevant_users))\n",
" result.append(('mAP', mAP/relevant_users))\n",
" result.append(('MRR', MRR/relevant_users))\n",
" result.append(('LAUC', LAUC/relevant_users))\n",
" result.append(('HR', HR/relevant_users))\n",
"\n",
" df_result=(pd.DataFrame(list(zip(*result))[1])).T\n",
" df_result.columns=list(zip(*result))[0]\n",
" return df_result"
]
},
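{
  "cell_type": "markdown",
  "metadata": {},
  "source": [
   "The loop above accumulates several ranking measures in one pass over the recommendation lists. As an illustration of the NDCG part only, here is a minimal sketch (hypothetical hit positions, same discount vector as in `ranking_metrics`):"
  ]
},
{
  "cell_type": "code",
  "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "# Sketch: NDCG for one hypothetical user with topK=10,\n",
   "# hits at ranks 1 and 4, and 3 relevant items in the test set.\n",
   "topK = 10\n",
   "user_successes = np.zeros(topK)\n",
   "user_successes[[0, 3]] = 1  # ranks 1 and 4 (0-indexed positions 0 and 3)\n",
   "\n",
   "cg = 1.0 / np.log2(np.arange(2, topK + 2))  # the same discount as in ranking_metrics\n",
   "cg_sum = np.cumsum(cg)\n",
   "\n",
   "dcg = np.dot(user_successes, cg)\n",
   "idcg = cg_sum[min(topK, 3) - 1]  # ideal DCG: the 3 relevant items placed at the top\n",
   "print('NDCG for this user:', dcg / idcg)"
  ]
},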
{
"cell_type": "code",
"execution_count": 41,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"943it [00:00, 7832.26it/s]\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" precision | \n",
" recall | \n",
" F_1 | \n",
" F_05 | \n",
" precision_super | \n",
" recall_super | \n",
" NDCG | \n",
" mAP | \n",
" MRR | \n",
" LAUC | \n",
" HR | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" 0.09141 | \n",
" 0.037652 | \n",
" 0.04603 | \n",
" 0.061286 | \n",
" 0.079614 | \n",
" 0.056463 | \n",
" 0.095957 | \n",
" 0.043178 | \n",
" 0.198193 | \n",
" 0.515501 | \n",
" 0.437964 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" precision recall F_1 F_05 precision_super recall_super \\\n",
"0 0.09141 0.037652 0.04603 0.061286 0.079614 0.056463 \n",
"\n",
" NDCG mAP MRR LAUC HR \n",
"0 0.095957 0.043178 0.198193 0.515501 0.437964 "
]
},
"execution_count": 41,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ranking_metrics(test_ui, reco, super_reactions=[4,5], topK=10)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Diversity metrics"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"def diversity_metrics(test_ui, reco, topK=10):\n",
" \n",
" frequencies=defaultdict(int)\n",
" \n",
" # let's assign 0 to all items in test set\n",
" for item in list(set(test_ui.indices)):\n",
" frequencies[item]=0\n",
" \n",
" # counting frequencies\n",
" for item in reco[:,1:].flat:\n",
" frequencies[item]+=1\n",
" \n",
" nb_reco_outside_test=frequencies[-1]\n",
" del frequencies[-1]\n",
" \n",
" frequencies=np.array(list(frequencies.values()))\n",
" \n",
" nb_rec_items=len(frequencies[frequencies>0])\n",
" nb_reco_inside_test=np.sum(frequencies)\n",
" \n",
" frequencies=frequencies/np.sum(frequencies)\n",
" frequencies=np.sort(frequencies)\n",
" \n",
" with np.errstate(divide='ignore'): # let's put zeros put items with 0 frequency and ignore division warning\n",
" log_frequencies=np.nan_to_num(np.log(frequencies), posinf=0, neginf=0)\n",
" \n",
" result=[]\n",
" result.append(('Reco in test', nb_reco_inside_test/(nb_reco_inside_test+nb_reco_outside_test)))\n",
" result.append(('Test coverage', nb_rec_items/test_ui.shape[1]))\n",
" result.append(('Shannon', -np.dot(frequencies, log_frequencies)))\n",
" result.append(('Gini', np.dot(frequencies, np.arange(1-len(frequencies), len(frequencies), 2))/(len(frequencies)-1)))\n",
" \n",
" df_result=(pd.DataFrame(list(zip(*result))[1])).T\n",
" df_result.columns=list(zip(*result))[0]\n",
" return df_result"
]
},
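{
  "cell_type": "markdown",
  "metadata": {},
  "source": [
   "To see what the Shannon and Gini lines compute, here is a minimal sketch on a hypothetical item-frequency vector, using the same formulas as in `diversity_metrics` (frequencies normalized and sorted in increasing order):"
  ]
},
{
  "cell_type": "code",
  "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "# Sketch: Shannon entropy and Gini index of a hypothetical frequency vector\n",
   "freqs = np.array([0, 1, 1, 2, 6], dtype=float)\n",
   "freqs = freqs / freqs.sum()\n",
   "freqs = np.sort(freqs)\n",
   "\n",
   "with np.errstate(divide='ignore'):\n",
   "    log_freqs = np.nan_to_num(np.log(freqs), posinf=0, neginf=0)\n",
   "\n",
   "shannon = -np.dot(freqs, log_freqs)\n",
   "gini = np.dot(freqs, np.arange(1 - len(freqs), len(freqs), 2)) / (len(freqs) - 1)\n",
   "print('Shannon:', shannon, ' Gini:', gini)"
  ]
},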
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" Reco in test | \n",
" Test coverage | \n",
" Shannon | \n",
" Gini | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" 1.0 | \n",
" 0.033911 | \n",
" 2.836513 | \n",
" 0.991139 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" Reco in test Test coverage Shannon Gini\n",
"0 1.0 0.033911 2.836513 0.991139"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import evaluation_measures as ev\n",
"import imp\n",
"imp.reload(ev)\n",
"\n",
"x=diversity_metrics(test_ui, reco, topK=10)\n",
"x"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# To be used in other notebooks"
]
},
{
"cell_type": "code",
"execution_count": 43,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"943it [00:00, 8174.46it/s]\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" RMSE | \n",
" MAE | \n",
" precision | \n",
" recall | \n",
" F_1 | \n",
" F_05 | \n",
" precision_super | \n",
" recall_super | \n",
" NDCG | \n",
" mAP | \n",
" MRR | \n",
" LAUC | \n",
" HR | \n",
" Reco in test | \n",
" Test coverage | \n",
" Shannon | \n",
" Gini | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" 0.949459 | \n",
" 0.752487 | \n",
" 0.09141 | \n",
" 0.037652 | \n",
" 0.04603 | \n",
" 0.061286 | \n",
" 0.079614 | \n",
" 0.056463 | \n",
" 0.095957 | \n",
" 0.043178 | \n",
" 0.198193 | \n",
" 0.515501 | \n",
" 0.437964 | \n",
" 1.0 | \n",
" 0.033911 | \n",
" 2.836513 | \n",
" 0.991139 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" RMSE MAE precision recall F_1 F_05 \\\n",
"0 0.949459 0.752487 0.09141 0.037652 0.04603 0.061286 \n",
"\n",
" precision_super recall_super NDCG mAP MRR LAUC \\\n",
"0 0.079614 0.056463 0.095957 0.043178 0.198193 0.515501 \n",
"\n",
" HR Reco in test Test coverage Shannon Gini \n",
"0 0.437964 1.0 0.033911 2.836513 0.991139 "
]
},
"execution_count": 43,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import evaluation_measures as ev\n",
"import imp\n",
"imp.reload(ev)\n",
"\n",
"estimations_df=pd.read_csv('Recommendations generated/ml-100k/Ready_Baseline_estimations.csv', header=None)\n",
"reco=np.loadtxt('Recommendations generated/ml-100k/Ready_Baseline_reco.csv', delimiter=',')\n",
"\n",
"ev.evaluate(test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None),\n",
" estimations_df=estimations_df, \n",
" reco=reco,\n",
" super_reactions=[4,5])\n",
"#also you can just type ev.evaluate_all(estimations_df, reco) - I put above values as default"
]
},
{
"cell_type": "code",
"execution_count": 45,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"943it [00:00, 8620.89it/s]\n",
"943it [00:00, 7627.42it/s]\n",
"943it [00:00, 8642.57it/s]\n",
"943it [00:00, 7752.46it/s]\n",
"943it [00:00, 8864.93it/s]\n",
"943it [00:00, 8549.57it/s]\n",
"943it [00:00, 5768.05it/s]\n",
"943it [00:00, 8257.83it/s]\n",
"943it [00:00, 7608.73it/s]\n",
"943it [00:00, 8086.29it/s]\n",
"943it [00:00, 9124.19it/s]\n",
"943it [00:00, 8456.44it/s]\n",
"943it [00:00, 8696.29it/s]\n",
"943it [00:00, 8500.80it/s]\n",
"943it [00:00, 9023.45it/s]\n",
"943it [00:00, 8529.05it/s]\n"
]
}
],
"source": [
"import evaluation_measures as ev\n",
"import imp\n",
"imp.reload(ev)\n",
"\n",
"dir_path=\"Recommendations generated/ml-100k/\"\n",
"super_reactions=[4,5]\n",
"test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None)\n",
"\n",
"df=ev.evaluate_all(test, dir_path, super_reactions)\n",
"#also you can just type ev.evaluate_all() - I put above values as default"
]
},
{
"cell_type": "code",
"execution_count": 60,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" Model | \n",
" RMSE | \n",
" MAE | \n",
" precision | \n",
" recall | \n",
" F_1 | \n",
" F_05 | \n",
" precision_super | \n",
" recall_super | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" Self_RP3Beta | \n",
" 3.702446 | \n",
" 3.527273 | \n",
" 0.282185 | \n",
" 0.192092 | \n",
" 0.186749 | \n",
" 0.216980 | \n",
" 0.204185 | \n",
" 0.240096 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_TopPop | \n",
" 2.508258 | \n",
" 2.217909 | \n",
" 0.188865 | \n",
" 0.116919 | \n",
" 0.118732 | \n",
" 0.141584 | \n",
" 0.130472 | \n",
" 0.137473 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_SVD | \n",
" 0.952784 | \n",
" 0.750597 | \n",
" 0.095228 | \n",
" 0.047497 | \n",
" 0.053142 | \n",
" 0.067082 | \n",
" 0.084871 | \n",
" 0.076457 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_SVDBaseline | \n",
" 0.930321 | \n",
" 0.734643 | \n",
" 0.092683 | \n",
" 0.042046 | \n",
" 0.048568 | \n",
" 0.063218 | \n",
" 0.082940 | \n",
" 0.068730 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_SVDBiased | \n",
" 0.940375 | \n",
" 0.742264 | \n",
" 0.092153 | \n",
" 0.039645 | \n",
" 0.046804 | \n",
" 0.061886 | \n",
" 0.079399 | \n",
" 0.055967 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_Baseline | \n",
" 0.949459 | \n",
" 0.752487 | \n",
" 0.091410 | \n",
" 0.037652 | \n",
" 0.046030 | \n",
" 0.061286 | \n",
" 0.079614 | \n",
" 0.056463 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_SVD | \n",
" 0.939326 | \n",
" 0.740022 | \n",
" 0.074549 | \n",
" 0.031755 | \n",
" 0.038425 | \n",
" 0.050562 | \n",
" 0.065665 | \n",
" 0.050602 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_GlobalAvg | \n",
" 1.125760 | \n",
" 0.943534 | \n",
" 0.061188 | \n",
" 0.025968 | \n",
" 0.031383 | \n",
" 0.041343 | \n",
" 0.040558 | \n",
" 0.032107 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_Random | \n",
" 1.518551 | \n",
" 1.218784 | \n",
" 0.050583 | \n",
" 0.024085 | \n",
" 0.027323 | \n",
" 0.034826 | \n",
" 0.031223 | \n",
" 0.026436 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_I-KNN | \n",
" 1.030386 | \n",
" 0.813067 | \n",
" 0.026087 | \n",
" 0.006908 | \n",
" 0.010593 | \n",
" 0.016046 | \n",
" 0.021137 | \n",
" 0.009522 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_I-KNNBaseline | \n",
" 0.935327 | \n",
" 0.737424 | \n",
" 0.002545 | \n",
" 0.000755 | \n",
" 0.001105 | \n",
" 0.001602 | \n",
" 0.002253 | \n",
" 0.000930 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_U-KNNBaseline | \n",
" 0.935327 | \n",
" 0.737424 | \n",
" 0.002545 | \n",
" 0.000755 | \n",
" 0.001105 | \n",
" 0.001602 | \n",
" 0.002253 | \n",
" 0.000930 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_U-KNN | \n",
" 1.023495 | \n",
" 0.807913 | \n",
" 0.000742 | \n",
" 0.000205 | \n",
" 0.000305 | \n",
" 0.000449 | \n",
" 0.000536 | \n",
" 0.000198 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_TopRated | \n",
" 1.033085 | \n",
" 0.822057 | \n",
" 0.000954 | \n",
" 0.000188 | \n",
" 0.000298 | \n",
" 0.000481 | \n",
" 0.000644 | \n",
" 0.000223 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_BaselineUI | \n",
" 0.967585 | \n",
" 0.762740 | \n",
" 0.000954 | \n",
" 0.000170 | \n",
" 0.000278 | \n",
" 0.000463 | \n",
" 0.000644 | \n",
" 0.000189 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_IKNN | \n",
" 1.018363 | \n",
" 0.808793 | \n",
" 0.000318 | \n",
" 0.000108 | \n",
" 0.000140 | \n",
" 0.000189 | \n",
" 0.000000 | \n",
" 0.000000 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" Model RMSE MAE precision recall F_1 \\\n",
"0 Self_RP3Beta 3.702446 3.527273 0.282185 0.192092 0.186749 \n",
"0 Self_TopPop 2.508258 2.217909 0.188865 0.116919 0.118732 \n",
"0 Ready_SVD 0.952784 0.750597 0.095228 0.047497 0.053142 \n",
"0 Self_SVDBaseline 0.930321 0.734643 0.092683 0.042046 0.048568 \n",
"0 Ready_SVDBiased 0.940375 0.742264 0.092153 0.039645 0.046804 \n",
"0 Ready_Baseline 0.949459 0.752487 0.091410 0.037652 0.046030 \n",
"0 Self_SVD 0.939326 0.740022 0.074549 0.031755 0.038425 \n",
"0 Self_GlobalAvg 1.125760 0.943534 0.061188 0.025968 0.031383 \n",
"0 Ready_Random 1.518551 1.218784 0.050583 0.024085 0.027323 \n",
"0 Ready_I-KNN 1.030386 0.813067 0.026087 0.006908 0.010593 \n",
"0 Ready_I-KNNBaseline 0.935327 0.737424 0.002545 0.000755 0.001105 \n",
"0 Ready_U-KNNBaseline 0.935327 0.737424 0.002545 0.000755 0.001105 \n",
"0 Ready_U-KNN 1.023495 0.807913 0.000742 0.000205 0.000305 \n",
"0 Self_TopRated 1.033085 0.822057 0.000954 0.000188 0.000298 \n",
"0 Self_BaselineUI 0.967585 0.762740 0.000954 0.000170 0.000278 \n",
"0 Self_IKNN 1.018363 0.808793 0.000318 0.000108 0.000140 \n",
"\n",
" F_05 precision_super recall_super \n",
"0 0.216980 0.204185 0.240096 \n",
"0 0.141584 0.130472 0.137473 \n",
"0 0.067082 0.084871 0.076457 \n",
"0 0.063218 0.082940 0.068730 \n",
"0 0.061886 0.079399 0.055967 \n",
"0 0.061286 0.079614 0.056463 \n",
"0 0.050562 0.065665 0.050602 \n",
"0 0.041343 0.040558 0.032107 \n",
"0 0.034826 0.031223 0.026436 \n",
"0 0.016046 0.021137 0.009522 \n",
"0 0.001602 0.002253 0.000930 \n",
"0 0.001602 0.002253 0.000930 \n",
"0 0.000449 0.000536 0.000198 \n",
"0 0.000481 0.000644 0.000223 \n",
"0 0.000463 0.000644 0.000189 \n",
"0 0.000189 0.000000 0.000000 "
]
},
"execution_count": 60,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df.iloc[:,:9]"
]
},
{
"cell_type": "code",
"execution_count": 61,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" Model | \n",
" NDCG | \n",
" mAP | \n",
" MRR | \n",
" LAUC | \n",
" HR | \n",
" Reco in test | \n",
" Test coverage | \n",
" Shannon | \n",
" Gini | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" Self_RP3Beta | \n",
" 0.339114 | \n",
" 0.204905 | \n",
" 0.572157 | \n",
" 0.593544 | \n",
" 0.875928 | \n",
" 1.000000 | \n",
" 0.077201 | \n",
" 3.875892 | \n",
" 0.974947 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_TopPop | \n",
" 0.214651 | \n",
" 0.111707 | \n",
" 0.400939 | \n",
" 0.555546 | \n",
" 0.765642 | \n",
" 1.000000 | \n",
" 0.038961 | \n",
" 3.159079 | \n",
" 0.987317 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_SVD | \n",
" 0.109075 | \n",
" 0.050124 | \n",
" 0.241366 | \n",
" 0.520459 | \n",
" 0.499470 | \n",
" 0.992047 | \n",
" 0.217893 | \n",
" 4.405246 | \n",
" 0.953484 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_SVDBaseline | \n",
" 0.098937 | \n",
" 0.044405 | \n",
" 0.203936 | \n",
" 0.517696 | \n",
" 0.469777 | \n",
" 1.000000 | \n",
" 0.058442 | \n",
" 3.085857 | \n",
" 0.988824 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_SVDBiased | \n",
" 0.102017 | \n",
" 0.047972 | \n",
" 0.216876 | \n",
" 0.516515 | \n",
" 0.441145 | \n",
" 0.997455 | \n",
" 0.167388 | \n",
" 4.235348 | \n",
" 0.962085 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_Baseline | \n",
" 0.095957 | \n",
" 0.043178 | \n",
" 0.198193 | \n",
" 0.515501 | \n",
" 0.437964 | \n",
" 1.000000 | \n",
" 0.033911 | \n",
" 2.836513 | \n",
" 0.991139 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_SVD | \n",
" 0.077117 | \n",
" 0.031574 | \n",
" 0.165509 | \n",
" 0.512485 | \n",
" 0.414634 | \n",
" 0.981866 | \n",
" 0.080087 | \n",
" 3.858982 | \n",
" 0.975271 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_GlobalAvg | \n",
" 0.067695 | \n",
" 0.027470 | \n",
" 0.171187 | \n",
" 0.509546 | \n",
" 0.384942 | \n",
" 1.000000 | \n",
" 0.025974 | \n",
" 2.711772 | \n",
" 0.992003 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_Random | \n",
" 0.054902 | \n",
" 0.020652 | \n",
" 0.137928 | \n",
" 0.508570 | \n",
" 0.353128 | \n",
" 0.987699 | \n",
" 0.183261 | \n",
" 5.093805 | \n",
" 0.908215 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_I-KNN | \n",
" 0.024214 | \n",
" 0.008958 | \n",
" 0.048068 | \n",
" 0.499885 | \n",
" 0.154825 | \n",
" 0.402333 | \n",
" 0.434343 | \n",
" 5.133650 | \n",
" 0.877999 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_I-KNNBaseline | \n",
" 0.003444 | \n",
" 0.001362 | \n",
" 0.011760 | \n",
" 0.496724 | \n",
" 0.021209 | \n",
" 0.482821 | \n",
" 0.059885 | \n",
" 2.232578 | \n",
" 0.994487 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_U-KNNBaseline | \n",
" 0.003444 | \n",
" 0.001362 | \n",
" 0.011760 | \n",
" 0.496724 | \n",
" 0.021209 | \n",
" 0.482821 | \n",
" 0.059885 | \n",
" 2.232578 | \n",
" 0.994487 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_U-KNN | \n",
" 0.000845 | \n",
" 0.000274 | \n",
" 0.002744 | \n",
" 0.496441 | \n",
" 0.007423 | \n",
" 0.602121 | \n",
" 0.010823 | \n",
" 2.089186 | \n",
" 0.995706 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_TopRated | \n",
" 0.001043 | \n",
" 0.000335 | \n",
" 0.003348 | \n",
" 0.496433 | \n",
" 0.009544 | \n",
" 0.699046 | \n",
" 0.005051 | \n",
" 1.945910 | \n",
" 0.995669 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_BaselineUI | \n",
" 0.000752 | \n",
" 0.000168 | \n",
" 0.001677 | \n",
" 0.496424 | \n",
" 0.009544 | \n",
" 0.600530 | \n",
" 0.005051 | \n",
" 1.803126 | \n",
" 0.996380 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_IKNN | \n",
" 0.000214 | \n",
" 0.000037 | \n",
" 0.000368 | \n",
" 0.496391 | \n",
" 0.003181 | \n",
" 0.392153 | \n",
" 0.115440 | \n",
" 4.174741 | \n",
" 0.965327 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" Model NDCG mAP MRR LAUC HR \\\n",
"0 Self_RP3Beta 0.339114 0.204905 0.572157 0.593544 0.875928 \n",
"0 Self_TopPop 0.214651 0.111707 0.400939 0.555546 0.765642 \n",
"0 Ready_SVD 0.109075 0.050124 0.241366 0.520459 0.499470 \n",
"0 Self_SVDBaseline 0.098937 0.044405 0.203936 0.517696 0.469777 \n",
"0 Ready_SVDBiased 0.102017 0.047972 0.216876 0.516515 0.441145 \n",
"0 Ready_Baseline 0.095957 0.043178 0.198193 0.515501 0.437964 \n",
"0 Self_SVD 0.077117 0.031574 0.165509 0.512485 0.414634 \n",
"0 Self_GlobalAvg 0.067695 0.027470 0.171187 0.509546 0.384942 \n",
"0 Ready_Random 0.054902 0.020652 0.137928 0.508570 0.353128 \n",
"0 Ready_I-KNN 0.024214 0.008958 0.048068 0.499885 0.154825 \n",
"0 Ready_I-KNNBaseline 0.003444 0.001362 0.011760 0.496724 0.021209 \n",
"0 Ready_U-KNNBaseline 0.003444 0.001362 0.011760 0.496724 0.021209 \n",
"0 Ready_U-KNN 0.000845 0.000274 0.002744 0.496441 0.007423 \n",
"0 Self_TopRated 0.001043 0.000335 0.003348 0.496433 0.009544 \n",
"0 Self_BaselineUI 0.000752 0.000168 0.001677 0.496424 0.009544 \n",
"0 Self_IKNN 0.000214 0.000037 0.000368 0.496391 0.003181 \n",
"\n",
" Reco in test Test coverage Shannon Gini \n",
"0 1.000000 0.077201 3.875892 0.974947 \n",
"0 1.000000 0.038961 3.159079 0.987317 \n",
"0 0.992047 0.217893 4.405246 0.953484 \n",
"0 1.000000 0.058442 3.085857 0.988824 \n",
"0 0.997455 0.167388 4.235348 0.962085 \n",
"0 1.000000 0.033911 2.836513 0.991139 \n",
"0 0.981866 0.080087 3.858982 0.975271 \n",
"0 1.000000 0.025974 2.711772 0.992003 \n",
"0 0.987699 0.183261 5.093805 0.908215 \n",
"0 0.402333 0.434343 5.133650 0.877999 \n",
"0 0.482821 0.059885 2.232578 0.994487 \n",
"0 0.482821 0.059885 2.232578 0.994487 \n",
"0 0.602121 0.010823 2.089186 0.995706 \n",
"0 0.699046 0.005051 1.945910 0.995669 \n",
"0 0.600530 0.005051 1.803126 0.996380 \n",
"0 0.392153 0.115440 4.174741 0.965327 "
]
},
"execution_count": 61,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df.iloc[:,np.append(0,np.arange(9, df.shape[1]))]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Check metrics on toy dataset"
]
},
{
"cell_type": "code",
"execution_count": 62,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"3it [00:00, 4090.67it/s]\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" Model | \n",
" RMSE | \n",
" MAE | \n",
" precision | \n",
" recall | \n",
" F_1 | \n",
" F_05 | \n",
" precision_super | \n",
" recall_super | \n",
" NDCG | \n",
" mAP | \n",
" MRR | \n",
" LAUC | \n",
" HR | \n",
" Reco in test | \n",
" Test coverage | \n",
" Shannon | \n",
" Gini | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" Self_BaselineUI | \n",
" 1.648337 | \n",
" 1.575 | \n",
" 0.444444 | \n",
" 0.888889 | \n",
" 0.555556 | \n",
" 0.478632 | \n",
" 0.333333 | \n",
" 0.75 | \n",
" 0.72055 | \n",
" 0.62963 | \n",
" 0.666667 | \n",
" 0.722222 | \n",
" 1.0 | \n",
" 0.777778 | \n",
" 0.8 | \n",
" 1.351784 | \n",
" 0.357143 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" Model RMSE MAE precision recall F_1 F_05 \\\n",
"0 Self_BaselineUI 1.648337 1.575 0.444444 0.888889 0.555556 0.478632 \n",
"\n",
" precision_super recall_super NDCG mAP MRR LAUC HR \\\n",
"0 0.333333 0.75 0.72055 0.62963 0.666667 0.722222 1.0 \n",
"\n",
" Reco in test Test coverage Shannon Gini \n",
"0 0.777778 0.8 1.351784 0.357143 "
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Training data:\n"
]
},
{
"data": {
"text/plain": [
"matrix([[3, 4, 0, 0, 5, 0, 0, 4],\n",
" [0, 1, 2, 3, 0, 0, 0, 0],\n",
" [0, 0, 0, 5, 0, 3, 4, 0]], dtype=int64)"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Test data:\n"
]
},
{
"data": {
"text/plain": [
"matrix([[0, 0, 0, 0, 0, 0, 3, 0],\n",
" [0, 0, 0, 0, 5, 0, 0, 0],\n",
" [5, 0, 4, 0, 0, 0, 0, 2]], dtype=int64)"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Recommendations:\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" 0 | \n",
" 1 | \n",
" 2 | \n",
" 3 | \n",
" 4 | \n",
" 5 | \n",
" 6 | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" 0 | \n",
" 30 | \n",
" 4.375000 | \n",
" 60 | \n",
" 4.375000 | \n",
" 50 | \n",
" 3.375000 | \n",
"
\n",
" \n",
" 1 | \n",
" 10 | \n",
" 40 | \n",
" 4.166667 | \n",
" 60 | \n",
" 3.166667 | \n",
" 70 | \n",
" 3.166667 | \n",
"
\n",
" \n",
" 2 | \n",
" 20 | \n",
" 40 | \n",
" 5.333333 | \n",
" 70 | \n",
" 4.333333 | \n",
" 0 | \n",
" 3.333333 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" 0 1 2 3 4 5 6\n",
"0 0 30 4.375000 60 4.375000 50 3.375000\n",
"1 10 40 4.166667 60 3.166667 70 3.166667\n",
"2 20 40 5.333333 70 4.333333 0 3.333333"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Estimations:\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" user | \n",
" item | \n",
" est_score | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" 0 | \n",
" 60 | \n",
" 4.375000 | \n",
"
\n",
" \n",
" 1 | \n",
" 10 | \n",
" 40 | \n",
" 4.166667 | \n",
"
\n",
" \n",
" 2 | \n",
" 20 | \n",
" 0 | \n",
" 3.333333 | \n",
"
\n",
" \n",
" 3 | \n",
" 20 | \n",
" 20 | \n",
" 2.333333 | \n",
"
\n",
" \n",
" 4 | \n",
" 20 | \n",
" 70 | \n",
" 4.333333 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" user item est_score\n",
"0 0 60 4.375000\n",
"1 10 40 4.166667\n",
"2 20 0 3.333333\n",
"3 20 20 2.333333\n",
"4 20 70 4.333333"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import evaluation_measures as ev\n",
"import imp\n",
"import helpers\n",
"imp.reload(ev)\n",
"\n",
"dir_path=\"Recommendations generated/toy-example/\"\n",
"super_reactions=[4,5]\n",
"test=pd.read_csv('./Datasets/toy-example/test.csv', sep='\\t', header=None)\n",
"\n",
"display(ev.evaluate_all(test, dir_path, super_reactions, topK=3))\n",
"#also you can just type ev.evaluate_all() - I put above values as default\n",
"\n",
"toy_train_read=pd.read_csv('./Datasets/toy-example/train.csv', sep='\\t', header=None, names=['user', 'item', 'rating', 'timestamp'])\n",
"toy_test_read=pd.read_csv('./Datasets/toy-example/test.csv', sep='\\t', header=None, names=['user', 'item', 'rating', 'timestamp'])\n",
"reco=pd.read_csv('Recommendations generated/toy-example/Self_BaselineUI_reco.csv', header=None)\n",
"estimations=pd.read_csv('Recommendations generated/toy-example/Self_BaselineUI_estimations.csv', names=['user', 'item', 'est_score'])\n",
"toy_train_ui, toy_test_ui, toy_user_code_id, toy_user_id_code, \\\n",
"toy_item_code_id, toy_item_id_code = helpers.data_to_csr(toy_train_read, toy_test_read)\n",
"\n",
"print('Training data:')\n",
"display(toy_train_ui.todense())\n",
"\n",
"print('Test data:')\n",
"display(toy_test_ui.todense())\n",
"\n",
"print('Recommendations:')\n",
"display(reco)\n",
"\n",
"print('Estimations:')\n",
"display(estimations)"
]
},
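{
  "cell_type": "markdown",
  "metadata": {},
  "source": [
   "As a cross-check (a sketch under the assumption that the toy reco file has columns user, item, score repeated, as displayed above), precision@3 can be recomputed by hand from `toy_test_read` and `reco`; since every toy user has at least one test item, this should agree with the precision column above."
  ]
},
{
  "cell_type": "code",
  "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "# Sketch: recompute precision@3 for the toy example directly from the raw files\n",
   "K = 3\n",
   "hits = 0\n",
   "for _, row in reco.iterrows():\n",
   "    u = row[0]\n",
   "    recommended_items = {row[1], row[3], row[5]}  # item columns of the reco file\n",
   "    relevant_items = set(toy_test_read[toy_test_read['user'] == u]['item'])\n",
   "    hits += len(recommended_items & relevant_items)\n",
   "print('precision@3:', hits / (K * len(reco)))"
  ]
},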
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Sample recommendations"
]
},
{
"cell_type": "code",
"execution_count": 66,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Here is what user rated high:\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" user | \n",
" rating | \n",
" title | \n",
" genres | \n",
"
\n",
" \n",
" \n",
" \n",
" 269 | \n",
" 523 | \n",
" 5 | \n",
" Toy Story (1995) | \n",
" Animation, Children's, Comedy | \n",
"
\n",
" \n",
" 31247 | \n",
" 523 | \n",
" 5 | \n",
" Grease (1978) | \n",
" Comedy, Musical, Romance | \n",
"
\n",
" \n",
" 35233 | \n",
" 523 | \n",
" 5 | \n",
" Much Ado About Nothing (1993) | \n",
" Comedy, Romance | \n",
"
\n",
" \n",
" 35436 | \n",
" 523 | \n",
" 5 | \n",
" Fantasia (1940) | \n",
" Animation, Children's, Musical | \n",
"
\n",
" \n",
" 36537 | \n",
" 523 | \n",
" 5 | \n",
" Shine (1996) | \n",
" Drama, Romance | \n",
"
\n",
" \n",
" 37146 | \n",
" 523 | \n",
" 5 | \n",
" Contact (1997) | \n",
" Drama, Sci-Fi | \n",
"
\n",
" \n",
" 38982 | \n",
" 523 | \n",
" 5 | \n",
" Full Monty, The (1997) | \n",
" Comedy | \n",
"
\n",
" \n",
" 1197 | \n",
" 523 | \n",
" 5 | \n",
" Four Weddings and a Funeral (1994) | \n",
" Comedy, Romance | \n",
"
\n",
" \n",
" 44756 | \n",
" 523 | \n",
" 5 | \n",
" Butch Cassidy and the Sundance Kid (1969) | \n",
" Action, Comedy, Western | \n",
"
\n",
" \n",
" 45918 | \n",
" 523 | \n",
" 5 | \n",
" Wallace & Gromit: The Best of Aardman Animatio... | \n",
" Animation | \n",
"
\n",
" \n",
" 46339 | \n",
" 523 | \n",
" 5 | \n",
" Grand Day Out, A (1992) | \n",
" Animation, Comedy | \n",
"
\n",
" \n",
" 50119 | \n",
" 523 | \n",
" 5 | \n",
" Mrs. Brown (Her Majesty, Mrs. Brown) (1997) | \n",
" Drama, Romance | \n",
"
\n",
" \n",
" 50338 | \n",
" 523 | \n",
" 5 | \n",
" Close Shave, A (1995) | \n",
" Animation, Comedy, Thriller | \n",
"
\n",
" \n",
" 52950 | \n",
" 523 | \n",
" 5 | \n",
" Kolya (1996) | \n",
" Comedy | \n",
"
\n",
" \n",
" 53361 | \n",
" 523 | \n",
" 5 | \n",
" Multiplicity (1996) | \n",
" Comedy | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" user rating title \\\n",
"269 523 5 Toy Story (1995) \n",
"31247 523 5 Grease (1978) \n",
"35233 523 5 Much Ado About Nothing (1993) \n",
"35436 523 5 Fantasia (1940) \n",
"36537 523 5 Shine (1996) \n",
"37146 523 5 Contact (1997) \n",
"38982 523 5 Full Monty, The (1997) \n",
"1197 523 5 Four Weddings and a Funeral (1994) \n",
"44756 523 5 Butch Cassidy and the Sundance Kid (1969) \n",
"45918 523 5 Wallace & Gromit: The Best of Aardman Animatio... \n",
"46339 523 5 Grand Day Out, A (1992) \n",
"50119 523 5 Mrs. Brown (Her Majesty, Mrs. Brown) (1997) \n",
"50338 523 5 Close Shave, A (1995) \n",
"52950 523 5 Kolya (1996) \n",
"53361 523 5 Multiplicity (1996) \n",
"\n",
" genres \n",
"269 Animation, Children's, Comedy \n",
"31247 Comedy, Musical, Romance \n",
"35233 Comedy, Romance \n",
"35436 Animation, Children's, Musical \n",
"36537 Drama, Romance \n",
"37146 Drama, Sci-Fi \n",
"38982 Comedy \n",
"1197 Comedy, Romance \n",
"44756 Action, Comedy, Western \n",
"45918 Animation \n",
"46339 Animation, Comedy \n",
"50119 Drama, Romance \n",
"50338 Animation, Comedy, Thriller \n",
"52950 Comedy \n",
"53361 Comedy "
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Here is what we recommend:\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" user | \n",
" rec_nb | \n",
" title | \n",
" genres | \n",
"
\n",
" \n",
" \n",
" \n",
" 521 | \n",
" 523.0 | \n",
" 1 | \n",
" Great Day in Harlem, A (1994) | \n",
" Documentary | \n",
"
\n",
" \n",
" 1463 | \n",
" 523.0 | \n",
" 2 | \n",
" Tough and Deadly (1995) | \n",
" Action, Drama, Thriller | \n",
"
\n",
" \n",
" 2405 | \n",
" 523.0 | \n",
" 3 | \n",
" Aiqing wansui (1994) | \n",
" Drama | \n",
"
\n",
" \n",
" 3347 | \n",
" 523.0 | \n",
" 4 | \n",
" Delta of Venus (1994) | \n",
" Drama | \n",
"
\n",
" \n",
" 4289 | \n",
" 523.0 | \n",
" 5 | \n",
" Someone Else's America (1995) | \n",
" Drama | \n",
"
\n",
" \n",
" 5231 | \n",
" 523.0 | \n",
" 6 | \n",
" Saint of Fort Washington, The (1993) | \n",
" Drama | \n",
"
\n",
" \n",
" 6173 | \n",
" 523.0 | \n",
" 7 | \n",
" Celestial Clockwork (1994) | \n",
" Comedy | \n",
"
\n",
" \n",
" 7116 | \n",
" 523.0 | \n",
" 8 | \n",
" Some Mother's Son (1996) | \n",
" Drama | \n",
"
\n",
" \n",
" 9010 | \n",
" 523.0 | \n",
" 9 | \n",
" Maya Lin: A Strong Clear Vision (1994) | \n",
" Documentary | \n",
"
\n",
" \n",
" 8056 | \n",
" 523.0 | \n",
" 10 | \n",
" Prefontaine (1997) | \n",
" Drama | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" user rec_nb title \\\n",
"521 523.0 1 Great Day in Harlem, A (1994) \n",
"1463 523.0 2 Tough and Deadly (1995) \n",
"2405 523.0 3 Aiqing wansui (1994) \n",
"3347 523.0 4 Delta of Venus (1994) \n",
"4289 523.0 5 Someone Else's America (1995) \n",
"5231 523.0 6 Saint of Fort Washington, The (1993) \n",
"6173 523.0 7 Celestial Clockwork (1994) \n",
"7116 523.0 8 Some Mother's Son (1996) \n",
"9010 523.0 9 Maya Lin: A Strong Clear Vision (1994) \n",
"8056 523.0 10 Prefontaine (1997) \n",
"\n",
" genres \n",
"521 Documentary \n",
"1463 Action, Drama, Thriller \n",
"2405 Drama \n",
"3347 Drama \n",
"4289 Drama \n",
"5231 Drama \n",
"6173 Comedy \n",
"7116 Drama \n",
"9010 Documentary \n",
"8056 Drama "
]
},
"execution_count": 66,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train=pd.read_csv('./Datasets/ml-100k/train.csv', sep='\\t', header=None, names=['user', 'item', 'rating', 'timestamp'])\n",
"items=pd.read_csv('./Datasets/ml-100k/movies.csv')\n",
"\n",
"user=random.choice(list(set(train['user'])))\n",
"\n",
"train_content=pd.merge(train, items, left_on='item', right_on='id')\n",
"\n",
"print('Here is what user rated high:')\n",
"display(train_content[train_content['user']==user][['user', 'rating', 'title', 'genres']]\\\n",
" .sort_values(by='rating', ascending=False)[:15])\n",
"\n",
"reco = np.loadtxt('Recommendations generated/ml-100k/Self_BaselineUI_reco.csv', delimiter=',')\n",
"items=pd.read_csv('./Datasets/ml-100k/movies.csv')\n",
"\n",
"# Let's ignore scores - they are not used in evaluation: \n",
"reco_users=reco[:,:1]\n",
"reco_items=reco[:,1::2]\n",
"# Let's put them into one array\n",
"reco=np.concatenate((reco_users, reco_items), axis=1)\n",
"\n",
"# Let's rebuild it user-item dataframe\n",
"recommended=[]\n",
"for row in reco:\n",
" for rec_nb, entry in enumerate(row[1:]):\n",
" recommended.append((row[0], rec_nb+1, entry))\n",
"recommended=pd.DataFrame(recommended, columns=['user','rec_nb', 'item'])\n",
"\n",
"recommended_content=pd.merge(recommended, items, left_on='item', right_on='id')\n",
"\n",
"print('Here is what we recommend:')\n",
"recommended_content[recommended_content['user']==user][['user', 'rec_nb', 'title', 'genres']].sort_values(by='rec_nb')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# project task 3: implement some other evaluation measure"
]
},
{
"cell_type": "code",
"execution_count": 71,
"metadata": {},
"outputs": [],
"source": [
"# it may be your idea, modification of what we have already implemented \n",
"# (for example Hit2 rate which would count as a success users whoreceived at least 2 relevant recommendations) \n",
"# or something well-known\n",
"# expected output: modification of evaluation_measures.py such that evaluate_all will also display your measure"
]
},
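{
  "cell_type": "markdown",
  "metadata": {},
  "source": [
   "Below is a hedged sketch of one such measure - the Hit2 rate mentioned above (the fraction of relevant users who got at least 2 relevant items in their top-K). It follows the same access pattern as `ranking_metrics` and expects `reco` to contain inner user/item codes; to have it reported by `ev.evaluate_all`, the same logic would have to be added to evaluation_measures.py."
  ]
},
{
  "cell_type": "code",
  "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "# Sketch (an assumption, not yet part of evaluation_measures.py): Hit2 rate\n",
   "def hit2_rate(test_ui, reco, topK=10):\n",
   "    relevant_users = 0\n",
   "    hit2_users = 0\n",
   "    for nb_user, user in enumerate(reco[:, 0]):\n",
   "        u_rated_items = test_ui.indices[test_ui.indptr[user]:test_ui.indptr[user + 1]]\n",
   "        if len(u_rated_items) == 0:\n",
   "            continue  # as in ranking_metrics, skip users with no items in the test set\n",
   "        relevant_users += 1\n",
   "        successes = sum(item in u_rated_items for item in reco[nb_user, 1:topK + 1])\n",
   "        if successes >= 2:\n",
   "            hit2_users += 1\n",
   "    return hit2_users / relevant_users\n",
   "\n",
   "# Expected usage, with reco expressed in inner codes as in the 'Ranking metrics' section:\n",
   "# hit2_rate(test_ui, reco, topK=10)"
  ]
},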
{
"cell_type": "code",
"execution_count": 72,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"943it [00:00, 8687.43it/s]\n",
"943it [00:00, 7296.38it/s]\n",
"943it [00:00, 8704.77it/s]\n",
"943it [00:00, 8001.89it/s]\n",
"943it [00:00, 8997.15it/s]\n",
"943it [00:00, 8387.52it/s]\n",
"943it [00:00, 8062.71it/s]\n",
"943it [00:00, 7400.45it/s]\n",
"943it [00:00, 7525.94it/s]\n",
"943it [00:00, 8338.86it/s]\n",
"943it [00:00, 8715.87it/s]\n",
"943it [00:00, 8283.65it/s]\n",
"943it [00:00, 8345.05it/s]\n",
"943it [00:00, 7972.31it/s]\n",
"943it [00:00, 8179.38it/s]\n",
"943it [00:00, 8320.16it/s]\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" Model | \n",
" RMSE | \n",
" MAE | \n",
" precision | \n",
" recall | \n",
" F_1 | \n",
" F_05 | \n",
" precision_super | \n",
" recall_super | \n",
" NDCG | \n",
" mAP | \n",
" MRR | \n",
" LAUC | \n",
" HR | \n",
" Reco in test | \n",
" Test coverage | \n",
" Shannon | \n",
" Gini | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" Self_RP3Beta | \n",
" 3.702446 | \n",
" 3.527273 | \n",
" 0.282185 | \n",
" 0.192092 | \n",
" 0.186749 | \n",
" 0.216980 | \n",
" 0.204185 | \n",
" 0.240096 | \n",
" 0.339114 | \n",
" 0.204905 | \n",
" 0.572157 | \n",
" 0.593544 | \n",
" 0.875928 | \n",
" 1.000000 | \n",
" 0.077201 | \n",
" 3.875892 | \n",
" 0.974947 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_TopPop | \n",
" 2.508258 | \n",
" 2.217909 | \n",
" 0.188865 | \n",
" 0.116919 | \n",
" 0.118732 | \n",
" 0.141584 | \n",
" 0.130472 | \n",
" 0.137473 | \n",
" 0.214651 | \n",
" 0.111707 | \n",
" 0.400939 | \n",
" 0.555546 | \n",
" 0.765642 | \n",
" 1.000000 | \n",
" 0.038961 | \n",
" 3.159079 | \n",
" 0.987317 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_SVD | \n",
" 0.952784 | \n",
" 0.750597 | \n",
" 0.095228 | \n",
" 0.047497 | \n",
" 0.053142 | \n",
" 0.067082 | \n",
" 0.084871 | \n",
" 0.076457 | \n",
" 0.109075 | \n",
" 0.050124 | \n",
" 0.241366 | \n",
" 0.520459 | \n",
" 0.499470 | \n",
" 0.992047 | \n",
" 0.217893 | \n",
" 4.405246 | \n",
" 0.953484 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_SVDBaseline | \n",
" 0.930321 | \n",
" 0.734643 | \n",
" 0.092683 | \n",
" 0.042046 | \n",
" 0.048568 | \n",
" 0.063218 | \n",
" 0.082940 | \n",
" 0.068730 | \n",
" 0.098937 | \n",
" 0.044405 | \n",
" 0.203936 | \n",
" 0.517696 | \n",
" 0.469777 | \n",
" 1.000000 | \n",
" 0.058442 | \n",
" 3.085857 | \n",
" 0.988824 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_SVDBiased | \n",
" 0.940375 | \n",
" 0.742264 | \n",
" 0.092153 | \n",
" 0.039645 | \n",
" 0.046804 | \n",
" 0.061886 | \n",
" 0.079399 | \n",
" 0.055967 | \n",
" 0.102017 | \n",
" 0.047972 | \n",
" 0.216876 | \n",
" 0.516515 | \n",
" 0.441145 | \n",
" 0.997455 | \n",
" 0.167388 | \n",
" 4.235348 | \n",
" 0.962085 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_Baseline | \n",
" 0.949459 | \n",
" 0.752487 | \n",
" 0.091410 | \n",
" 0.037652 | \n",
" 0.046030 | \n",
" 0.061286 | \n",
" 0.079614 | \n",
" 0.056463 | \n",
" 0.095957 | \n",
" 0.043178 | \n",
" 0.198193 | \n",
" 0.515501 | \n",
" 0.437964 | \n",
" 1.000000 | \n",
" 0.033911 | \n",
" 2.836513 | \n",
" 0.991139 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_SVD | \n",
" 0.939326 | \n",
" 0.740022 | \n",
" 0.074549 | \n",
" 0.031755 | \n",
" 0.038425 | \n",
" 0.050562 | \n",
" 0.065665 | \n",
" 0.050602 | \n",
" 0.077117 | \n",
" 0.031574 | \n",
" 0.165509 | \n",
" 0.512485 | \n",
" 0.414634 | \n",
" 0.981866 | \n",
" 0.080087 | \n",
" 3.858982 | \n",
" 0.975271 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_GlobalAvg | \n",
" 1.125760 | \n",
" 0.943534 | \n",
" 0.061188 | \n",
" 0.025968 | \n",
" 0.031383 | \n",
" 0.041343 | \n",
" 0.040558 | \n",
" 0.032107 | \n",
" 0.067695 | \n",
" 0.027470 | \n",
" 0.171187 | \n",
" 0.509546 | \n",
" 0.384942 | \n",
" 1.000000 | \n",
" 0.025974 | \n",
" 2.711772 | \n",
" 0.992003 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_Random | \n",
" 1.518551 | \n",
" 1.218784 | \n",
" 0.050583 | \n",
" 0.024085 | \n",
" 0.027323 | \n",
" 0.034826 | \n",
" 0.031223 | \n",
" 0.026436 | \n",
" 0.054902 | \n",
" 0.020652 | \n",
" 0.137928 | \n",
" 0.508570 | \n",
" 0.353128 | \n",
" 0.987699 | \n",
" 0.183261 | \n",
" 5.093805 | \n",
" 0.908215 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_I-KNN | \n",
" 1.030386 | \n",
" 0.813067 | \n",
" 0.026087 | \n",
" 0.006908 | \n",
" 0.010593 | \n",
" 0.016046 | \n",
" 0.021137 | \n",
" 0.009522 | \n",
" 0.024214 | \n",
" 0.008958 | \n",
" 0.048068 | \n",
" 0.499885 | \n",
" 0.154825 | \n",
" 0.402333 | \n",
" 0.434343 | \n",
" 5.133650 | \n",
" 0.877999 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_I-KNNBaseline | \n",
" 0.935327 | \n",
" 0.737424 | \n",
" 0.002545 | \n",
" 0.000755 | \n",
" 0.001105 | \n",
" 0.001602 | \n",
" 0.002253 | \n",
" 0.000930 | \n",
" 0.003444 | \n",
" 0.001362 | \n",
" 0.011760 | \n",
" 0.496724 | \n",
" 0.021209 | \n",
" 0.482821 | \n",
" 0.059885 | \n",
" 2.232578 | \n",
" 0.994487 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_U-KNNBaseline | \n",
" 0.935327 | \n",
" 0.737424 | \n",
" 0.002545 | \n",
" 0.000755 | \n",
" 0.001105 | \n",
" 0.001602 | \n",
" 0.002253 | \n",
" 0.000930 | \n",
" 0.003444 | \n",
" 0.001362 | \n",
" 0.011760 | \n",
" 0.496724 | \n",
" 0.021209 | \n",
" 0.482821 | \n",
" 0.059885 | \n",
" 2.232578 | \n",
" 0.994487 | \n",
"
\n",
" \n",
" 0 | \n",
" Ready_U-KNN | \n",
" 1.023495 | \n",
" 0.807913 | \n",
" 0.000742 | \n",
" 0.000205 | \n",
" 0.000305 | \n",
" 0.000449 | \n",
" 0.000536 | \n",
" 0.000198 | \n",
" 0.000845 | \n",
" 0.000274 | \n",
" 0.002744 | \n",
" 0.496441 | \n",
" 0.007423 | \n",
" 0.602121 | \n",
" 0.010823 | \n",
" 2.089186 | \n",
" 0.995706 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_TopRated | \n",
" 1.033085 | \n",
" 0.822057 | \n",
" 0.000954 | \n",
" 0.000188 | \n",
" 0.000298 | \n",
" 0.000481 | \n",
" 0.000644 | \n",
" 0.000223 | \n",
" 0.001043 | \n",
" 0.000335 | \n",
" 0.003348 | \n",
" 0.496433 | \n",
" 0.009544 | \n",
" 0.699046 | \n",
" 0.005051 | \n",
" 1.945910 | \n",
" 0.995669 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_BaselineUI | \n",
" 0.967585 | \n",
" 0.762740 | \n",
" 0.000954 | \n",
" 0.000170 | \n",
" 0.000278 | \n",
" 0.000463 | \n",
" 0.000644 | \n",
" 0.000189 | \n",
" 0.000752 | \n",
" 0.000168 | \n",
" 0.001677 | \n",
" 0.496424 | \n",
" 0.009544 | \n",
" 0.600530 | \n",
" 0.005051 | \n",
" 1.803126 | \n",
" 0.996380 | \n",
"
\n",
" \n",
" 0 | \n",
" Self_IKNN | \n",
" 1.018363 | \n",
" 0.808793 | \n",
" 0.000318 | \n",
" 0.000108 | \n",
" 0.000140 | \n",
" 0.000189 | \n",
" 0.000000 | \n",
" 0.000000 | \n",
" 0.000214 | \n",
" 0.000037 | \n",
" 0.000368 | \n",
" 0.496391 | \n",
" 0.003181 | \n",
" 0.392153 | \n",
" 0.115440 | \n",
" 4.174741 | \n",
" 0.965327 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" Model RMSE MAE precision recall F_1 \\\n",
"0 Self_RP3Beta 3.702446 3.527273 0.282185 0.192092 0.186749 \n",
"0 Self_TopPop 2.508258 2.217909 0.188865 0.116919 0.118732 \n",
"0 Ready_SVD 0.952784 0.750597 0.095228 0.047497 0.053142 \n",
"0 Self_SVDBaseline 0.930321 0.734643 0.092683 0.042046 0.048568 \n",
"0 Ready_SVDBiased 0.940375 0.742264 0.092153 0.039645 0.046804 \n",
"0 Ready_Baseline 0.949459 0.752487 0.091410 0.037652 0.046030 \n",
"0 Self_SVD 0.939326 0.740022 0.074549 0.031755 0.038425 \n",
"0 Self_GlobalAvg 1.125760 0.943534 0.061188 0.025968 0.031383 \n",
"0 Ready_Random 1.518551 1.218784 0.050583 0.024085 0.027323 \n",
"0 Ready_I-KNN 1.030386 0.813067 0.026087 0.006908 0.010593 \n",
"0 Ready_I-KNNBaseline 0.935327 0.737424 0.002545 0.000755 0.001105 \n",
"0 Ready_U-KNNBaseline 0.935327 0.737424 0.002545 0.000755 0.001105 \n",
"0 Ready_U-KNN 1.023495 0.807913 0.000742 0.000205 0.000305 \n",
"0 Self_TopRated 1.033085 0.822057 0.000954 0.000188 0.000298 \n",
"0 Self_BaselineUI 0.967585 0.762740 0.000954 0.000170 0.000278 \n",
"0 Self_IKNN 1.018363 0.808793 0.000318 0.000108 0.000140 \n",
"\n",
" F_05 precision_super recall_super NDCG mAP MRR \\\n",
"0 0.216980 0.204185 0.240096 0.339114 0.204905 0.572157 \n",
"0 0.141584 0.130472 0.137473 0.214651 0.111707 0.400939 \n",
"0 0.067082 0.084871 0.076457 0.109075 0.050124 0.241366 \n",
"0 0.063218 0.082940 0.068730 0.098937 0.044405 0.203936 \n",
"0 0.061886 0.079399 0.055967 0.102017 0.047972 0.216876 \n",
"0 0.061286 0.079614 0.056463 0.095957 0.043178 0.198193 \n",
"0 0.050562 0.065665 0.050602 0.077117 0.031574 0.165509 \n",
"0 0.041343 0.040558 0.032107 0.067695 0.027470 0.171187 \n",
"0 0.034826 0.031223 0.026436 0.054902 0.020652 0.137928 \n",
"0 0.016046 0.021137 0.009522 0.024214 0.008958 0.048068 \n",
"0 0.001602 0.002253 0.000930 0.003444 0.001362 0.011760 \n",
"0 0.001602 0.002253 0.000930 0.003444 0.001362 0.011760 \n",
"0 0.000449 0.000536 0.000198 0.000845 0.000274 0.002744 \n",
"0 0.000481 0.000644 0.000223 0.001043 0.000335 0.003348 \n",
"0 0.000463 0.000644 0.000189 0.000752 0.000168 0.001677 \n",
"0 0.000189 0.000000 0.000000 0.000214 0.000037 0.000368 \n",
"\n",
" LAUC HR Reco in test Test coverage Shannon Gini \n",
"0 0.593544 0.875928 1.000000 0.077201 3.875892 0.974947 \n",
"0 0.555546 0.765642 1.000000 0.038961 3.159079 0.987317 \n",
"0 0.520459 0.499470 0.992047 0.217893 4.405246 0.953484 \n",
"0 0.517696 0.469777 1.000000 0.058442 3.085857 0.988824 \n",
"0 0.516515 0.441145 0.997455 0.167388 4.235348 0.962085 \n",
"0 0.515501 0.437964 1.000000 0.033911 2.836513 0.991139 \n",
"0 0.512485 0.414634 0.981866 0.080087 3.858982 0.975271 \n",
"0 0.509546 0.384942 1.000000 0.025974 2.711772 0.992003 \n",
"0 0.508570 0.353128 0.987699 0.183261 5.093805 0.908215 \n",
"0 0.499885 0.154825 0.402333 0.434343 5.133650 0.877999 \n",
"0 0.496724 0.021209 0.482821 0.059885 2.232578 0.994487 \n",
"0 0.496724 0.021209 0.482821 0.059885 2.232578 0.994487 \n",
"0 0.496441 0.007423 0.602121 0.010823 2.089186 0.995706 \n",
"0 0.496433 0.009544 0.699046 0.005051 1.945910 0.995669 \n",
"0 0.496424 0.009544 0.600530 0.005051 1.803126 0.996380 \n",
"0 0.496391 0.003181 0.392153 0.115440 4.174741 0.965327 "
]
},
"execution_count": 72,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"dir_path=\"Recommendations generated/ml-100k/\"\n",
"super_reactions=[4,5]\n",
"test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None)\n",
"\n",
"ev.evaluate_all(test, dir_path, super_reactions)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}