{
|
|
"cells": [
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"### Prepare test set"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 1,
|
|
"metadata": {
|
|
"slideshow": {
|
|
"slide_type": "-"
|
|
}
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"import pandas as pd\n",
|
|
"import numpy as np\n",
|
|
"import scipy.sparse as sparse\n",
|
|
"from collections import defaultdict\n",
|
|
"from itertools import chain\n",
|
|
"import random\n",
|
|
"from tqdm import tqdm\n",
|
|
"\n",
|
|
"# In evaluation we do not load train set - it is not needed\n",
|
|
"test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None)\n",
|
|
"test.columns=['user', 'item', 'rating', 'timestamp']\n",
|
|
"\n",
|
|
"test['user_code'] = test['user'].astype(\"category\").cat.codes\n",
|
|
"test['item_code'] = test['item'].astype(\"category\").cat.codes\n",
|
|
"\n",
|
|
"user_code_id = dict(enumerate(test['user'].astype(\"category\").cat.categories))\n",
|
|
"user_id_code = dict((v, k) for k, v in user_code_id.items())\n",
|
|
"item_code_id = dict(enumerate(test['item'].astype(\"category\").cat.categories))\n",
|
|
"item_id_code = dict((v, k) for k, v in item_code_id.items())\n",
|
|
"\n",
|
|
"test_ui = sparse.csr_matrix((test['rating'], (test['user_code'], test['item_code'])))"
|
|
]
|
|
},
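{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A minimal sanity-check sketch (added for illustration; it only uses objects defined above).\n",
"# It verifies that the code<->id dictionaries invert each other and that the sparse matrix\n",
"# has one row per distinct test user and one column per distinct test item.\n",
"assert all(user_id_code[user_code_id[c]]==c for c in user_code_id)\n",
"assert all(item_id_code[item_code_id[c]]==c for c in item_code_id)\n",
"assert test_ui.shape==(test['user'].nunique(), test['item'].nunique())"
]
},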
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"### Estimations metrics"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 2,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"estimations_df=pd.read_csv('Recommendations generated/ml-100k/Ready_Baseline_estimations.csv', header=None)\n",
|
|
"estimations_df.columns=['user', 'item' ,'score']\n",
|
|
"\n",
|
|
"estimations_df['user_code']=[user_id_code[user] for user in estimations_df['user']]\n",
|
|
"estimations_df['item_code']=[item_id_code[item] for item in estimations_df['item']]\n",
|
|
"estimations=sparse.csr_matrix((estimations_df['score'], (estimations_df['user_code'], estimations_df['item_code'])), shape=test_ui.shape)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 3,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"def estimations_metrics(test_ui, estimations):\n",
|
|
" result=[]\n",
|
|
"\n",
|
|
" RMSE=(np.sum((estimations.data-test_ui.data)**2)/estimations.nnz)**(1/2)\n",
|
|
" result.append(['RMSE', RMSE])\n",
|
|
"\n",
|
|
" MAE=np.sum(abs(estimations.data-test_ui.data))/estimations.nnz\n",
|
|
" result.append(['MAE', MAE])\n",
|
|
" \n",
|
|
" df_result=(pd.DataFrame(list(zip(*result))[1])).T\n",
|
|
" df_result.columns=list(zip(*result))[0]\n",
|
|
" return df_result"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 4,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>RMSE</th>\n",
|
|
" <th>MAE</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>0.949459</td>\n",
|
|
" <td>0.752487</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" RMSE MAE\n",
|
|
"0 0.949459 0.752487"
|
|
]
|
|
},
|
|
"execution_count": 4,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"# in case of error (in the laboratories) you might have to switch to the other version of pandas\n",
|
|
"# try !pip3 install pandas=='1.0.3' (or pip if you use python 2) and restart the kernel\n",
|
|
"\n",
|
|
"estimations_metrics(test_ui, estimations)"
|
|
]
|
|
},
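{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustration only (not part of the original notebook): estimations_metrics implicitly assumes\n",
"# that estimations and test_ui share the same sparsity pattern, so their .data arrays are aligned.\n",
"# A tiny hand-made example of what RMSE and MAE reduce to in that case:\n",
"true_ratings=np.array([5, 3, 4])\n",
"predicted=np.array([4.5, 3.5, 2.0])\n",
"rmse=np.sqrt(np.mean((predicted-true_ratings)**2))  # sqrt((0.25+0.25+4)/3) ~ 1.225\n",
"mae=np.mean(np.abs(predicted-true_ratings))         # (0.5+0.5+2)/3 = 1.0\n",
"print(rmse, mae)"
]
},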
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"### Ranking metrics"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 5,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"array([[663, 475, 62, ..., 472, 269, 503],\n",
|
|
" [ 48, 313, 475, ..., 591, 175, 466],\n",
|
|
" [351, 313, 475, ..., 591, 175, 466],\n",
|
|
" ...,\n",
|
|
" [259, 313, 475, ..., 11, 591, 175],\n",
|
|
" [ 33, 313, 475, ..., 11, 591, 175],\n",
|
|
" [ 77, 313, 475, ..., 11, 591, 175]])"
|
|
]
|
|
},
|
|
"execution_count": 5,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"import numpy as np\n",
|
|
"reco = np.loadtxt('Recommendations generated/ml-100k/Ready_Baseline_reco.csv', delimiter=',')\n",
|
|
"# Let's ignore scores - they are not used in evaluation: \n",
|
|
"users=reco[:,:1]\n",
|
|
"items=reco[:,1::2]\n",
|
|
"# Let's use inner ids instead of real ones\n",
|
|
"users=np.vectorize(lambda x: user_id_code.setdefault(x, -1))(users)\n",
|
|
"items=np.vectorize(lambda x: item_id_code.setdefault(x, -1))(items) # maybe items we recommend are not in test set\n",
|
|
"# Let's put them into one array\n",
|
|
"reco=np.concatenate((users, items), axis=1)\n",
|
|
"reco"
|
|
]
|
|
},
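{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustration of the assumed file layout (toy_row is made up): each row of the reco CSV is\n",
"# [user, item_1, score_1, item_2, score_2, ...], so column 0 holds the user id,\n",
"# the odd columns hold the recommended items and the remaining even columns hold their scores.\n",
"toy_row=np.array([[789., 100., 4.8, 200., 4.5, 300., 4.1]])\n",
"print('user:  ', toy_row[:,:1])    # [[789.]]\n",
"print('items: ', toy_row[:,1::2])  # [[100. 200. 300.]]\n",
"print('scores:', toy_row[:,2::2])  # [[4.8 4.5 4.1]]"
]
},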
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 6,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"def ranking_metrics(test_ui, reco, super_reactions=[], topK=10):\n",
|
|
" \n",
|
|
" nb_items=test_ui.shape[1]\n",
|
|
" relevant_users, super_relevant_users, prec, rec, F_1, F_05, prec_super, rec_super, ndcg, mAP, MRR, LAUC, HR=\\\n",
|
|
" 0,0,0,0,0,0,0,0,0,0,0,0,0\n",
|
|
" \n",
|
|
" cg = (1.0 / np.log2(np.arange(2, topK + 2)))\n",
|
|
" cg_sum = np.cumsum(cg)\n",
|
|
" \n",
|
|
" for (nb_user, user) in tqdm(enumerate(reco[:,0])):\n",
|
|
" u_rated_items=test_ui.indices[test_ui.indptr[user]:test_ui.indptr[user+1]]\n",
|
|
" nb_u_rated_items=len(u_rated_items)\n",
|
|
" if nb_u_rated_items>0: # skip users with no items in test set (still possible that there will be no super items)\n",
|
|
" relevant_users+=1\n",
|
|
" \n",
|
|
" u_super_items=u_rated_items[np.vectorize(lambda x: x in super_reactions)\\\n",
|
|
" (test_ui.data[test_ui.indptr[user]:test_ui.indptr[user+1]])]\n",
|
|
" # more natural seems u_super_items=[item for item in u_rated_items if test_ui[user,item] in super_reactions]\n",
|
|
" # but accesing test_ui[user,item] is expensive -we should avoid doing it\n",
|
|
" if len(u_super_items)>0:\n",
|
|
" super_relevant_users+=1\n",
|
|
" \n",
|
|
" user_successes=np.zeros(topK)\n",
|
|
" nb_user_successes=0\n",
|
|
" user_super_successes=np.zeros(topK)\n",
|
|
" nb_user_super_successes=0\n",
|
|
" \n",
|
|
" # evaluation\n",
|
|
" for (item_position,item) in enumerate(reco[nb_user,1:topK+1]):\n",
|
|
" if item in u_rated_items:\n",
|
|
" user_successes[item_position]=1\n",
|
|
" nb_user_successes+=1\n",
|
|
" if item in u_super_items:\n",
|
|
" user_super_successes[item_position]=1\n",
|
|
" nb_user_super_successes+=1\n",
|
|
" \n",
|
|
" prec_u=nb_user_successes/topK \n",
|
|
" prec+=prec_u\n",
|
|
" \n",
|
|
" rec_u=nb_user_successes/nb_u_rated_items\n",
|
|
" rec+=rec_u\n",
|
|
" \n",
|
|
" F_1+=2*(prec_u*rec_u)/(prec_u+rec_u) if prec_u+rec_u>0 else 0\n",
|
|
" F_05+=(0.5**2+1)*(prec_u*rec_u)/(0.5**2*prec_u+rec_u) if prec_u+rec_u>0 else 0\n",
|
|
" \n",
|
|
" prec_super+=nb_user_super_successes/topK\n",
|
|
" rec_super+=nb_user_super_successes/max(len(u_super_items),1) # to set 0 if no super items\n",
|
|
" ndcg+=np.dot(user_successes,cg)/cg_sum[min(topK, nb_u_rated_items)-1]\n",
|
|
" \n",
|
|
" cumsum_successes=np.cumsum(user_successes)\n",
|
|
" mAP+=np.dot(cumsum_successes/np.arange(1,topK+1), user_successes)/min(topK, nb_u_rated_items)\n",
|
|
" MRR+=1/(user_successes.nonzero()[0][0]+1) if user_successes.nonzero()[0].size>0 else 0\n",
|
|
" LAUC+=(np.dot(cumsum_successes, 1-user_successes)+\\\n",
|
|
" (nb_user_successes+nb_u_rated_items)/2*((nb_items-nb_u_rated_items)-(topK-nb_user_successes)))/\\\n",
|
|
" ((nb_items-nb_u_rated_items)*nb_u_rated_items)\n",
|
|
" \n",
|
|
" HR+=nb_user_successes>0\n",
|
|
" \n",
|
|
" \n",
|
|
" result=[]\n",
|
|
" result.append(('precision', prec/relevant_users))\n",
|
|
" result.append(('recall', rec/relevant_users))\n",
|
|
" result.append(('F_1', F_1/relevant_users))\n",
|
|
" result.append(('F_05', F_05/relevant_users))\n",
|
|
" result.append(('precision_super', prec_super/super_relevant_users))\n",
|
|
" result.append(('recall_super', rec_super/super_relevant_users))\n",
|
|
" result.append(('NDCG', ndcg/relevant_users))\n",
|
|
" result.append(('mAP', mAP/relevant_users))\n",
|
|
" result.append(('MRR', MRR/relevant_users))\n",
|
|
" result.append(('LAUC', LAUC/relevant_users))\n",
|
|
" result.append(('HR', HR/relevant_users))\n",
|
|
"\n",
|
|
" df_result=(pd.DataFrame(list(zip(*result))[1])).T\n",
|
|
" df_result.columns=list(zip(*result))[0]\n",
|
|
" return df_result"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 7,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stderr",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"943it [00:00, 7666.87it/s]\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>precision</th>\n",
|
|
" <th>recall</th>\n",
|
|
" <th>F_1</th>\n",
|
|
" <th>F_05</th>\n",
|
|
" <th>precision_super</th>\n",
|
|
" <th>recall_super</th>\n",
|
|
" <th>NDCG</th>\n",
|
|
" <th>mAP</th>\n",
|
|
" <th>MRR</th>\n",
|
|
" <th>LAUC</th>\n",
|
|
" <th>HR</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>0.09141</td>\n",
|
|
" <td>0.037652</td>\n",
|
|
" <td>0.04603</td>\n",
|
|
" <td>0.061286</td>\n",
|
|
" <td>0.079614</td>\n",
|
|
" <td>0.056463</td>\n",
|
|
" <td>0.095957</td>\n",
|
|
" <td>0.043178</td>\n",
|
|
" <td>0.198193</td>\n",
|
|
" <td>0.515501</td>\n",
|
|
" <td>0.437964</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" precision recall F_1 F_05 precision_super recall_super \\\n",
|
|
"0 0.09141 0.037652 0.04603 0.061286 0.079614 0.056463 \n",
|
|
"\n",
|
|
" NDCG mAP MRR LAUC HR \n",
|
|
"0 0.095957 0.043178 0.198193 0.515501 0.437964 "
|
|
]
|
|
},
|
|
"execution_count": 7,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"ranking_metrics(test_ui, reco, super_reactions=[4,5], topK=10)"
|
|
]
|
|
},
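{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A small hand check (illustration only) of the per-user quantities accumulated in ranking_metrics,\n",
"# on made-up data: topK=5, one recommendation list and one set of items the user rated in the test set.\n",
"topK=5\n",
"recommended_items=[10, 20, 30, 40, 50]\n",
"rated_in_test={20, 50, 60}\n",
"hits=np.array([int(i in rated_in_test) for i in recommended_items])  # [0, 1, 0, 0, 1]\n",
"prec_u=hits.sum()/topK               # 2/5 = 0.4\n",
"rec_u=hits.sum()/len(rated_in_test)  # 2/3\n",
"mrr_u=1/(hits.nonzero()[0][0]+1)     # first hit at position 2 -> 1/2\n",
"cg=1.0/np.log2(np.arange(2, topK+2))\n",
"ndcg_u=np.dot(hits, cg)/np.cumsum(cg)[min(topK, len(rated_in_test))-1]\n",
"print(prec_u, rec_u, mrr_u, ndcg_u)"
]
},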
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"### Diversity metrics"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 8,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"def diversity_metrics(test_ui, reco, topK=10):\n",
|
|
" \n",
|
|
" frequencies=defaultdict(int)\n",
|
|
" \n",
|
|
" # let's assign 0 to all items in test set\n",
|
|
" for item in list(set(test_ui.indices)):\n",
|
|
" frequencies[item]=0\n",
|
|
" \n",
|
|
" # counting frequencies\n",
|
|
" for item in reco[:,1:].flat:\n",
|
|
" frequencies[item]+=1\n",
|
|
" \n",
|
|
" nb_reco_outside_test=frequencies[-1]\n",
|
|
" del frequencies[-1]\n",
|
|
" \n",
|
|
" frequencies=np.array(list(frequencies.values()))\n",
|
|
" \n",
|
|
" nb_rec_items=len(frequencies[frequencies>0])\n",
|
|
" nb_reco_inside_test=np.sum(frequencies)\n",
|
|
" \n",
|
|
" frequencies=frequencies/np.sum(frequencies)\n",
|
|
" frequencies=np.sort(frequencies)\n",
|
|
" \n",
|
|
" with np.errstate(divide='ignore'): # let's put zeros put items with 0 frequency and ignore division warning\n",
|
|
" log_frequencies=np.nan_to_num(np.log(frequencies), posinf=0, neginf=0)\n",
|
|
" \n",
|
|
" result=[]\n",
|
|
" result.append(('Reco in test', nb_reco_inside_test/(nb_reco_inside_test+nb_reco_outside_test)))\n",
|
|
" result.append(('Test coverage', nb_rec_items/test_ui.shape[1]))\n",
|
|
" result.append(('Shannon', -np.dot(frequencies, log_frequencies)))\n",
|
|
" result.append(('Gini', np.dot(frequencies, np.arange(1-len(frequencies), len(frequencies), 2))/(len(frequencies)-1)))\n",
|
|
" \n",
|
|
" df_result=(pd.DataFrame(list(zip(*result))[1])).T\n",
|
|
" df_result.columns=list(zip(*result))[0]\n",
|
|
" return df_result"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 9,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>Reco in test</th>\n",
|
|
" <th>Test coverage</th>\n",
|
|
" <th>Shannon</th>\n",
|
|
" <th>Gini</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>1.0</td>\n",
|
|
" <td>0.033911</td>\n",
|
|
" <td>2.836513</td>\n",
|
|
" <td>0.991139</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" Reco in test Test coverage Shannon Gini\n",
|
|
"0 1.0 0.033911 2.836513 0.991139"
|
|
]
|
|
},
|
|
"execution_count": 9,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"# in case of errors try !pip3 install numpy==1.18.4 (or pip if you use python 2) and restart the kernel\n",
|
|
"\n",
|
|
"import evaluation_measures as ev\n",
|
|
"import imp\n",
|
|
"imp.reload(ev)\n",
|
|
"\n",
|
|
"x=diversity_metrics(test_ui, reco, topK=10)\n",
|
|
"x"
|
|
]
|
|
},
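{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustration only: Shannon entropy and the Gini index exactly as computed in diversity_metrics,\n",
"# on two extreme, made-up recommendation-frequency profiles.\n",
"def shannon_gini(freq):\n",
"    freq=np.sort(freq/freq.sum())\n",
"    with np.errstate(divide='ignore'):\n",
"        log_freq=np.nan_to_num(np.log(freq), neginf=0)\n",
"    shannon=-np.dot(freq, log_freq)\n",
"    gini=np.dot(freq, np.arange(1-len(freq), len(freq), 2))/(len(freq)-1)\n",
"    return shannon, gini\n",
"\n",
"print(shannon_gini(np.array([1., 1., 1., 1.])))  # uniform: Shannon=ln(4)~1.386, Gini=0\n",
"print(shannon_gini(np.array([0., 0., 0., 4.])))  # everything on one item: Shannon=0, Gini=1"
]
},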
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"# To be used in other notebooks"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 10,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stderr",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"943it [00:00, 7370.69it/s]\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>RMSE</th>\n",
|
|
" <th>MAE</th>\n",
|
|
" <th>precision</th>\n",
|
|
" <th>recall</th>\n",
|
|
" <th>F_1</th>\n",
|
|
" <th>F_05</th>\n",
|
|
" <th>precision_super</th>\n",
|
|
" <th>recall_super</th>\n",
|
|
" <th>NDCG</th>\n",
|
|
" <th>mAP</th>\n",
|
|
" <th>MRR</th>\n",
|
|
" <th>LAUC</th>\n",
|
|
" <th>HR</th>\n",
|
|
" <th>Reco in test</th>\n",
|
|
" <th>Test coverage</th>\n",
|
|
" <th>Shannon</th>\n",
|
|
" <th>Gini</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>0.949459</td>\n",
|
|
" <td>0.752487</td>\n",
|
|
" <td>0.09141</td>\n",
|
|
" <td>0.037652</td>\n",
|
|
" <td>0.04603</td>\n",
|
|
" <td>0.061286</td>\n",
|
|
" <td>0.079614</td>\n",
|
|
" <td>0.056463</td>\n",
|
|
" <td>0.095957</td>\n",
|
|
" <td>0.043178</td>\n",
|
|
" <td>0.198193</td>\n",
|
|
" <td>0.515501</td>\n",
|
|
" <td>0.437964</td>\n",
|
|
" <td>1.0</td>\n",
|
|
" <td>0.033911</td>\n",
|
|
" <td>2.836513</td>\n",
|
|
" <td>0.991139</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" RMSE MAE precision recall F_1 F_05 \\\n",
|
|
"0 0.949459 0.752487 0.09141 0.037652 0.04603 0.061286 \n",
|
|
"\n",
|
|
" precision_super recall_super NDCG mAP MRR LAUC \\\n",
|
|
"0 0.079614 0.056463 0.095957 0.043178 0.198193 0.515501 \n",
|
|
"\n",
|
|
" HR Reco in test Test coverage Shannon Gini \n",
|
|
"0 0.437964 1.0 0.033911 2.836513 0.991139 "
|
|
]
|
|
},
|
|
"execution_count": 10,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"import evaluation_measures as ev\n",
|
|
"import imp\n",
|
|
"imp.reload(ev)\n",
|
|
"\n",
|
|
"estimations_df=pd.read_csv('Recommendations generated/ml-100k/Ready_Baseline_estimations.csv', header=None)\n",
|
|
"reco=np.loadtxt('Recommendations generated/ml-100k/Ready_Baseline_reco.csv', delimiter=',')\n",
|
|
"\n",
|
|
"ev.evaluate(test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None),\n",
|
|
" estimations_df=estimations_df, \n",
|
|
" reco=reco,\n",
|
|
" super_reactions=[4,5])\n",
|
|
"#also you can just type ev.evaluate_all(estimations_df, reco) - I put above values as default"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 11,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stderr",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"943it [00:00, 7772.74it/s]\n",
|
|
"943it [00:00, 5607.69it/s]\n",
|
|
"943it [00:00, 4737.64it/s]\n",
|
|
"943it [00:00, 4986.41it/s]\n",
|
|
"943it [00:00, 3513.77it/s]\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"import evaluation_measures as ev\n",
|
|
"import imp\n",
|
|
"imp.reload(ev)\n",
|
|
"\n",
|
|
"dir_path=\"Recommendations generated/ml-100k/\"\n",
|
|
"super_reactions=[4,5]\n",
|
|
"test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None)\n",
|
|
"\n",
|
|
"df=ev.evaluate_all(test, dir_path, super_reactions)\n",
|
|
"#also you can just type ev.evaluate_all() - I put above values as default"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 12,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>Model</th>\n",
|
|
" <th>RMSE</th>\n",
|
|
" <th>MAE</th>\n",
|
|
" <th>precision</th>\n",
|
|
" <th>recall</th>\n",
|
|
" <th>F_1</th>\n",
|
|
" <th>F_05</th>\n",
|
|
" <th>precision_super</th>\n",
|
|
" <th>recall_super</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Self_TopPop</td>\n",
|
|
" <td>2.508258</td>\n",
|
|
" <td>2.217909</td>\n",
|
|
" <td>0.188865</td>\n",
|
|
" <td>0.116919</td>\n",
|
|
" <td>0.118732</td>\n",
|
|
" <td>0.141584</td>\n",
|
|
" <td>0.130472</td>\n",
|
|
" <td>0.137473</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Ready_Baseline</td>\n",
|
|
" <td>0.949459</td>\n",
|
|
" <td>0.752487</td>\n",
|
|
" <td>0.091410</td>\n",
|
|
" <td>0.037652</td>\n",
|
|
" <td>0.046030</td>\n",
|
|
" <td>0.061286</td>\n",
|
|
" <td>0.079614</td>\n",
|
|
" <td>0.056463</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Self_GlobalAvg</td>\n",
|
|
" <td>1.125760</td>\n",
|
|
" <td>0.943534</td>\n",
|
|
" <td>0.061188</td>\n",
|
|
" <td>0.025968</td>\n",
|
|
" <td>0.031383</td>\n",
|
|
" <td>0.041343</td>\n",
|
|
" <td>0.040558</td>\n",
|
|
" <td>0.032107</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Ready_Random</td>\n",
|
|
" <td>1.531724</td>\n",
|
|
" <td>1.230384</td>\n",
|
|
" <td>0.049417</td>\n",
|
|
" <td>0.022558</td>\n",
|
|
" <td>0.025490</td>\n",
|
|
" <td>0.033242</td>\n",
|
|
" <td>0.030365</td>\n",
|
|
" <td>0.022626</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Self_BaselineUI</td>\n",
|
|
" <td>0.967585</td>\n",
|
|
" <td>0.762740</td>\n",
|
|
" <td>0.000954</td>\n",
|
|
" <td>0.000170</td>\n",
|
|
" <td>0.000278</td>\n",
|
|
" <td>0.000463</td>\n",
|
|
" <td>0.000644</td>\n",
|
|
" <td>0.000189</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" Model RMSE MAE precision recall F_1 \\\n",
|
|
"0 Self_TopPop 2.508258 2.217909 0.188865 0.116919 0.118732 \n",
|
|
"0 Ready_Baseline 0.949459 0.752487 0.091410 0.037652 0.046030 \n",
|
|
"0 Self_GlobalAvg 1.125760 0.943534 0.061188 0.025968 0.031383 \n",
|
|
"0 Ready_Random 1.531724 1.230384 0.049417 0.022558 0.025490 \n",
|
|
"0 Self_BaselineUI 0.967585 0.762740 0.000954 0.000170 0.000278 \n",
|
|
"\n",
|
|
" F_05 precision_super recall_super \n",
|
|
"0 0.141584 0.130472 0.137473 \n",
|
|
"0 0.061286 0.079614 0.056463 \n",
|
|
"0 0.041343 0.040558 0.032107 \n",
|
|
"0 0.033242 0.030365 0.022626 \n",
|
|
"0 0.000463 0.000644 0.000189 "
|
|
]
|
|
},
|
|
"execution_count": 12,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"df.iloc[:,:9]"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 13,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>Model</th>\n",
|
|
" <th>NDCG</th>\n",
|
|
" <th>mAP</th>\n",
|
|
" <th>MRR</th>\n",
|
|
" <th>LAUC</th>\n",
|
|
" <th>HR</th>\n",
|
|
" <th>Reco in test</th>\n",
|
|
" <th>Test coverage</th>\n",
|
|
" <th>Shannon</th>\n",
|
|
" <th>Gini</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Self_TopPop</td>\n",
|
|
" <td>0.214651</td>\n",
|
|
" <td>0.111707</td>\n",
|
|
" <td>0.400939</td>\n",
|
|
" <td>0.555546</td>\n",
|
|
" <td>0.765642</td>\n",
|
|
" <td>1.000000</td>\n",
|
|
" <td>0.038961</td>\n",
|
|
" <td>3.159079</td>\n",
|
|
" <td>0.987317</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Ready_Baseline</td>\n",
|
|
" <td>0.095957</td>\n",
|
|
" <td>0.043178</td>\n",
|
|
" <td>0.198193</td>\n",
|
|
" <td>0.515501</td>\n",
|
|
" <td>0.437964</td>\n",
|
|
" <td>1.000000</td>\n",
|
|
" <td>0.033911</td>\n",
|
|
" <td>2.836513</td>\n",
|
|
" <td>0.991139</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Self_GlobalAvg</td>\n",
|
|
" <td>0.067695</td>\n",
|
|
" <td>0.027470</td>\n",
|
|
" <td>0.171187</td>\n",
|
|
" <td>0.509546</td>\n",
|
|
" <td>0.384942</td>\n",
|
|
" <td>1.000000</td>\n",
|
|
" <td>0.025974</td>\n",
|
|
" <td>2.711772</td>\n",
|
|
" <td>0.992003</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Ready_Random</td>\n",
|
|
" <td>0.054166</td>\n",
|
|
" <td>0.021656</td>\n",
|
|
" <td>0.128378</td>\n",
|
|
" <td>0.507802</td>\n",
|
|
" <td>0.325557</td>\n",
|
|
" <td>0.988865</td>\n",
|
|
" <td>0.190476</td>\n",
|
|
" <td>5.100033</td>\n",
|
|
" <td>0.907724</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Self_BaselineUI</td>\n",
|
|
" <td>0.000752</td>\n",
|
|
" <td>0.000168</td>\n",
|
|
" <td>0.001677</td>\n",
|
|
" <td>0.496424</td>\n",
|
|
" <td>0.009544</td>\n",
|
|
" <td>0.600530</td>\n",
|
|
" <td>0.005051</td>\n",
|
|
" <td>1.803126</td>\n",
|
|
" <td>0.996380</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" Model NDCG mAP MRR LAUC HR \\\n",
|
|
"0 Self_TopPop 0.214651 0.111707 0.400939 0.555546 0.765642 \n",
|
|
"0 Ready_Baseline 0.095957 0.043178 0.198193 0.515501 0.437964 \n",
|
|
"0 Self_GlobalAvg 0.067695 0.027470 0.171187 0.509546 0.384942 \n",
|
|
"0 Ready_Random 0.054166 0.021656 0.128378 0.507802 0.325557 \n",
|
|
"0 Self_BaselineUI 0.000752 0.000168 0.001677 0.496424 0.009544 \n",
|
|
"\n",
|
|
" Reco in test Test coverage Shannon Gini \n",
|
|
"0 1.000000 0.038961 3.159079 0.987317 \n",
|
|
"0 1.000000 0.033911 2.836513 0.991139 \n",
|
|
"0 1.000000 0.025974 2.711772 0.992003 \n",
|
|
"0 0.988865 0.190476 5.100033 0.907724 \n",
|
|
"0 0.600530 0.005051 1.803126 0.996380 "
|
|
]
|
|
},
|
|
"execution_count": 13,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"df.iloc[:,np.append(0,np.arange(9, df.shape[1]))]"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"# Check metrics on toy dataset"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 14,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stderr",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"3it [00:00, 1941.81it/s]\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>Model</th>\n",
|
|
" <th>RMSE</th>\n",
|
|
" <th>MAE</th>\n",
|
|
" <th>precision</th>\n",
|
|
" <th>recall</th>\n",
|
|
" <th>F_1</th>\n",
|
|
" <th>F_05</th>\n",
|
|
" <th>precision_super</th>\n",
|
|
" <th>recall_super</th>\n",
|
|
" <th>NDCG</th>\n",
|
|
" <th>mAP</th>\n",
|
|
" <th>MRR</th>\n",
|
|
" <th>LAUC</th>\n",
|
|
" <th>HR</th>\n",
|
|
" <th>Reco in test</th>\n",
|
|
" <th>Test coverage</th>\n",
|
|
" <th>Shannon</th>\n",
|
|
" <th>Gini</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Self_BaselineUI</td>\n",
|
|
" <td>1.612452</td>\n",
|
|
" <td>1.4</td>\n",
|
|
" <td>0.444444</td>\n",
|
|
" <td>0.888889</td>\n",
|
|
" <td>0.555556</td>\n",
|
|
" <td>0.478632</td>\n",
|
|
" <td>0.333333</td>\n",
|
|
" <td>0.75</td>\n",
|
|
" <td>0.676907</td>\n",
|
|
" <td>0.574074</td>\n",
|
|
" <td>0.611111</td>\n",
|
|
" <td>0.638889</td>\n",
|
|
" <td>1.0</td>\n",
|
|
" <td>0.888889</td>\n",
|
|
" <td>0.8</td>\n",
|
|
" <td>1.386294</td>\n",
|
|
" <td>0.25</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" Model RMSE MAE precision recall F_1 F_05 \\\n",
|
|
"0 Self_BaselineUI 1.612452 1.4 0.444444 0.888889 0.555556 0.478632 \n",
|
|
"\n",
|
|
" precision_super recall_super NDCG mAP MRR LAUC HR \\\n",
|
|
"0 0.333333 0.75 0.676907 0.574074 0.611111 0.638889 1.0 \n",
|
|
"\n",
|
|
" Reco in test Test coverage Shannon Gini \n",
|
|
"0 0.888889 0.8 1.386294 0.25 "
|
|
]
|
|
},
|
|
"metadata": {},
|
|
"output_type": "display_data"
|
|
},
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Training data:\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"matrix([[3, 4, 0, 0, 5, 0, 0, 4],\n",
|
|
" [0, 1, 2, 3, 0, 0, 0, 0],\n",
|
|
" [0, 0, 0, 5, 0, 3, 4, 0]])"
|
|
]
|
|
},
|
|
"metadata": {},
|
|
"output_type": "display_data"
|
|
},
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Test data:\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"matrix([[0, 0, 0, 0, 0, 0, 3, 0],\n",
|
|
" [0, 0, 0, 0, 5, 0, 0, 0],\n",
|
|
" [5, 0, 4, 0, 0, 0, 0, 2]])"
|
|
]
|
|
},
|
|
"metadata": {},
|
|
"output_type": "display_data"
|
|
},
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Recommendations:\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>0</th>\n",
|
|
" <th>1</th>\n",
|
|
" <th>2</th>\n",
|
|
" <th>3</th>\n",
|
|
" <th>4</th>\n",
|
|
" <th>5</th>\n",
|
|
" <th>6</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>0</td>\n",
|
|
" <td>30</td>\n",
|
|
" <td>5.0</td>\n",
|
|
" <td>20</td>\n",
|
|
" <td>4.0</td>\n",
|
|
" <td>60</td>\n",
|
|
" <td>4.0</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>1</th>\n",
|
|
" <td>10</td>\n",
|
|
" <td>40</td>\n",
|
|
" <td>3.0</td>\n",
|
|
" <td>60</td>\n",
|
|
" <td>2.0</td>\n",
|
|
" <td>70</td>\n",
|
|
" <td>2.0</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>2</th>\n",
|
|
" <td>20</td>\n",
|
|
" <td>40</td>\n",
|
|
" <td>5.0</td>\n",
|
|
" <td>20</td>\n",
|
|
" <td>4.0</td>\n",
|
|
" <td>70</td>\n",
|
|
" <td>4.0</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" 0 1 2 3 4 5 6\n",
|
|
"0 0 30 5.0 20 4.0 60 4.0\n",
|
|
"1 10 40 3.0 60 2.0 70 2.0\n",
|
|
"2 20 40 5.0 20 4.0 70 4.0"
|
|
]
|
|
},
|
|
"metadata": {},
|
|
"output_type": "display_data"
|
|
},
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Estimations:\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>user</th>\n",
|
|
" <th>item</th>\n",
|
|
" <th>est_score</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>0</td>\n",
|
|
" <td>60</td>\n",
|
|
" <td>4.0</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>1</th>\n",
|
|
" <td>10</td>\n",
|
|
" <td>40</td>\n",
|
|
" <td>3.0</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>2</th>\n",
|
|
" <td>20</td>\n",
|
|
" <td>0</td>\n",
|
|
" <td>3.0</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>3</th>\n",
|
|
" <td>20</td>\n",
|
|
" <td>20</td>\n",
|
|
" <td>4.0</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>4</th>\n",
|
|
" <td>20</td>\n",
|
|
" <td>70</td>\n",
|
|
" <td>4.0</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" user item est_score\n",
|
|
"0 0 60 4.0\n",
|
|
"1 10 40 3.0\n",
|
|
"2 20 0 3.0\n",
|
|
"3 20 20 4.0\n",
|
|
"4 20 70 4.0"
|
|
]
|
|
},
|
|
"metadata": {},
|
|
"output_type": "display_data"
|
|
}
|
|
],
|
|
"source": [
|
|
"import evaluation_measures as ev\n",
|
|
"import imp\n",
|
|
"import helpers\n",
|
|
"imp.reload(ev)\n",
|
|
"\n",
|
|
"dir_path=\"Recommendations generated/toy-example/\"\n",
|
|
"super_reactions=[4,5]\n",
|
|
"test=pd.read_csv('./Datasets/toy-example/test.csv', sep='\\t', header=None)\n",
|
|
"\n",
|
|
"display(ev.evaluate_all(test, dir_path, super_reactions, topK=3))\n",
|
|
"#also you can just type ev.evaluate_all() - I put above values as default\n",
|
|
"\n",
|
|
"toy_train_read=pd.read_csv('./Datasets/toy-example/train.csv', sep='\\t', header=None, names=['user', 'item', 'rating', 'timestamp'])\n",
|
|
"toy_test_read=pd.read_csv('./Datasets/toy-example/test.csv', sep='\\t', header=None, names=['user', 'item', 'rating', 'timestamp'])\n",
|
|
"reco=pd.read_csv('Recommendations generated/toy-example/Self_BaselineUI_reco.csv', header=None)\n",
|
|
"estimations=pd.read_csv('Recommendations generated/toy-example/Self_BaselineUI_estimations.csv', names=['user', 'item', 'est_score'])\n",
|
|
"toy_train_ui, toy_test_ui, toy_user_code_id, toy_user_id_code, \\\n",
|
|
"toy_item_code_id, toy_item_id_code = helpers.data_to_csr(toy_train_read, toy_test_read)\n",
|
|
"\n",
|
|
"print('Training data:')\n",
|
|
"display(toy_train_ui.todense())\n",
|
|
"\n",
|
|
"print('Test data:')\n",
|
|
"display(toy_test_ui.todense())\n",
|
|
"\n",
|
|
"print('Recommendations:')\n",
|
|
"display(reco)\n",
|
|
"\n",
|
|
"print('Estimations:')\n",
|
|
"display(estimations)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"# Sample recommendations"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 15,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Here is what user rated high:\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>user</th>\n",
|
|
" <th>rating</th>\n",
|
|
" <th>title</th>\n",
|
|
" <th>genres</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>2985</th>\n",
|
|
" <td>789</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Star Wars (1977)</td>\n",
|
|
" <td>Action, Adventure, Romance, Sci-Fi, War</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>25980</th>\n",
|
|
" <td>789</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Dead Man Walking (1995)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>9357</th>\n",
|
|
" <td>789</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Last Supper, The (1995)</td>\n",
|
|
" <td>Drama, Thriller</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>17306</th>\n",
|
|
" <td>789</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Leaving Las Vegas (1995)</td>\n",
|
|
" <td>Drama, Romance</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>36474</th>\n",
|
|
" <td>789</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Swingers (1996)</td>\n",
|
|
" <td>Comedy, Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>65139</th>\n",
|
|
" <td>789</td>\n",
|
|
" <td>4</td>\n",
|
|
" <td>Welcome to the Dollhouse (1995)</td>\n",
|
|
" <td>Comedy, Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>61975</th>\n",
|
|
" <td>789</td>\n",
|
|
" <td>4</td>\n",
|
|
" <td>Private Parts (1997)</td>\n",
|
|
" <td>Comedy, Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>56522</th>\n",
|
|
" <td>789</td>\n",
|
|
" <td>4</td>\n",
|
|
" <td>Waiting for Guffman (1996)</td>\n",
|
|
" <td>Comedy</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>41414</th>\n",
|
|
" <td>789</td>\n",
|
|
" <td>4</td>\n",
|
|
" <td>Donnie Brasco (1997)</td>\n",
|
|
" <td>Crime, Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>36617</th>\n",
|
|
" <td>789</td>\n",
|
|
" <td>4</td>\n",
|
|
" <td>Lone Star (1996)</td>\n",
|
|
" <td>Drama, Mystery</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>24501</th>\n",
|
|
" <td>789</td>\n",
|
|
" <td>4</td>\n",
|
|
" <td>People vs. Larry Flynt, The (1996)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>20210</th>\n",
|
|
" <td>789</td>\n",
|
|
" <td>4</td>\n",
|
|
" <td>Return of the Jedi (1983)</td>\n",
|
|
" <td>Action, Adventure, Romance, Sci-Fi, War</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>8230</th>\n",
|
|
" <td>789</td>\n",
|
|
" <td>3</td>\n",
|
|
" <td>Beautiful Girls (1996)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>19781</th>\n",
|
|
" <td>789</td>\n",
|
|
" <td>3</td>\n",
|
|
" <td>Liar Liar (1997)</td>\n",
|
|
" <td>Comedy</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>39387</th>\n",
|
|
" <td>789</td>\n",
|
|
" <td>3</td>\n",
|
|
" <td>Sleepers (1996)</td>\n",
|
|
" <td>Crime, Drama</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" user rating title \\\n",
|
|
"2985 789 5 Star Wars (1977) \n",
|
|
"25980 789 5 Dead Man Walking (1995) \n",
|
|
"9357 789 5 Last Supper, The (1995) \n",
|
|
"17306 789 5 Leaving Las Vegas (1995) \n",
|
|
"36474 789 5 Swingers (1996) \n",
|
|
"65139 789 4 Welcome to the Dollhouse (1995) \n",
|
|
"61975 789 4 Private Parts (1997) \n",
|
|
"56522 789 4 Waiting for Guffman (1996) \n",
|
|
"41414 789 4 Donnie Brasco (1997) \n",
|
|
"36617 789 4 Lone Star (1996) \n",
|
|
"24501 789 4 People vs. Larry Flynt, The (1996) \n",
|
|
"20210 789 4 Return of the Jedi (1983) \n",
|
|
"8230 789 3 Beautiful Girls (1996) \n",
|
|
"19781 789 3 Liar Liar (1997) \n",
|
|
"39387 789 3 Sleepers (1996) \n",
|
|
"\n",
|
|
" genres \n",
|
|
"2985 Action, Adventure, Romance, Sci-Fi, War \n",
|
|
"25980 Drama \n",
|
|
"9357 Drama, Thriller \n",
|
|
"17306 Drama, Romance \n",
|
|
"36474 Comedy, Drama \n",
|
|
"65139 Comedy, Drama \n",
|
|
"61975 Comedy, Drama \n",
|
|
"56522 Comedy \n",
|
|
"41414 Crime, Drama \n",
|
|
"36617 Drama, Mystery \n",
|
|
"24501 Drama \n",
|
|
"20210 Action, Adventure, Romance, Sci-Fi, War \n",
|
|
"8230 Drama \n",
|
|
"19781 Comedy \n",
|
|
"39387 Crime, Drama "
|
|
]
|
|
},
|
|
"metadata": {},
|
|
"output_type": "display_data"
|
|
},
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Here is what we recommend:\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>user</th>\n",
|
|
" <th>rec_nb</th>\n",
|
|
" <th>title</th>\n",
|
|
" <th>genres</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>787</th>\n",
|
|
" <td>789.0</td>\n",
|
|
" <td>1</td>\n",
|
|
" <td>Great Day in Harlem, A (1994)</td>\n",
|
|
" <td>Documentary</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>1729</th>\n",
|
|
" <td>789.0</td>\n",
|
|
" <td>2</td>\n",
|
|
" <td>Tough and Deadly (1995)</td>\n",
|
|
" <td>Action, Drama, Thriller</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>2671</th>\n",
|
|
" <td>789.0</td>\n",
|
|
" <td>3</td>\n",
|
|
" <td>Aiqing wansui (1994)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>3613</th>\n",
|
|
" <td>789.0</td>\n",
|
|
" <td>4</td>\n",
|
|
" <td>Delta of Venus (1994)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>4555</th>\n",
|
|
" <td>789.0</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Someone Else's America (1995)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>5497</th>\n",
|
|
" <td>789.0</td>\n",
|
|
" <td>6</td>\n",
|
|
" <td>Saint of Fort Washington, The (1993)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>6439</th>\n",
|
|
" <td>789.0</td>\n",
|
|
" <td>7</td>\n",
|
|
" <td>Celestial Clockwork (1994)</td>\n",
|
|
" <td>Comedy</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>7380</th>\n",
|
|
" <td>789.0</td>\n",
|
|
" <td>8</td>\n",
|
|
" <td>Some Mother's Son (1996)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>9276</th>\n",
|
|
" <td>789.0</td>\n",
|
|
" <td>9</td>\n",
|
|
" <td>Maya Lin: A Strong Clear Vision (1994)</td>\n",
|
|
" <td>Documentary</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>8322</th>\n",
|
|
" <td>789.0</td>\n",
|
|
" <td>10</td>\n",
|
|
" <td>Prefontaine (1997)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" user rec_nb title \\\n",
|
|
"787 789.0 1 Great Day in Harlem, A (1994) \n",
|
|
"1729 789.0 2 Tough and Deadly (1995) \n",
|
|
"2671 789.0 3 Aiqing wansui (1994) \n",
|
|
"3613 789.0 4 Delta of Venus (1994) \n",
|
|
"4555 789.0 5 Someone Else's America (1995) \n",
|
|
"5497 789.0 6 Saint of Fort Washington, The (1993) \n",
|
|
"6439 789.0 7 Celestial Clockwork (1994) \n",
|
|
"7380 789.0 8 Some Mother's Son (1996) \n",
|
|
"9276 789.0 9 Maya Lin: A Strong Clear Vision (1994) \n",
|
|
"8322 789.0 10 Prefontaine (1997) \n",
|
|
"\n",
|
|
" genres \n",
|
|
"787 Documentary \n",
|
|
"1729 Action, Drama, Thriller \n",
|
|
"2671 Drama \n",
|
|
"3613 Drama \n",
|
|
"4555 Drama \n",
|
|
"5497 Drama \n",
|
|
"6439 Comedy \n",
|
|
"7380 Drama \n",
|
|
"9276 Documentary \n",
|
|
"8322 Drama "
|
|
]
|
|
},
|
|
"execution_count": 15,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"train=pd.read_csv('./Datasets/ml-100k/train.csv', sep='\\t', header=None, names=['user', 'item', 'rating', 'timestamp'])\n",
|
|
"items=pd.read_csv('./Datasets/ml-100k/movies.csv')\n",
|
|
"\n",
|
|
"user=random.choice(list(set(train['user'])))\n",
|
|
"\n",
|
|
"train_content=pd.merge(train, items, left_on='item', right_on='id')\n",
|
|
"\n",
|
|
"print('Here is what user rated high:')\n",
|
|
"display(train_content[train_content['user']==user][['user', 'rating', 'title', 'genres']]\\\n",
|
|
" .sort_values(by='rating', ascending=False)[:15])\n",
|
|
"\n",
|
|
"reco = np.loadtxt('Recommendations generated/ml-100k/Self_BaselineUI_reco.csv', delimiter=',')\n",
|
|
"items=pd.read_csv('./Datasets/ml-100k/movies.csv')\n",
|
|
"\n",
|
|
"# Let's ignore scores - they are not used in evaluation: \n",
|
|
"reco_users=reco[:,:1]\n",
|
|
"reco_items=reco[:,1::2]\n",
|
|
"# Let's put them into one array\n",
|
|
"reco=np.concatenate((reco_users, reco_items), axis=1)\n",
|
|
"\n",
|
|
"# Let's rebuild it user-item dataframe\n",
|
|
"recommended=[]\n",
|
|
"for row in reco:\n",
|
|
" for rec_nb, entry in enumerate(row[1:]):\n",
|
|
" recommended.append((row[0], rec_nb+1, entry))\n",
|
|
"recommended=pd.DataFrame(recommended, columns=['user','rec_nb', 'item'])\n",
|
|
"\n",
|
|
"recommended_content=pd.merge(recommended, items, left_on='item', right_on='id')\n",
|
|
"\n",
|
|
"print('Here is what we recommend:')\n",
|
|
"recommended_content[recommended_content['user']==user][['user', 'rec_nb', 'title', 'genres']].sort_values(by='rec_nb')"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"# project task 3: implement some other evaluation measure"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 16,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# it may be your idea, modification of what we have already implemented \n",
|
|
"# (for example Hit2 rate which would count as a success users whoreceived at least 2 relevant recommendations) \n",
|
|
"# or something well-known\n",
|
|
"# expected output: modification of evaluation_measures.py such that evaluate_all will also display your measure"
|
|
]
|
|
},
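{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A possible starting point (sketch only, function name hypothetical): inside ranking_metrics the plain\n",
"# hit rate is accumulated as HR+=nb_user_successes>0; a Hit-2 rate would accumulate nb_user_successes>1\n",
"# instead and report it divided by relevant_users. A standalone version over the same inner-id reco array:\n",
"def hit2_rate(reco, test_ui, topK=10):\n",
"    hits2, relevant_users = 0, 0\n",
"    for nb_user, user in enumerate(reco[:,0]):\n",
"        u_rated_items=test_ui.indices[test_ui.indptr[user]:test_ui.indptr[user+1]]\n",
"        if len(u_rated_items)>0:\n",
"            relevant_users+=1\n",
"            nb_successes=sum(item in u_rated_items for item in reco[nb_user,1:topK+1])\n",
"            hits2+=nb_successes>1\n",
"    return hits2/relevant_users"
]
},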
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 17,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stderr",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"943it [00:00, 4479.94it/s]\n",
|
|
"943it [00:00, 4036.40it/s]\n",
|
|
"943it [00:00, 4598.99it/s]\n",
|
|
"943it [00:00, 5170.18it/s]\n",
|
|
"943it [00:00, 4778.23it/s]\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>Model</th>\n",
|
|
" <th>RMSE</th>\n",
|
|
" <th>MAE</th>\n",
|
|
" <th>precision</th>\n",
|
|
" <th>recall</th>\n",
|
|
" <th>F_1</th>\n",
|
|
" <th>F_05</th>\n",
|
|
" <th>precision_super</th>\n",
|
|
" <th>recall_super</th>\n",
|
|
" <th>NDCG</th>\n",
|
|
" <th>mAP</th>\n",
|
|
" <th>MRR</th>\n",
|
|
" <th>LAUC</th>\n",
|
|
" <th>HR</th>\n",
|
|
" <th>Reco in test</th>\n",
|
|
" <th>Test coverage</th>\n",
|
|
" <th>Shannon</th>\n",
|
|
" <th>Gini</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Self_TopPop</td>\n",
|
|
" <td>2.508258</td>\n",
|
|
" <td>2.217909</td>\n",
|
|
" <td>0.188865</td>\n",
|
|
" <td>0.116919</td>\n",
|
|
" <td>0.118732</td>\n",
|
|
" <td>0.141584</td>\n",
|
|
" <td>0.130472</td>\n",
|
|
" <td>0.137473</td>\n",
|
|
" <td>0.214651</td>\n",
|
|
" <td>0.111707</td>\n",
|
|
" <td>0.400939</td>\n",
|
|
" <td>0.555546</td>\n",
|
|
" <td>0.765642</td>\n",
|
|
" <td>1.000000</td>\n",
|
|
" <td>0.038961</td>\n",
|
|
" <td>3.159079</td>\n",
|
|
" <td>0.987317</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Ready_Baseline</td>\n",
|
|
" <td>0.949459</td>\n",
|
|
" <td>0.752487</td>\n",
|
|
" <td>0.091410</td>\n",
|
|
" <td>0.037652</td>\n",
|
|
" <td>0.046030</td>\n",
|
|
" <td>0.061286</td>\n",
|
|
" <td>0.079614</td>\n",
|
|
" <td>0.056463</td>\n",
|
|
" <td>0.095957</td>\n",
|
|
" <td>0.043178</td>\n",
|
|
" <td>0.198193</td>\n",
|
|
" <td>0.515501</td>\n",
|
|
" <td>0.437964</td>\n",
|
|
" <td>1.000000</td>\n",
|
|
" <td>0.033911</td>\n",
|
|
" <td>2.836513</td>\n",
|
|
" <td>0.991139</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Self_GlobalAvg</td>\n",
|
|
" <td>1.125760</td>\n",
|
|
" <td>0.943534</td>\n",
|
|
" <td>0.061188</td>\n",
|
|
" <td>0.025968</td>\n",
|
|
" <td>0.031383</td>\n",
|
|
" <td>0.041343</td>\n",
|
|
" <td>0.040558</td>\n",
|
|
" <td>0.032107</td>\n",
|
|
" <td>0.067695</td>\n",
|
|
" <td>0.027470</td>\n",
|
|
" <td>0.171187</td>\n",
|
|
" <td>0.509546</td>\n",
|
|
" <td>0.384942</td>\n",
|
|
" <td>1.000000</td>\n",
|
|
" <td>0.025974</td>\n",
|
|
" <td>2.711772</td>\n",
|
|
" <td>0.992003</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Ready_Random</td>\n",
|
|
" <td>1.531724</td>\n",
|
|
" <td>1.230384</td>\n",
|
|
" <td>0.049417</td>\n",
|
|
" <td>0.022558</td>\n",
|
|
" <td>0.025490</td>\n",
|
|
" <td>0.033242</td>\n",
|
|
" <td>0.030365</td>\n",
|
|
" <td>0.022626</td>\n",
|
|
" <td>0.054166</td>\n",
|
|
" <td>0.021656</td>\n",
|
|
" <td>0.128378</td>\n",
|
|
" <td>0.507802</td>\n",
|
|
" <td>0.325557</td>\n",
|
|
" <td>0.988865</td>\n",
|
|
" <td>0.190476</td>\n",
|
|
" <td>5.100033</td>\n",
|
|
" <td>0.907724</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Self_BaselineUI</td>\n",
|
|
" <td>0.967585</td>\n",
|
|
" <td>0.762740</td>\n",
|
|
" <td>0.000954</td>\n",
|
|
" <td>0.000170</td>\n",
|
|
" <td>0.000278</td>\n",
|
|
" <td>0.000463</td>\n",
|
|
" <td>0.000644</td>\n",
|
|
" <td>0.000189</td>\n",
|
|
" <td>0.000752</td>\n",
|
|
" <td>0.000168</td>\n",
|
|
" <td>0.001677</td>\n",
|
|
" <td>0.496424</td>\n",
|
|
" <td>0.009544</td>\n",
|
|
" <td>0.600530</td>\n",
|
|
" <td>0.005051</td>\n",
|
|
" <td>1.803126</td>\n",
|
|
" <td>0.996380</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" Model RMSE MAE precision recall F_1 \\\n",
|
|
"0 Self_TopPop 2.508258 2.217909 0.188865 0.116919 0.118732 \n",
|
|
"0 Ready_Baseline 0.949459 0.752487 0.091410 0.037652 0.046030 \n",
|
|
"0 Self_GlobalAvg 1.125760 0.943534 0.061188 0.025968 0.031383 \n",
|
|
"0 Ready_Random 1.531724 1.230384 0.049417 0.022558 0.025490 \n",
|
|
"0 Self_BaselineUI 0.967585 0.762740 0.000954 0.000170 0.000278 \n",
|
|
"\n",
|
|
" F_05 precision_super recall_super NDCG mAP MRR \\\n",
|
|
"0 0.141584 0.130472 0.137473 0.214651 0.111707 0.400939 \n",
|
|
"0 0.061286 0.079614 0.056463 0.095957 0.043178 0.198193 \n",
|
|
"0 0.041343 0.040558 0.032107 0.067695 0.027470 0.171187 \n",
|
|
"0 0.033242 0.030365 0.022626 0.054166 0.021656 0.128378 \n",
|
|
"0 0.000463 0.000644 0.000189 0.000752 0.000168 0.001677 \n",
|
|
"\n",
|
|
" LAUC HR Reco in test Test coverage Shannon Gini \n",
|
|
"0 0.555546 0.765642 1.000000 0.038961 3.159079 0.987317 \n",
|
|
"0 0.515501 0.437964 1.000000 0.033911 2.836513 0.991139 \n",
|
|
"0 0.509546 0.384942 1.000000 0.025974 2.711772 0.992003 \n",
|
|
"0 0.507802 0.325557 0.988865 0.190476 5.100033 0.907724 \n",
|
|
"0 0.496424 0.009544 0.600530 0.005051 1.803126 0.996380 "
|
|
]
|
|
},
|
|
"execution_count": 17,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"dir_path=\"Recommendations generated/ml-100k/\"\n",
|
|
"super_reactions=[4,5]\n",
|
|
"test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None)\n",
|
|
"\n",
|
|
"ev.evaluate_all(test, dir_path, super_reactions)"
|
|
]
|
|
}
|
|
],
|
|
"metadata": {
|
|
"kernelspec": {
|
|
"display_name": "Python 3",
|
|
"language": "python",
|
|
"name": "python3"
|
|
},
|
|
"language_info": {
|
|
"codemirror_mode": {
|
|
"name": "ipython",
|
|
"version": 3
|
|
},
|
|
"file_extension": ".py",
|
|
"mimetype": "text/x-python",
|
|
"name": "python",
|
|
"nbconvert_exporter": "python",
|
|
"pygments_lexer": "ipython3",
|
|
"version": "3.6.9"
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 4
|
|
}
|