1464 lines
57 KiB
Plaintext
1464 lines
57 KiB
Plaintext
{
|
|
"cells": [
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"### Prepare test set"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 14,
|
|
"metadata": {
|
|
"slideshow": {
|
|
"slide_type": "-"
|
|
}
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
import pandas as pd
import numpy as np
import scipy.sparse as sparse
from collections import defaultdict
from itertools import chain
import random
from tqdm import tqdm

# In evaluation we do not load the train set - it is not needed here.
test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\t', header=None)
test.columns=['user', 'item', 'rating', 'timestamp']

# Dense integer codes (0..n-1) for users and items; these become the
# row/column indices of the sparse rating matrix built below.
test['user_code'] = test['user'].astype("category").cat.codes
test['item_code'] = test['item'].astype("category").cat.codes

# Lookup tables between raw ids and inner codes, in both directions.
user_code_id = dict(enumerate(test['user'].astype("category").cat.categories))
user_id_code = dict((v, k) for k, v in user_code_id.items())
item_code_id = dict(enumerate(test['item'].astype("category").cat.categories))
item_id_code = dict((v, k) for k, v in item_code_id.items())

# Sparse user-item rating matrix: test_ui[user_code, item_code] = rating.
test_ui = sparse.csr_matrix((test['rating'], (test['user_code'], test['item_code'])))
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"### Estimations metrics"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 15,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
# Baseline estimations: one (user, item, score) triple per row, no header.
estimations_df=pd.read_csv('Recommendations generated/ml-100k/Ready_Baseline_estimations.csv', header=None)
estimations_df.columns=['user', 'item' ,'score']

# Map raw ids to the inner codes derived from the test set; a KeyError here
# would mean the estimations mention a user/item absent from the test set.
estimations_df['user_code']=[user_id_code[user] for user in estimations_df['user']]
estimations_df['item_code']=[item_id_code[item] for item in estimations_df['item']]
# Same shape as test_ui so the two matrices can be compared entry-wise.
estimations=sparse.csr_matrix((estimations_df['score'], (estimations_df['user_code'], estimations_df['item_code'])), shape=test_ui.shape)
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 16,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
def estimations_metrics(test_ui, estimations):
    """Compute RMSE and MAE between test ratings and estimated scores.

    Both arguments are CSR matrices assumed to share the same sparsity
    pattern, so the i-th entry of ``.data`` refers to the same
    (user, item) pair in both — TODO confirm for each model's output.

    Returns a one-row DataFrame with columns 'RMSE' and 'MAE'.
    """
    errors = estimations.data - test_ui.data
    nnz = estimations.nnz

    rmse = (np.sum(errors ** 2) / nnz) ** 0.5
    mae = np.sum(np.abs(errors)) / nnz

    metric_names = ['RMSE', 'MAE']
    metric_values = [rmse, mae]
    return pd.DataFrame([metric_values], columns=metric_names)
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 17,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>RMSE</th>\n",
|
|
" <th>MAE</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>0.949459</td>\n",
|
|
" <td>0.752487</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" RMSE MAE\n",
|
|
"0 0.949459 0.752487"
|
|
]
|
|
},
|
|
"execution_count": 17,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
# In case of an error (in the laboratories) you might have to switch to a
# different pandas version: try `%pip install pandas==1.0.3` and restart the kernel.

estimations_metrics(test_ui, estimations)
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"### Ranking metrics"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 18,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"array([[663, 475, 62, ..., 472, 269, 503],\n",
|
|
" [ 48, 313, 475, ..., 591, 175, 466],\n",
|
|
" [351, 313, 475, ..., 591, 175, 466],\n",
|
|
" ...,\n",
|
|
" [259, 313, 475, ..., 11, 591, 175],\n",
|
|
" [ 33, 313, 475, ..., 11, 591, 175],\n",
|
|
" [ 77, 313, 475, ..., 11, 591, 175]])"
|
|
]
|
|
},
|
|
"execution_count": 18,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
import numpy as np
reco = np.loadtxt('Recommendations generated/ml-100k/Ready_Baseline_reco.csv', delimiter=',')
# Scores are ignored - they are not used in evaluation.
# Column 0 is the user id; from column 1 on, columns alternate item id / score,
# so the [1::2] stride keeps only the item ids.
users=reco[:,:1]
items=reco[:,1::2]
# Switch to inner codes instead of raw ids.
# NOTE: setdefault inserts the -1 fallback into the mapping dicts as a side effect.
users=np.vectorize(lambda x: user_id_code.setdefault(x, -1))(users)
items=np.vectorize(lambda x: item_id_code.setdefault(x, -1))(items) # items we recommend may be absent from the test set
# Put user codes and their recommended item codes into one array.
reco=np.concatenate((users, items), axis=1)
reco
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 19,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
def ranking_metrics(test_ui, reco, super_reactions=[], topK=10):
    """Compute top-K ranking metrics averaged over users.

    Parameters
    ----------
    test_ui : scipy.sparse.csr_matrix
        Test user-item rating matrix, rows indexed by inner user code,
        columns by inner item code.
    reco : np.ndarray
        One row per user: column 0 is the inner user code, the following
        columns are recommended inner item codes in ranking order.
    super_reactions : list
        Rating values treated as "super" (strong) reactions, e.g. [4, 5].
        (The mutable default is harmless here: the list is only
        membership-tested, never mutated.)
    topK : int
        Number of leading recommendations evaluated per user.

    Returns
    -------
    pd.DataFrame
        One-row frame with precision, recall, F_1, F_05, precision_super,
        recall_super, NDCG, mAP, MRR, LAUC and HR.
    """
    nb_items=test_ui.shape[1]
    # Per-metric accumulators summed over users; averaged at the end.
    relevant_users, super_relevant_users, prec, rec, F_1, F_05, prec_super, rec_super, ndcg, mAP, MRR, LAUC, HR=\
    0,0,0,0,0,0,0,0,0,0,0,0,0
    
    # Discount weights 1/log2(rank+1) and their prefix sums (ideal DCG per list length).
    cg = (1.0 / np.log2(np.arange(2, topK + 2)))
    cg_sum = np.cumsum(cg)
    
    for (nb_user, user) in tqdm(enumerate(reco[:,0])):
        # Items this user rated in the test set (cheap CSR row slice).
        u_rated_items=test_ui.indices[test_ui.indptr[user]:test_ui.indptr[user+1]]
        nb_u_rated_items=len(u_rated_items)
        if nb_u_rated_items>0: # skip users with no items in test set (still possible that there will be no super items)
            relevant_users+=1
            
            # Rated items whose rating value counts as a super reaction.
            u_super_items=u_rated_items[np.vectorize(lambda x: x in super_reactions)\
            (test_ui.data[test_ui.indptr[user]:test_ui.indptr[user+1]])]
            # More natural seems u_super_items=[item for item in u_rated_items if test_ui[user,item] in super_reactions]
            # but accessing test_ui[user,item] is expensive - we should avoid doing it.
            if len(u_super_items)>0:
                super_relevant_users+=1
            
            # Per-position hit indicators within this user's top-K list.
            user_successes=np.zeros(topK)
            nb_user_successes=0
            user_super_successes=np.zeros(topK)
            nb_user_super_successes=0
            
            # Mark which of the top-K recommendations hit a rated / super item.
            for (item_position,item) in enumerate(reco[nb_user,1:topK+1]):
                if item in u_rated_items:
                    user_successes[item_position]=1
                    nb_user_successes+=1
                    if item in u_super_items:
                        user_super_successes[item_position]=1
                        nb_user_super_successes+=1
            
            prec_u=nb_user_successes/topK 
            prec+=prec_u
            
            rec_u=nb_user_successes/nb_u_rated_items
            rec+=rec_u
            
            # F-scores, guarded against division by zero when there are no hits.
            F_1+=2*(prec_u*rec_u)/(prec_u+rec_u) if prec_u+rec_u>0 else 0
            F_05+=(0.5**2+1)*(prec_u*rec_u)/(0.5**2*prec_u+rec_u) if prec_u+rec_u>0 else 0
            
            prec_super+=nb_user_super_successes/topK
            rec_super+=nb_user_super_successes/max(len(u_super_items),1) # max(...,1) yields 0 when there are no super items
            # NDCG: discounted gain of the hits over the ideal gain for this user.
            ndcg+=np.dot(user_successes,cg)/cg_sum[min(topK, nb_u_rated_items)-1]
            
            cumsum_successes=np.cumsum(user_successes)
            # Average precision: precision@k accumulated over hit positions.
            mAP+=np.dot(cumsum_successes/np.arange(1,topK+1), user_successes)/min(topK, nb_u_rated_items)
            # Reciprocal rank of the first hit (0 when there is no hit in top-K).
            MRR+=1/(user_successes.nonzero()[0][0]+1) if user_successes.nonzero()[0].size>0 else 0
            # NOTE(review): LAUC appears to count correctly ordered
            # (relevant, irrelevant) pairs inside the top-K list exactly and
            # the remainder via a closed-form term, normalized by the total
            # number of such pairs — confirm against the LAUC definition.
            LAUC+=(np.dot(cumsum_successes, 1-user_successes)+\
            (nb_user_successes+nb_u_rated_items)/2*((nb_items-nb_u_rated_items)-(topK-nb_user_successes)))/\
            ((nb_items-nb_u_rated_items)*nb_u_rated_items)
            
            # Hit rate: at least one relevant recommendation in the top-K.
            HR+=nb_user_successes>0
    
    
    # Average each accumulated metric over the users it is defined for.
    result=[]
    result.append(('precision', prec/relevant_users))
    result.append(('recall', rec/relevant_users))
    result.append(('F_1', F_1/relevant_users))
    result.append(('F_05', F_05/relevant_users))
    result.append(('precision_super', prec_super/super_relevant_users))
    result.append(('recall_super', rec_super/super_relevant_users))
    result.append(('NDCG', ndcg/relevant_users))
    result.append(('mAP', mAP/relevant_users))
    result.append(('MRR', MRR/relevant_users))
    result.append(('LAUC', LAUC/relevant_users))
    result.append(('HR', HR/relevant_users))

    # One-row DataFrame: metric names as columns, averaged values as row 0.
    df_result=(pd.DataFrame(list(zip(*result))[1])).T
    df_result.columns=list(zip(*result))[0]
    return df_result
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 20,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stderr",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"943it [00:00, 10596.27it/s]\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>precision</th>\n",
|
|
" <th>recall</th>\n",
|
|
" <th>F_1</th>\n",
|
|
" <th>F_05</th>\n",
|
|
" <th>precision_super</th>\n",
|
|
" <th>recall_super</th>\n",
|
|
" <th>NDCG</th>\n",
|
|
" <th>mAP</th>\n",
|
|
" <th>MRR</th>\n",
|
|
" <th>LAUC</th>\n",
|
|
" <th>HR</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>0.09141</td>\n",
|
|
" <td>0.037652</td>\n",
|
|
" <td>0.04603</td>\n",
|
|
" <td>0.061286</td>\n",
|
|
" <td>0.079614</td>\n",
|
|
" <td>0.056463</td>\n",
|
|
" <td>0.095957</td>\n",
|
|
" <td>0.043178</td>\n",
|
|
" <td>0.198193</td>\n",
|
|
" <td>0.515501</td>\n",
|
|
" <td>0.437964</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" precision recall F_1 F_05 precision_super recall_super \\\n",
|
|
"0 0.09141 0.037652 0.04603 0.061286 0.079614 0.056463 \n",
|
|
"\n",
|
|
" NDCG mAP MRR LAUC HR \n",
|
|
"0 0.095957 0.043178 0.198193 0.515501 0.437964 "
|
|
]
|
|
},
|
|
"execution_count": 20,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"ranking_metrics(test_ui, reco, super_reactions=[4,5], topK=10)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"### Diversity metrics"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 21,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
def diversity_metrics(test_ui, reco, topK=10):
    """Compute catalog-diversity metrics for a set of recommendations.

    test_ui is the sparse test rating matrix; reco has the inner user code
    in column 0 and recommended inner item codes in the remaining columns
    (items absent from the test set are encoded as -1).  topK is kept for
    interface compatibility with the other metric functions.

    Returns a one-row DataFrame with 'Reco in test', 'Test coverage',
    'Shannon' (entropy) and 'Gini' of the recommendation frequencies.
    """
    counts = defaultdict(int)

    # Seed every item occurring in the test set with a zero count, so that
    # never-recommended test items still contribute to coverage and Gini.
    for test_item in set(test_ui.indices):
        counts[test_item] = 0

    # Tally how often each item id appears in the recommendation lists.
    for rec_item in reco[:, 1:].flat:
        counts[rec_item] += 1

    # Recommendations outside the test set were mapped to -1 earlier;
    # split them off from the in-test counts.
    nb_reco_outside_test = counts.pop(-1, 0)

    freq = np.array(list(counts.values()))

    nb_rec_items = len(freq[freq > 0])
    nb_reco_inside_test = np.sum(freq)

    # Normalize to a probability distribution and sort ascending
    # (sorted order is what the Gini weight vector below expects).
    freq = np.sort(freq / np.sum(freq))

    # Zero-frequency items give log(0) = -inf; map those to 0 and silence
    # the division warning so they drop out of the entropy sum.
    with np.errstate(divide='ignore'):
        log_freq = np.nan_to_num(np.log(freq), posinf=0, neginf=0)

    gini_weights = np.arange(1 - len(freq), len(freq), 2)

    metrics = [
        ('Reco in test', nb_reco_inside_test / (nb_reco_inside_test + nb_reco_outside_test)),
        ('Test coverage', nb_rec_items / test_ui.shape[1]),
        ('Shannon', -np.dot(freq, log_freq)),
        ('Gini', np.dot(freq, gini_weights) / (len(freq) - 1)),
    ]

    names = [name for name, _ in metrics]
    values = [value for _, value in metrics]
    return pd.DataFrame([values], columns=names)
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 22,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>Reco in test</th>\n",
|
|
" <th>Test coverage</th>\n",
|
|
" <th>Shannon</th>\n",
|
|
" <th>Gini</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>1.0</td>\n",
|
|
" <td>0.033911</td>\n",
|
|
" <td>2.836513</td>\n",
|
|
" <td>0.991139</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" Reco in test Test coverage Shannon Gini\n",
|
|
"0 1.0 0.033911 2.836513 0.991139"
|
|
]
|
|
},
|
|
"execution_count": 22,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
# In case of errors try `%pip install numpy==1.18.4` and restart the kernel.

import importlib

import evaluation_measures as ev

# The `imp` module was deprecated in Python 3.4 and removed in 3.12;
# importlib.reload is the supported way to hot-reload the module.
importlib.reload(ev)

# Diversity metrics for the baseline recommendations prepared above.
x=diversity_metrics(test_ui, reco, topK=10)
x
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"# To be used in other notebooks"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 23,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stderr",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"943it [00:00, 9831.71it/s]\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>RMSE</th>\n",
|
|
" <th>MAE</th>\n",
|
|
" <th>precision</th>\n",
|
|
" <th>recall</th>\n",
|
|
" <th>F_1</th>\n",
|
|
" <th>F_05</th>\n",
|
|
" <th>precision_super</th>\n",
|
|
" <th>recall_super</th>\n",
|
|
" <th>NDCG</th>\n",
|
|
" <th>mAP</th>\n",
|
|
" <th>MRR</th>\n",
|
|
" <th>LAUC</th>\n",
|
|
" <th>HR</th>\n",
|
|
" <th>Reco in test</th>\n",
|
|
" <th>Test coverage</th>\n",
|
|
" <th>Shannon</th>\n",
|
|
" <th>Gini</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>0.949459</td>\n",
|
|
" <td>0.752487</td>\n",
|
|
" <td>0.09141</td>\n",
|
|
" <td>0.037652</td>\n",
|
|
" <td>0.04603</td>\n",
|
|
" <td>0.061286</td>\n",
|
|
" <td>0.079614</td>\n",
|
|
" <td>0.056463</td>\n",
|
|
" <td>0.095957</td>\n",
|
|
" <td>0.043178</td>\n",
|
|
" <td>0.198193</td>\n",
|
|
" <td>0.515501</td>\n",
|
|
" <td>0.437964</td>\n",
|
|
" <td>1.0</td>\n",
|
|
" <td>0.033911</td>\n",
|
|
" <td>2.836513</td>\n",
|
|
" <td>0.991139</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" RMSE MAE precision recall F_1 F_05 \\\n",
|
|
"0 0.949459 0.752487 0.09141 0.037652 0.04603 0.061286 \n",
|
|
"\n",
|
|
" precision_super recall_super NDCG mAP MRR LAUC \\\n",
|
|
"0 0.079614 0.056463 0.095957 0.043178 0.198193 0.515501 \n",
|
|
"\n",
|
|
" HR Reco in test Test coverage Shannon Gini \n",
|
|
"0 0.437964 1.0 0.033911 2.836513 0.991139 "
|
|
]
|
|
},
|
|
"execution_count": 23,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
import importlib

import evaluation_measures as ev

# The `imp` module was deprecated in Python 3.4 and removed in 3.12;
# importlib.reload is the supported replacement.
importlib.reload(ev)

estimations_df=pd.read_csv('Recommendations generated/ml-100k/Ready_Baseline_estimations.csv', header=None)
reco=np.loadtxt('Recommendations generated/ml-100k/Ready_Baseline_reco.csv', delimiter=',')

# Full evaluation (estimation, ranking and diversity metrics) of the baseline.
ev.evaluate(test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\t', header=None),
            estimations_df=estimations_df, 
            reco=reco,
            super_reactions=[4,5])
#also you can just type ev.evaluate_all(estimations_df, reco) - I put above values as default
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 24,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stderr",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"943it [00:00, 11236.19it/s]\n",
|
|
"943it [00:00, 10128.11it/s]\n",
|
|
"943it [00:00, 10299.38it/s]\n",
|
|
"943it [00:00, 10525.19it/s]\n"
|
|
]
|
|
},
|
|
{
|
|
"ename": "OSError",
|
|
"evalue": "Recommendations generated/ml-100k/Ready_I-KNNWithZScore.csv_reco.csv not found.",
|
|
"output_type": "error",
|
|
"traceback": [
|
|
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
|
"\u001b[1;31mOSError\u001b[0m Traceback (most recent call last)",
|
|
"\u001b[1;32m<ipython-input-24-bc90f36b06c1>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 7\u001b[0m \u001b[0mtest\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mpd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'./Datasets/ml-100k/test.csv'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msep\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'\\t'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mheader\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 8\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 9\u001b[1;33m \u001b[0mdf\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mev\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mevaluate_all\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtest\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdir_path\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msuper_reactions\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 10\u001b[0m \u001b[1;31m#also you can just type ev.evaluate_all() - I put above values as default\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
|
"\u001b[1;32m~\\Desktop\\warsztaty\\moje\\evaluation_measures.py\u001b[0m in \u001b[0;36mevaluate_all\u001b[1;34m(test, dir_path, super_reactions, topK)\u001b[0m\n\u001b[0;32m 205\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mmodel\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mmodels\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 206\u001b[0m \u001b[0mestimations_df\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mpd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'{}{}_estimations.csv'\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdir_path\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mreplace\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\".csv\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mheader\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 207\u001b[1;33m \u001b[0mreco\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mloadtxt\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'{}{}_reco.csv'\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdir_path\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdelimiter\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m','\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 208\u001b[0m \u001b[0mto_append\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mevaluate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtest\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mestimations_df\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mreco\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msuper_reactions\u001b[0m\u001b[1;33m,\u001b[0m 
\u001b[0mtopK\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 209\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
|
|
"\u001b[1;32mc:\\users\\adrian\\appdata\\local\\programs\\python\\python38-32\\lib\\site-packages\\numpy\\lib\\npyio.py\u001b[0m in \u001b[0;36mloadtxt\u001b[1;34m(fname, dtype, comments, delimiter, converters, skiprows, usecols, unpack, ndmin, encoding, max_rows)\u001b[0m\n\u001b[0;32m 979\u001b[0m \u001b[0mfname\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mos_fspath\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 980\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0m_is_string_like\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 981\u001b[1;33m \u001b[0mfh\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlib\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_datasource\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'rt'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mencoding\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mencoding\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 982\u001b[0m \u001b[0mfencoding\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgetattr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfh\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'encoding'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'latin1'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 983\u001b[0m \u001b[0mfh\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0miter\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfh\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
|
"\u001b[1;32mc:\\users\\adrian\\appdata\\local\\programs\\python\\python38-32\\lib\\site-packages\\numpy\\lib\\_datasource.py\u001b[0m in \u001b[0;36mopen\u001b[1;34m(path, mode, destpath, encoding, newline)\u001b[0m\n\u001b[0;32m 267\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 268\u001b[0m \u001b[0mds\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mDataSource\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdestpath\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 269\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mds\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpath\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mencoding\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mencoding\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnewline\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mnewline\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 270\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 271\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
|
|
"\u001b[1;32mc:\\users\\adrian\\appdata\\local\\programs\\python\\python38-32\\lib\\site-packages\\numpy\\lib\\_datasource.py\u001b[0m in \u001b[0;36mopen\u001b[1;34m(self, path, mode, encoding, newline)\u001b[0m\n\u001b[0;32m 621\u001b[0m encoding=encoding, newline=newline)\n\u001b[0;32m 622\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 623\u001b[1;33m \u001b[1;32mraise\u001b[0m \u001b[0mIOError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"%s not found.\"\u001b[0m \u001b[1;33m%\u001b[0m \u001b[0mpath\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 624\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 625\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
|
|
"\u001b[1;31mOSError\u001b[0m: Recommendations generated/ml-100k/Ready_I-KNNWithZScore.csv_reco.csv not found."
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
import importlib

import evaluation_measures as ev

# The `imp` module was deprecated in Python 3.4 and removed in 3.12;
# importlib.reload is the supported replacement.
importlib.reload(ev)

dir_path="Recommendations generated/ml-100k/"
super_reactions=[4,5]
test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\t', header=None)

# Evaluate every model whose recommendation files are present in dir_path.
df=ev.evaluate_all(test, dir_path, super_reactions)
#also you can just type ev.evaluate_all() - I put above values as default
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"df.iloc[:,:9]"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"df.iloc[:,np.append(0,np.arange(9, df.shape[1]))]"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"# Check metrics on toy dataset"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
import importlib

import evaluation_measures as ev
import helpers

# The `imp` module was deprecated in Python 3.4 and removed in 3.12;
# importlib.reload is the supported replacement.
importlib.reload(ev)

dir_path="Recommendations generated/toy-example/"
super_reactions=[4,5]
test=pd.read_csv('./Datasets/toy-example/test.csv', sep='\t', header=None)

display(ev.evaluate_all(test, dir_path, super_reactions, topK=3))
#also you can just type ev.evaluate_all() - I put above values as default

# Load the raw toy data and the generated files so the metric values can be
# sanity-checked by hand against the small matrices displayed below.
toy_train_read=pd.read_csv('./Datasets/toy-example/train.csv', sep='\t', header=None, names=['user', 'item', 'rating', 'timestamp'])
toy_test_read=pd.read_csv('./Datasets/toy-example/test.csv', sep='\t', header=None, names=['user', 'item', 'rating', 'timestamp'])
reco=pd.read_csv('Recommendations generated/toy-example/Self_BaselineUI_reco.csv', header=None)
estimations=pd.read_csv('Recommendations generated/toy-example/Self_BaselineUI_estimations.csv', names=['user', 'item', 'est_score'])
toy_train_ui, toy_test_ui, toy_user_code_id, toy_user_id_code, \
toy_item_code_id, toy_item_id_code = helpers.data_to_csr(toy_train_read, toy_test_read)

print('Training data:')
display(toy_train_ui.todense())

print('Test data:')
display(toy_test_ui.todense())

print('Recommendations:')
display(reco)

print('Estimations:')
display(estimations)
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"# Sample recommendations"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 25,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Here is what user rated high:\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>user</th>\n",
|
|
" <th>rating</th>\n",
|
|
" <th>title</th>\n",
|
|
" <th>genres</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>153</th>\n",
|
|
" <td>1</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Toy Story (1995)</td>\n",
|
|
" <td>Animation, Children's, Comedy</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>57962</th>\n",
|
|
" <td>1</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Wrong Trousers, The (1993)</td>\n",
|
|
" <td>Animation, Comedy</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>52845</th>\n",
|
|
" <td>1</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Antonia's Line (1995)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>23811</th>\n",
|
|
" <td>1</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Chasing Amy (1997)</td>\n",
|
|
" <td>Drama, Romance</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>53494</th>\n",
|
|
" <td>1</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Nightmare Before Christmas, The (1993)</td>\n",
|
|
" <td>Children's, Comedy, Musical</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>23377</th>\n",
|
|
" <td>1</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Eat Drink Man Woman (1994)</td>\n",
|
|
" <td>Comedy, Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>54249</th>\n",
|
|
" <td>1</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>12 Angry Men (1957)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>56869</th>\n",
|
|
" <td>1</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Henry V (1989)</td>\n",
|
|
" <td>Drama, War</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>22174</th>\n",
|
|
" <td>1</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Groundhog Day (1993)</td>\n",
|
|
" <td>Comedy, Romance</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>21742</th>\n",
|
|
" <td>1</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Good, The Bad and The Ugly, The (1966)</td>\n",
|
|
" <td>Action, Western</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>51915</th>\n",
|
|
" <td>1</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Mighty Aphrodite (1995)</td>\n",
|
|
" <td>Comedy</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>58218</th>\n",
|
|
" <td>1</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Chasing Amy (1997)</td>\n",
|
|
" <td>Drama, Romance</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>58440</th>\n",
|
|
" <td>1</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Manon of the Spring (Manon des sources) (1986)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>20120</th>\n",
|
|
" <td>1</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Return of the Jedi (1983)</td>\n",
|
|
" <td>Action, Adventure, Romance, Sci-Fi, War</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>19043</th>\n",
|
|
" <td>1</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Mars Attacks! (1996)</td>\n",
|
|
" <td>Action, Comedy, Sci-Fi, War</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" user rating title \\\n",
|
|
"153 1 5 Toy Story (1995) \n",
|
|
"57962 1 5 Wrong Trousers, The (1993) \n",
|
|
"52845 1 5 Antonia's Line (1995) \n",
|
|
"23811 1 5 Chasing Amy (1997) \n",
|
|
"53494 1 5 Nightmare Before Christmas, The (1993) \n",
|
|
"23377 1 5 Eat Drink Man Woman (1994) \n",
|
|
"54249 1 5 12 Angry Men (1957) \n",
|
|
"56869 1 5 Henry V (1989) \n",
|
|
"22174 1 5 Groundhog Day (1993) \n",
|
|
"21742 1 5 Good, The Bad and The Ugly, The (1966) \n",
|
|
"51915 1 5 Mighty Aphrodite (1995) \n",
|
|
"58218 1 5 Chasing Amy (1997) \n",
|
|
"58440 1 5 Manon of the Spring (Manon des sources) (1986) \n",
|
|
"20120 1 5 Return of the Jedi (1983) \n",
|
|
"19043 1 5 Mars Attacks! (1996) \n",
|
|
"\n",
|
|
" genres \n",
|
|
"153 Animation, Children's, Comedy \n",
|
|
"57962 Animation, Comedy \n",
|
|
"52845 Drama \n",
|
|
"23811 Drama, Romance \n",
|
|
"53494 Children's, Comedy, Musical \n",
|
|
"23377 Comedy, Drama \n",
|
|
"54249 Drama \n",
|
|
"56869 Drama, War \n",
|
|
"22174 Comedy, Romance \n",
|
|
"21742 Action, Western \n",
|
|
"51915 Comedy \n",
|
|
"58218 Drama, Romance \n",
|
|
"58440 Drama \n",
|
|
"20120 Action, Adventure, Romance, Sci-Fi, War \n",
|
|
"19043 Action, Comedy, Sci-Fi, War "
|
|
]
|
|
},
|
|
"metadata": {},
|
|
"output_type": "display_data"
|
|
},
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Here is what we recommend:\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>user</th>\n",
|
|
" <th>rec_nb</th>\n",
|
|
" <th>title</th>\n",
|
|
" <th>genres</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>1.0</td>\n",
|
|
" <td>1</td>\n",
|
|
" <td>Great Day in Harlem, A (1994)</td>\n",
|
|
" <td>Documentary</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>942</th>\n",
|
|
" <td>1.0</td>\n",
|
|
" <td>2</td>\n",
|
|
" <td>Tough and Deadly (1995)</td>\n",
|
|
" <td>Action, Drama, Thriller</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>1884</th>\n",
|
|
" <td>1.0</td>\n",
|
|
" <td>3</td>\n",
|
|
" <td>Aiqing wansui (1994)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>2826</th>\n",
|
|
" <td>1.0</td>\n",
|
|
" <td>4</td>\n",
|
|
" <td>Delta of Venus (1994)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>3768</th>\n",
|
|
" <td>1.0</td>\n",
|
|
" <td>5</td>\n",
|
|
" <td>Someone Else's America (1995)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>4710</th>\n",
|
|
" <td>1.0</td>\n",
|
|
" <td>6</td>\n",
|
|
" <td>Saint of Fort Washington, The (1993)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>5652</th>\n",
|
|
" <td>1.0</td>\n",
|
|
" <td>7</td>\n",
|
|
" <td>Celestial Clockwork (1994)</td>\n",
|
|
" <td>Comedy</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>6594</th>\n",
|
|
" <td>1.0</td>\n",
|
|
" <td>8</td>\n",
|
|
" <td>Some Mother's Son (1996)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>7535</th>\n",
|
|
" <td>1.0</td>\n",
|
|
" <td>9</td>\n",
|
|
" <td>Prefontaine (1997)</td>\n",
|
|
" <td>Drama</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>8477</th>\n",
|
|
" <td>1.0</td>\n",
|
|
" <td>10</td>\n",
|
|
" <td>Santa with Muscles (1996)</td>\n",
|
|
" <td>Comedy</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" user rec_nb title \\\n",
|
|
"0 1.0 1 Great Day in Harlem, A (1994) \n",
|
|
"942 1.0 2 Tough and Deadly (1995) \n",
|
|
"1884 1.0 3 Aiqing wansui (1994) \n",
|
|
"2826 1.0 4 Delta of Venus (1994) \n",
|
|
"3768 1.0 5 Someone Else's America (1995) \n",
|
|
"4710 1.0 6 Saint of Fort Washington, The (1993) \n",
|
|
"5652 1.0 7 Celestial Clockwork (1994) \n",
|
|
"6594 1.0 8 Some Mother's Son (1996) \n",
|
|
"7535 1.0 9 Prefontaine (1997) \n",
|
|
"8477 1.0 10 Santa with Muscles (1996) \n",
|
|
"\n",
|
|
" genres \n",
|
|
"0 Documentary \n",
|
|
"942 Action, Drama, Thriller \n",
|
|
"1884 Drama \n",
|
|
"2826 Drama \n",
|
|
"3768 Drama \n",
|
|
"4710 Drama \n",
|
|
"5652 Comedy \n",
|
|
"6594 Drama \n",
|
|
"7535 Drama \n",
|
|
"8477 Comedy "
|
|
]
|
|
},
|
|
"execution_count": 25,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
 "# Qualitative sanity check: show one random user's highest-rated training items\n",
 "# side by side with the items our recommender produced for that user.\n",
 "train=pd.read_csv('./Datasets/ml-100k/train.csv', sep='\\t', header=None, names=['user', 'item', 'rating', 'timestamp'])\n",
 "items=pd.read_csv('./Datasets/ml-100k/movies.csv')\n",
 "\n",
 "user=random.choice(list(set(train['user'])))\n",
 "\n",
 "train_content=pd.merge(train, items, left_on='item', right_on='id')\n",
 "\n",
 "print('Here is what user rated high:')\n",
 "display(train_content[train_content['user']==user][['user', 'rating', 'title', 'genres']]\\\n",
 "    .sort_values(by='rating', ascending=False)[:15])\n",
 "\n",
 "# Reco rows have the layout: user_id, item_1, score_1, item_2, score_2, ...\n",
 "reco = np.loadtxt('Recommendations generated/ml-100k/Self_BaselineUI_reco.csv', delimiter=',')\n",
 "\n",
 "# Let's ignore scores - they are not used in evaluation: \n",
 "reco_users=reco[:,:1]\n",
 "reco_items=reco[:,1::2]\n",
 "# Let's put them into one array\n",
 "reco=np.concatenate((reco_users, reco_items), axis=1)\n",
 "\n",
 "# Let's rebuild the user-item dataframe (one row per recommended item, ranked by rec_nb)\n",
 "recommended=[]\n",
 "for row in reco:\n",
 "    for rec_nb, entry in enumerate(row[1:]):\n",
 "        recommended.append((row[0], rec_nb+1, entry))\n",
 "recommended=pd.DataFrame(recommended, columns=['user','rec_nb', 'item'])\n",
 "\n",
 "recommended_content=pd.merge(recommended, items, left_on='item', right_on='id')\n",
 "\n",
 "print('Here is what we recommend:')\n",
 "recommended_content[recommended_content['user']==user][['user', 'rec_nb', 'title', 'genres']].sort_values(by='rec_nb')"
 ]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"# Project task 3: implement some other evaluation measure"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 26,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# it may be your idea, modification of what we have already implemented \n",
|
|
"# (for example Hit2 rate which would count as a success users who received at least 2 relevant recommendations) \n",
|
|
"# or something well-known\n",
|
|
"# expected output: modification of evaluation_measures.py such that evaluate_all will also display your measure"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 30,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stderr",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"943it [00:00, 12346.28it/s]\n",
|
|
"943it [00:00, 9951.36it/s]\n",
|
|
"943it [00:00, 10833.90it/s]\n",
|
|
"943it [00:00, 10505.70it/s]\n",
|
|
"943it [00:00, 10502.72it/s]\n",
|
|
"943it [00:00, 10505.98it/s]\n",
|
|
"943it [00:00, 10449.06it/s]\n",
|
|
"943it [00:00, 11646.25it/s]\n",
|
|
"943it [00:00, 8780.32it/s]\n",
|
|
"943it [00:00, 10063.91it/s]\n",
|
|
"943it [00:00, 10621.71it/s]\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/html": [
|
|
"<div>\n",
|
|
"<style scoped>\n",
|
|
" .dataframe tbody tr th:only-of-type {\n",
|
|
" vertical-align: middle;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe tbody tr th {\n",
|
|
" vertical-align: top;\n",
|
|
" }\n",
|
|
"\n",
|
|
" .dataframe thead th {\n",
|
|
" text-align: right;\n",
|
|
" }\n",
|
|
"</style>\n",
|
|
"<table border=\"1\" class=\"dataframe\">\n",
|
|
" <thead>\n",
|
|
" <tr style=\"text-align: right;\">\n",
|
|
" <th></th>\n",
|
|
" <th>Model</th>\n",
|
|
" <th>RMSE</th>\n",
|
|
" <th>MAE</th>\n",
|
|
" <th>precision</th>\n",
|
|
" <th>recall</th>\n",
|
|
" <th>F_1</th>\n",
|
|
" <th>F_05</th>\n",
|
|
" <th>precision_super</th>\n",
|
|
" <th>recall_super</th>\n",
|
|
" <th>NDCG</th>\n",
|
|
" <th>mAP</th>\n",
|
|
" <th>MRR</th>\n",
|
|
" <th>LAUC</th>\n",
|
|
" <th>HR</th>\n",
|
|
" <th>Reco in test</th>\n",
|
|
" <th>Test coverage</th>\n",
|
|
" <th>Shannon</th>\n",
|
|
" <th>Gini</th>\n",
|
|
" </tr>\n",
|
|
" </thead>\n",
|
|
" <tbody>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Self_TopPop</td>\n",
|
|
" <td>2.508258</td>\n",
|
|
" <td>2.217909</td>\n",
|
|
" <td>0.188865</td>\n",
|
|
" <td>0.116919</td>\n",
|
|
" <td>0.118732</td>\n",
|
|
" <td>0.141584</td>\n",
|
|
" <td>0.130472</td>\n",
|
|
" <td>0.137473</td>\n",
|
|
" <td>0.214651</td>\n",
|
|
" <td>0.111707</td>\n",
|
|
" <td>0.400939</td>\n",
|
|
" <td>0.555546</td>\n",
|
|
" <td>0.765642</td>\n",
|
|
" <td>1.000000</td>\n",
|
|
" <td>0.038961</td>\n",
|
|
" <td>3.159079</td>\n",
|
|
" <td>0.987317</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Ready_Baseline</td>\n",
|
|
" <td>0.949459</td>\n",
|
|
" <td>0.752487</td>\n",
|
|
" <td>0.091410</td>\n",
|
|
" <td>0.037652</td>\n",
|
|
" <td>0.046030</td>\n",
|
|
" <td>0.061286</td>\n",
|
|
" <td>0.079614</td>\n",
|
|
" <td>0.056463</td>\n",
|
|
" <td>0.095957</td>\n",
|
|
" <td>0.043178</td>\n",
|
|
" <td>0.198193</td>\n",
|
|
" <td>0.515501</td>\n",
|
|
" <td>0.437964</td>\n",
|
|
" <td>1.000000</td>\n",
|
|
" <td>0.033911</td>\n",
|
|
" <td>2.836513</td>\n",
|
|
" <td>0.991139</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Self_GlobalAvg</td>\n",
|
|
" <td>1.125760</td>\n",
|
|
" <td>0.943534</td>\n",
|
|
" <td>0.061188</td>\n",
|
|
" <td>0.025968</td>\n",
|
|
" <td>0.031383</td>\n",
|
|
" <td>0.041343</td>\n",
|
|
" <td>0.040558</td>\n",
|
|
" <td>0.032107</td>\n",
|
|
" <td>0.067695</td>\n",
|
|
" <td>0.027470</td>\n",
|
|
" <td>0.171187</td>\n",
|
|
" <td>0.509546</td>\n",
|
|
" <td>0.384942</td>\n",
|
|
" <td>1.000000</td>\n",
|
|
" <td>0.025974</td>\n",
|
|
" <td>2.711772</td>\n",
|
|
" <td>0.992003</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Ready_Random</td>\n",
|
|
" <td>1.510030</td>\n",
|
|
" <td>1.211848</td>\n",
|
|
" <td>0.050053</td>\n",
|
|
" <td>0.022367</td>\n",
|
|
" <td>0.025984</td>\n",
|
|
" <td>0.033727</td>\n",
|
|
" <td>0.030687</td>\n",
|
|
" <td>0.023255</td>\n",
|
|
" <td>0.055392</td>\n",
|
|
" <td>0.021602</td>\n",
|
|
" <td>0.137690</td>\n",
|
|
" <td>0.507713</td>\n",
|
|
" <td>0.338282</td>\n",
|
|
" <td>0.987911</td>\n",
|
|
" <td>0.187590</td>\n",
|
|
" <td>5.111878</td>\n",
|
|
" <td>0.906685</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Ready_I-KNN</td>\n",
|
|
" <td>1.030386</td>\n",
|
|
" <td>0.813067</td>\n",
|
|
" <td>0.026087</td>\n",
|
|
" <td>0.006908</td>\n",
|
|
" <td>0.010593</td>\n",
|
|
" <td>0.016046</td>\n",
|
|
" <td>0.021137</td>\n",
|
|
" <td>0.009522</td>\n",
|
|
" <td>0.024214</td>\n",
|
|
" <td>0.008958</td>\n",
|
|
" <td>0.048068</td>\n",
|
|
" <td>0.499885</td>\n",
|
|
" <td>0.154825</td>\n",
|
|
" <td>0.402333</td>\n",
|
|
" <td>0.434343</td>\n",
|
|
" <td>5.133650</td>\n",
|
|
" <td>0.877999</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Ready_I-KNNWithMeans</td>\n",
|
|
" <td>0.955921</td>\n",
|
|
" <td>0.754037</td>\n",
|
|
" <td>0.004984</td>\n",
|
|
" <td>0.003225</td>\n",
|
|
" <td>0.003406</td>\n",
|
|
" <td>0.003956</td>\n",
|
|
" <td>0.004506</td>\n",
|
|
" <td>0.003861</td>\n",
|
|
" <td>0.006815</td>\n",
|
|
" <td>0.002906</td>\n",
|
|
" <td>0.020332</td>\n",
|
|
" <td>0.497969</td>\n",
|
|
" <td>0.039236</td>\n",
|
|
" <td>0.587699</td>\n",
|
|
" <td>0.071429</td>\n",
|
|
" <td>2.699278</td>\n",
|
|
" <td>0.991353</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Ready_I-KNNBaseline</td>\n",
|
|
" <td>0.935327</td>\n",
|
|
" <td>0.737424</td>\n",
|
|
" <td>0.002545</td>\n",
|
|
" <td>0.000755</td>\n",
|
|
" <td>0.001105</td>\n",
|
|
" <td>0.001602</td>\n",
|
|
" <td>0.002253</td>\n",
|
|
" <td>0.000930</td>\n",
|
|
" <td>0.003444</td>\n",
|
|
" <td>0.001362</td>\n",
|
|
" <td>0.011760</td>\n",
|
|
" <td>0.496724</td>\n",
|
|
" <td>0.021209</td>\n",
|
|
" <td>0.482821</td>\n",
|
|
" <td>0.059885</td>\n",
|
|
" <td>2.232578</td>\n",
|
|
" <td>0.994487</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Ready_U-KNN</td>\n",
|
|
" <td>1.023495</td>\n",
|
|
" <td>0.807913</td>\n",
|
|
" <td>0.000742</td>\n",
|
|
" <td>0.000205</td>\n",
|
|
" <td>0.000305</td>\n",
|
|
" <td>0.000449</td>\n",
|
|
" <td>0.000536</td>\n",
|
|
" <td>0.000198</td>\n",
|
|
" <td>0.000845</td>\n",
|
|
" <td>0.000274</td>\n",
|
|
" <td>0.002744</td>\n",
|
|
" <td>0.496441</td>\n",
|
|
" <td>0.007423</td>\n",
|
|
" <td>0.602121</td>\n",
|
|
" <td>0.010823</td>\n",
|
|
" <td>2.089186</td>\n",
|
|
" <td>0.995706</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Self_TopRated</td>\n",
|
|
" <td>2.508258</td>\n",
|
|
" <td>2.217909</td>\n",
|
|
" <td>0.000954</td>\n",
|
|
" <td>0.000188</td>\n",
|
|
" <td>0.000298</td>\n",
|
|
" <td>0.000481</td>\n",
|
|
" <td>0.000644</td>\n",
|
|
" <td>0.000223</td>\n",
|
|
" <td>0.001043</td>\n",
|
|
" <td>0.000335</td>\n",
|
|
" <td>0.003348</td>\n",
|
|
" <td>0.496433</td>\n",
|
|
" <td>0.009544</td>\n",
|
|
" <td>0.699046</td>\n",
|
|
" <td>0.005051</td>\n",
|
|
" <td>1.945910</td>\n",
|
|
" <td>0.995669</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Self_BaselineUI</td>\n",
|
|
" <td>0.967585</td>\n",
|
|
" <td>0.762740</td>\n",
|
|
" <td>0.000954</td>\n",
|
|
" <td>0.000170</td>\n",
|
|
" <td>0.000278</td>\n",
|
|
" <td>0.000463</td>\n",
|
|
" <td>0.000644</td>\n",
|
|
" <td>0.000189</td>\n",
|
|
" <td>0.000752</td>\n",
|
|
" <td>0.000168</td>\n",
|
|
" <td>0.001677</td>\n",
|
|
" <td>0.496424</td>\n",
|
|
" <td>0.009544</td>\n",
|
|
" <td>0.600530</td>\n",
|
|
" <td>0.005051</td>\n",
|
|
" <td>1.803126</td>\n",
|
|
" <td>0.996380</td>\n",
|
|
" </tr>\n",
|
|
" <tr>\n",
|
|
" <th>0</th>\n",
|
|
" <td>Self_IKNN</td>\n",
|
|
" <td>1.018363</td>\n",
|
|
" <td>0.808793</td>\n",
|
|
" <td>0.000318</td>\n",
|
|
" <td>0.000108</td>\n",
|
|
" <td>0.000140</td>\n",
|
|
" <td>0.000189</td>\n",
|
|
" <td>0.000000</td>\n",
|
|
" <td>0.000000</td>\n",
|
|
" <td>0.000214</td>\n",
|
|
" <td>0.000037</td>\n",
|
|
" <td>0.000368</td>\n",
|
|
" <td>0.496391</td>\n",
|
|
" <td>0.003181</td>\n",
|
|
" <td>0.392153</td>\n",
|
|
" <td>0.115440</td>\n",
|
|
" <td>4.174741</td>\n",
|
|
" <td>0.965327</td>\n",
|
|
" </tr>\n",
|
|
" </tbody>\n",
|
|
"</table>\n",
|
|
"</div>"
|
|
],
|
|
"text/plain": [
|
|
" Model RMSE MAE precision recall F_1 \\\n",
|
|
"0 Self_TopPop 2.508258 2.217909 0.188865 0.116919 0.118732 \n",
|
|
"0 Ready_Baseline 0.949459 0.752487 0.091410 0.037652 0.046030 \n",
|
|
"0 Self_GlobalAvg 1.125760 0.943534 0.061188 0.025968 0.031383 \n",
|
|
"0 Ready_Random 1.510030 1.211848 0.050053 0.022367 0.025984 \n",
|
|
"0 Ready_I-KNN 1.030386 0.813067 0.026087 0.006908 0.010593 \n",
|
|
"0 Ready_I-KNNWithMeans 0.955921 0.754037 0.004984 0.003225 0.003406 \n",
|
|
"0 Ready_I-KNNBaseline 0.935327 0.737424 0.002545 0.000755 0.001105 \n",
|
|
"0 Ready_U-KNN 1.023495 0.807913 0.000742 0.000205 0.000305 \n",
|
|
"0 Self_TopRated 2.508258 2.217909 0.000954 0.000188 0.000298 \n",
|
|
"0 Self_BaselineUI 0.967585 0.762740 0.000954 0.000170 0.000278 \n",
|
|
"0 Self_IKNN 1.018363 0.808793 0.000318 0.000108 0.000140 \n",
|
|
"\n",
|
|
" F_05 precision_super recall_super NDCG mAP MRR \\\n",
|
|
"0 0.141584 0.130472 0.137473 0.214651 0.111707 0.400939 \n",
|
|
"0 0.061286 0.079614 0.056463 0.095957 0.043178 0.198193 \n",
|
|
"0 0.041343 0.040558 0.032107 0.067695 0.027470 0.171187 \n",
|
|
"0 0.033727 0.030687 0.023255 0.055392 0.021602 0.137690 \n",
|
|
"0 0.016046 0.021137 0.009522 0.024214 0.008958 0.048068 \n",
|
|
"0 0.003956 0.004506 0.003861 0.006815 0.002906 0.020332 \n",
|
|
"0 0.001602 0.002253 0.000930 0.003444 0.001362 0.011760 \n",
|
|
"0 0.000449 0.000536 0.000198 0.000845 0.000274 0.002744 \n",
|
|
"0 0.000481 0.000644 0.000223 0.001043 0.000335 0.003348 \n",
|
|
"0 0.000463 0.000644 0.000189 0.000752 0.000168 0.001677 \n",
|
|
"0 0.000189 0.000000 0.000000 0.000214 0.000037 0.000368 \n",
|
|
"\n",
|
|
" LAUC HR Reco in test Test coverage Shannon Gini \n",
|
|
"0 0.555546 0.765642 1.000000 0.038961 3.159079 0.987317 \n",
|
|
"0 0.515501 0.437964 1.000000 0.033911 2.836513 0.991139 \n",
|
|
"0 0.509546 0.384942 1.000000 0.025974 2.711772 0.992003 \n",
|
|
"0 0.507713 0.338282 0.987911 0.187590 5.111878 0.906685 \n",
|
|
"0 0.499885 0.154825 0.402333 0.434343 5.133650 0.877999 \n",
|
|
"0 0.497969 0.039236 0.587699 0.071429 2.699278 0.991353 \n",
|
|
"0 0.496724 0.021209 0.482821 0.059885 2.232578 0.994487 \n",
|
|
"0 0.496441 0.007423 0.602121 0.010823 2.089186 0.995706 \n",
|
|
"0 0.496433 0.009544 0.699046 0.005051 1.945910 0.995669 \n",
|
|
"0 0.496424 0.009544 0.600530 0.005051 1.803126 0.996380 \n",
|
|
"0 0.496391 0.003181 0.392153 0.115440 4.174741 0.965327 "
|
|
]
|
|
},
|
|
"execution_count": 30,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
 "# Run the full evaluation suite (evaluation_measures.py) over every recommender's\n",
 "# output files found in dir_path, and display one summary row per model.\n",
 "dir_path='Recommendations generated/ml-100k/'\n",
 "super_reactions=[4,5]  # ratings counted as 'super' (strong positive) reactions for precision_super/recall_super\n",
 "test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None)\n",
 "\n",
 "ev.evaluate_all(test, dir_path, super_reactions)"
 ]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": []
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": []
|
|
}
|
|
],
|
|
"metadata": {
|
|
"kernelspec": {
|
|
"display_name": "Python 3",
|
|
"language": "python",
|
|
"name": "python3"
|
|
},
|
|
"language_info": {
|
|
"codemirror_mode": {
|
|
"name": "ipython",
|
|
"version": 3
|
|
},
|
|
"file_extension": ".py",
|
|
"mimetype": "text/x-python",
|
|
"name": "python",
|
|
"nbconvert_exporter": "python",
|
|
"pygments_lexer": "ipython3",
|
|
"version": "3.8.3"
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 4
|
|
}
|