{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "### Prepare test set" ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "slideshow": { "slide_type": "-" } }, "outputs": [], "source": [ "import pandas as pd\n", "import numpy as np\n", "import scipy.sparse as sparse\n", "from collections import defaultdict\n", "from itertools import chain\n", "import random\n", "from tqdm import tqdm\n", "\n", "# In evaluation we do not load train set - it is not needed\n", "test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None)\n", "test.columns=['user', 'item', 'rating', 'timestamp']\n", "\n", "test['user_code'] = test['user'].astype(\"category\").cat.codes\n", "test['item_code'] = test['item'].astype(\"category\").cat.codes\n", "\n", "user_code_id = dict(enumerate(test['user'].astype(\"category\").cat.categories))\n", "user_id_code = dict((v, k) for k, v in user_code_id.items())\n", "item_code_id = dict(enumerate(test['item'].astype(\"category\").cat.categories))\n", "item_id_code = dict((v, k) for k, v in item_code_id.items())\n", "\n", "test_ui = sparse.csr_matrix((test['rating'], (test['user_code'], test['item_code'])))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Estimations metrics" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "estimations_df=pd.read_csv('Recommendations generated/ml-100k/Ready_Baseline_estimations.csv', header=None)\n", "estimations_df.columns=['user', 'item' ,'score']\n", "\n", "estimations_df['user_code']=[user_id_code[user] for user in estimations_df['user']]\n", "estimations_df['item_code']=[item_id_code[item] for item in estimations_df['item']]\n", "estimations=sparse.csr_matrix((estimations_df['score'], (estimations_df['user_code'], estimations_df['item_code'])), shape=test_ui.shape)" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "def estimations_metrics(test_ui, estimations):\n", " result=[]\n", "\n", " RMSE=(np.sum((estimations.data-test_ui.data)**2)/estimations.nnz)**(1/2)\n", " result.append(['RMSE', RMSE])\n", "\n", " MAE=np.sum(abs(estimations.data-test_ui.data))/estimations.nnz\n", " result.append(['MAE', MAE])\n", " \n", " df_result=(pd.DataFrame(list(zip(*result))[1])).T\n", " df_result.columns=list(zip(*result))[0]\n", " return df_result" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
RMSEMAE
00.9494590.752487
\n", "
" ], "text/plain": [ " RMSE MAE\n", "0 0.949459 0.752487" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# in case of error (in the laboratories) you might have to switch to the other version of pandas\n", "# try !pip3 install pandas=='1.0.3' (or pip if you use python 2) and restart the kernel\n", "\n", "estimations_metrics(test_ui, estimations)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Ranking metrics" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[663, 475, 62, ..., 472, 269, 503],\n", " [ 48, 313, 475, ..., 591, 175, 466],\n", " [351, 313, 475, ..., 591, 175, 466],\n", " ...,\n", " [259, 313, 475, ..., 11, 591, 175],\n", " [ 33, 313, 475, ..., 11, 591, 175],\n", " [ 77, 313, 475, ..., 11, 591, 175]])" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import numpy as np\n", "reco = np.loadtxt('Recommendations generated/ml-100k/Ready_Baseline_reco.csv', delimiter=',')\n", "# Let's ignore scores - they are not used in evaluation: \n", "users=reco[:,:1]\n", "items=reco[:,1::2]\n", "# Let's use inner ids instead of real ones\n", "users=np.vectorize(lambda x: user_id_code.setdefault(x, -1))(users)\n", "items=np.vectorize(lambda x: item_id_code.setdefault(x, -1))(items) # maybe items we recommend are not in test set\n", "# Let's put them into one array\n", "reco=np.concatenate((users, items), axis=1)\n", "reco" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "def ranking_metrics(test_ui, reco, super_reactions=[], topK=10):\n", " \n", " nb_items=test_ui.shape[1]\n", " relevant_users, super_relevant_users, prec, rec, F_1, F_05, prec_super, rec_super, ndcg, mAP, MRR, LAUC, HR=\\\n", " 0,0,0,0,0,0,0,0,0,0,0,0,0\n", " \n", " cg = (1.0 / np.log2(np.arange(2, topK + 2)))\n", " cg_sum = np.cumsum(cg)\n", " \n", " for (nb_user, user) in tqdm(enumerate(reco[:,0])):\n", " u_rated_items=test_ui.indices[test_ui.indptr[user]:test_ui.indptr[user+1]]\n", " nb_u_rated_items=len(u_rated_items)\n", " if nb_u_rated_items>0: # skip users with no items in test set (still possible that there will be no super items)\n", " relevant_users+=1\n", " \n", " u_super_items=u_rated_items[np.vectorize(lambda x: x in super_reactions)\\\n", " (test_ui.data[test_ui.indptr[user]:test_ui.indptr[user+1]])]\n", " # more natural seems u_super_items=[item for item in u_rated_items if test_ui[user,item] in super_reactions]\n", " # but accesing test_ui[user,item] is expensive -we should avoid doing it\n", " if len(u_super_items)>0:\n", " super_relevant_users+=1\n", " \n", " user_successes=np.zeros(topK)\n", " nb_user_successes=0\n", " user_super_successes=np.zeros(topK)\n", " nb_user_super_successes=0\n", " \n", " # evaluation\n", " for (item_position,item) in enumerate(reco[nb_user,1:topK+1]):\n", " if item in u_rated_items:\n", " user_successes[item_position]=1\n", " nb_user_successes+=1\n", " if item in u_super_items:\n", " user_super_successes[item_position]=1\n", " nb_user_super_successes+=1\n", " \n", " prec_u=nb_user_successes/topK \n", " prec+=prec_u\n", " \n", " rec_u=nb_user_successes/nb_u_rated_items\n", " rec+=rec_u\n", " \n", " F_1+=2*(prec_u*rec_u)/(prec_u+rec_u) if prec_u+rec_u>0 else 0\n", " F_05+=(0.5**2+1)*(prec_u*rec_u)/(0.5**2*prec_u+rec_u) if prec_u+rec_u>0 else 0\n", " \n", " prec_super+=nb_user_super_successes/topK\n", " rec_super+=nb_user_super_successes/max(len(u_super_items),1) # to set 0 
if no super items\n", " ndcg+=np.dot(user_successes,cg)/cg_sum[min(topK, nb_u_rated_items)-1]\n", " \n", " cumsum_successes=np.cumsum(user_successes)\n", " mAP+=np.dot(cumsum_successes/np.arange(1,topK+1), user_successes)/min(topK, nb_u_rated_items)\n", " MRR+=1/(user_successes.nonzero()[0][0]+1) if user_successes.nonzero()[0].size>0 else 0\n", " LAUC+=(np.dot(cumsum_successes, 1-user_successes)+\\\n", " (nb_user_successes+nb_u_rated_items)/2*((nb_items-nb_u_rated_items)-(topK-nb_user_successes)))/\\\n", " ((nb_items-nb_u_rated_items)*nb_u_rated_items)\n", " \n", " HR+=nb_user_successes>0\n", " \n", " \n", " result=[]\n", " result.append(('precision', prec/relevant_users))\n", " result.append(('recall', rec/relevant_users))\n", " result.append(('F_1', F_1/relevant_users))\n", " result.append(('F_05', F_05/relevant_users))\n", " result.append(('precision_super', prec_super/super_relevant_users))\n", " result.append(('recall_super', rec_super/super_relevant_users))\n", " result.append(('NDCG', ndcg/relevant_users))\n", " result.append(('mAP', mAP/relevant_users))\n", " result.append(('MRR', MRR/relevant_users))\n", " result.append(('LAUC', LAUC/relevant_users))\n", " result.append(('HR', HR/relevant_users))\n", "\n", " df_result=(pd.DataFrame(list(zip(*result))[1])).T\n", " df_result.columns=list(zip(*result))[0]\n", " return df_result" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "943it [00:00, 3056.45it/s]\n" ] }, { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
precisionrecallF_1F_05precision_superrecall_superNDCGmAPMRRLAUCHR
00.091410.0376520.046030.0612860.0796140.0564630.0959570.0431780.1981930.5155010.437964
\n", "
" ], "text/plain": [ " precision recall F_1 F_05 precision_super recall_super \\\n", "0 0.09141 0.037652 0.04603 0.061286 0.079614 0.056463 \n", "\n", " NDCG mAP MRR LAUC HR \n", "0 0.095957 0.043178 0.198193 0.515501 0.437964 " ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "ranking_metrics(test_ui, reco, super_reactions=[4,5], topK=10)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Diversity metrics" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "def diversity_metrics(test_ui, reco, topK=10):\n", " \n", " frequencies=defaultdict(int)\n", " \n", " # let's assign 0 to all items in test set\n", " for item in list(set(test_ui.indices)):\n", " frequencies[item]=0\n", " \n", " # counting frequencies\n", " for item in reco[:,1:].flat:\n", " frequencies[item]+=1\n", " \n", " nb_reco_outside_test=frequencies[-1]\n", " del frequencies[-1]\n", " \n", " frequencies=np.array(list(frequencies.values()))\n", " \n", " nb_rec_items=len(frequencies[frequencies>0])\n", " nb_reco_inside_test=np.sum(frequencies)\n", " \n", " frequencies=frequencies/np.sum(frequencies)\n", " frequencies=np.sort(frequencies)\n", " \n", " with np.errstate(divide='ignore'): # let's put zeros put items with 0 frequency and ignore division warning\n", " log_frequencies=np.nan_to_num(np.log(frequencies), posinf=0, neginf=0)\n", " \n", " result=[]\n", " result.append(('Reco in test', nb_reco_inside_test/(nb_reco_inside_test+nb_reco_outside_test)))\n", " result.append(('Test coverage', nb_rec_items/test_ui.shape[1]))\n", " result.append(('Shannon', -np.dot(frequencies, log_frequencies)))\n", " result.append(('Gini', np.dot(frequencies, np.arange(1-len(frequencies), len(frequencies), 2))/(len(frequencies)-1)))\n", " \n", " df_result=(pd.DataFrame(list(zip(*result))[1])).T\n", " df_result.columns=list(zip(*result))[0]\n", " return df_result" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
Reco in testTest coverageShannonGini
01.00.0339112.8365130.991139
\n", "
" ], "text/plain": [ " Reco in test Test coverage Shannon Gini\n", "0 1.0 0.033911 2.836513 0.991139" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# in case of errors try !pip3 install numpy==1.18.4 (or pip if you use python 2) and restart the kernel\n", "\n", "import evaluation_measures as ev\n", "import imp\n", "imp.reload(ev)\n", "\n", "x=diversity_metrics(test_ui, reco, topK=10)\n", "x" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# To be used in other notebooks" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "943it [00:00, 2872.87it/s]\n" ] }, { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
RMSEMAEprecisionrecallF_1F_05precision_superrecall_superNDCGmAPMRRLAUCHRReco in testTest coverageShannonGini
00.9494590.7524870.091410.0376520.046030.0612860.0796140.0564630.0959570.0431780.1981930.5155010.4379641.00.0339112.8365130.991139
\n", "
" ], "text/plain": [ " RMSE MAE precision recall F_1 F_05 \\\n", "0 0.949459 0.752487 0.09141 0.037652 0.04603 0.061286 \n", "\n", " precision_super recall_super NDCG mAP MRR LAUC \\\n", "0 0.079614 0.056463 0.095957 0.043178 0.198193 0.515501 \n", "\n", " HR Reco in test Test coverage Shannon Gini \n", "0 0.437964 1.0 0.033911 2.836513 0.991139 " ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import evaluation_measures as ev\n", "import imp\n", "imp.reload(ev)\n", "\n", "estimations_df=pd.read_csv('Recommendations generated/ml-100k/Ready_Baseline_estimations.csv', header=None)\n", "reco=np.loadtxt('Recommendations generated/ml-100k/Ready_Baseline_reco.csv', delimiter=',')\n", "\n", "ev.evaluate(test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None),\n", " estimations_df=estimations_df, \n", " reco=reco,\n", " super_reactions=[4,5])\n", "#also you can just type ev.evaluate_all(estimations_df, reco) - I put above values as default" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "943it [00:00, 3486.43it/s]\n", "943it [00:00, 4513.34it/s]\n", "943it [00:00, 3772.94it/s]\n", "943it [00:00, 2882.13it/s]\n", "943it [00:00, 2333.14it/s]\n", "943it [00:00, 2868.64it/s]\n", "943it [00:00, 3744.73it/s]\n" ] } ], "source": [ "import evaluation_measures as ev\n", "import imp\n", "imp.reload(ev)\n", "\n", "dir_path=\"Recommendations generated/ml-100k/\"\n", "super_reactions=[4,5]\n", "test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None)\n", "\n", "df=ev.evaluate_all(test, dir_path, super_reactions)\n", "#also you can just type ev.evaluate_all() - I put above values as default" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
ModelRMSEMAEprecisionrecallF_1F_05precision_superrecall_super
0Self_TopPop2.5082582.2179090.1888650.1169190.1187320.1415840.1304720.137473
0Ready_Baseline0.9494590.7524870.0914100.0376520.0460300.0612860.0796140.056463
0Self_GlobalAvg1.1257600.9435340.0611880.0259680.0313830.0413430.0405580.032107
0Ready_Random1.5175931.2201810.0460230.0190380.0231180.0307340.0292920.021639
0Self_TopRated2.5082582.2179090.0009540.0001880.0002980.0004810.0006440.000223
0Self_BaselineIU0.9581360.7540510.0009540.0001880.0002980.0004810.0006440.000223
0Self_BaselineUI0.9675850.7627400.0009540.0001700.0002780.0004630.0006440.000189
\n", "
" ], "text/plain": [ " Model RMSE MAE precision recall F_1 \\\n", "0 Self_TopPop 2.508258 2.217909 0.188865 0.116919 0.118732 \n", "0 Ready_Baseline 0.949459 0.752487 0.091410 0.037652 0.046030 \n", "0 Self_GlobalAvg 1.125760 0.943534 0.061188 0.025968 0.031383 \n", "0 Ready_Random 1.517593 1.220181 0.046023 0.019038 0.023118 \n", "0 Self_TopRated 2.508258 2.217909 0.000954 0.000188 0.000298 \n", "0 Self_BaselineIU 0.958136 0.754051 0.000954 0.000188 0.000298 \n", "0 Self_BaselineUI 0.967585 0.762740 0.000954 0.000170 0.000278 \n", "\n", " F_05 precision_super recall_super \n", "0 0.141584 0.130472 0.137473 \n", "0 0.061286 0.079614 0.056463 \n", "0 0.041343 0.040558 0.032107 \n", "0 0.030734 0.029292 0.021639 \n", "0 0.000481 0.000644 0.000223 \n", "0 0.000481 0.000644 0.000223 \n", "0 0.000463 0.000644 0.000189 " ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df.iloc[:,:9]" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
ModelNDCGmAPMRRLAUCHRReco in testTest coverageShannonGini
0Self_TopPop0.2146510.1117070.4009390.5555460.7656421.0000000.0389613.1590790.987317
0Ready_Baseline0.0959570.0431780.1981930.5155010.4379641.0000000.0339112.8365130.991139
0Self_GlobalAvg0.0676950.0274700.1711870.5095460.3849421.0000000.0259742.7117720.992003
0Ready_Random0.0508180.0199580.1266460.5060310.3054080.9885470.1746035.0823830.908434
0Self_TopRated0.0010430.0003350.0033480.4964330.0095440.6990460.0050511.9459100.995669
0Self_BaselineIU0.0010430.0003350.0033480.4964330.0095440.6990460.0050511.9459100.995669
0Self_BaselineUI0.0007520.0001680.0016770.4964240.0095440.6005300.0050511.8031260.996380
\n", "
" ], "text/plain": [ " Model NDCG mAP MRR LAUC HR \\\n", "0 Self_TopPop 0.214651 0.111707 0.400939 0.555546 0.765642 \n", "0 Ready_Baseline 0.095957 0.043178 0.198193 0.515501 0.437964 \n", "0 Self_GlobalAvg 0.067695 0.027470 0.171187 0.509546 0.384942 \n", "0 Ready_Random 0.050818 0.019958 0.126646 0.506031 0.305408 \n", "0 Self_TopRated 0.001043 0.000335 0.003348 0.496433 0.009544 \n", "0 Self_BaselineIU 0.001043 0.000335 0.003348 0.496433 0.009544 \n", "0 Self_BaselineUI 0.000752 0.000168 0.001677 0.496424 0.009544 \n", "\n", " Reco in test Test coverage Shannon Gini \n", "0 1.000000 0.038961 3.159079 0.987317 \n", "0 1.000000 0.033911 2.836513 0.991139 \n", "0 1.000000 0.025974 2.711772 0.992003 \n", "0 0.988547 0.174603 5.082383 0.908434 \n", "0 0.699046 0.005051 1.945910 0.995669 \n", "0 0.699046 0.005051 1.945910 0.995669 \n", "0 0.600530 0.005051 1.803126 0.996380 " ] }, "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df.iloc[:,np.append(0,np.arange(9, df.shape[1]))]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Check metrics on toy dataset" ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "3it [00:00, 3003.08it/s]\n", "3it [00:00, 1975.65it/s]\n" ] }, { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
ModelRMSEMAEprecisionrecallF_1F_05precision_superrecall_superNDCGmAPMRRLAUCHRHR2Reco in testTest coverageShannonGini
0Self_BaselineUI1.6124521.4000.4444440.8888890.5555560.4786320.3333330.750.6769070.5740740.6111110.6388891.00.3333330.8888890.81.3862940.250000
0Self_BaselineIU1.6483371.5750.4444440.8888890.5555560.4786320.3333330.750.7205500.6296300.6666670.7222221.00.3333330.7777780.81.3517840.357143
\n", "
" ], "text/plain": [ " Model RMSE MAE precision recall F_1 F_05 \\\n", "0 Self_BaselineUI 1.612452 1.400 0.444444 0.888889 0.555556 0.478632 \n", "0 Self_BaselineIU 1.648337 1.575 0.444444 0.888889 0.555556 0.478632 \n", "\n", " precision_super recall_super NDCG mAP MRR LAUC HR \\\n", "0 0.333333 0.75 0.676907 0.574074 0.611111 0.638889 1.0 \n", "0 0.333333 0.75 0.720550 0.629630 0.666667 0.722222 1.0 \n", "\n", " HR2 Reco in test Test coverage Shannon Gini \n", "0 0.333333 0.888889 0.8 1.386294 0.250000 \n", "0 0.333333 0.777778 0.8 1.351784 0.357143 " ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Training data:\n" ] }, { "data": { "text/plain": [ "matrix([[3, 4, 0, 0, 5, 0, 0, 4],\n", " [0, 1, 2, 3, 0, 0, 0, 0],\n", " [0, 0, 0, 5, 0, 3, 4, 0]], dtype=int64)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Test data:\n" ] }, { "data": { "text/plain": [ "matrix([[0, 0, 0, 0, 0, 0, 3, 0],\n", " [0, 0, 0, 0, 5, 0, 0, 0],\n", " [5, 0, 4, 0, 0, 0, 0, 2]], dtype=int64)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Recommendations:\n" ] }, { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
0123456
00305.0204.0604.0
110403.0602.0702.0
220405.0204.0704.0
\n", "
" ], "text/plain": [ " 0 1 2 3 4 5 6\n", "0 0 30 5.0 20 4.0 60 4.0\n", "1 10 40 3.0 60 2.0 70 2.0\n", "2 20 40 5.0 20 4.0 70 4.0" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Estimations:\n" ] }, { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
useritemest_score
00604.0
110403.0
22003.0
320204.0
420704.0
\n", "
" ], "text/plain": [ " user item est_score\n", "0 0 60 4.0\n", "1 10 40 3.0\n", "2 20 0 3.0\n", "3 20 20 4.0\n", "4 20 70 4.0" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "import evaluation_measures as ev\n", "import imp\n", "import helpers\n", "imp.reload(ev)\n", "\n", "dir_path=\"Recommendations generated/toy-example/\"\n", "super_reactions=[4,5]\n", "test=pd.read_csv('./Datasets/toy-example/test.csv', sep='\\t', header=None)\n", "\n", "display(ev.evaluate_all(test, dir_path, super_reactions, topK=3))\n", "#also you can just type ev.evaluate_all() - I put above values as default\n", "\n", "toy_train_read=pd.read_csv('./Datasets/toy-example/train.csv', sep='\\t', header=None, names=['user', 'item', 'rating', 'timestamp'])\n", "toy_test_read=pd.read_csv('./Datasets/toy-example/test.csv', sep='\\t', header=None, names=['user', 'item', 'rating', 'timestamp'])\n", "reco=pd.read_csv('Recommendations generated/toy-example/Self_BaselineUI_reco.csv', header=None)\n", "estimations=pd.read_csv('Recommendations generated/toy-example/Self_BaselineUI_estimations.csv', names=['user', 'item', 'est_score'])\n", "toy_train_ui, toy_test_ui, toy_user_code_id, toy_user_id_code, \\\n", "toy_item_code_id, toy_item_id_code = helpers.data_to_csr(toy_train_read, toy_test_read)\n", "\n", "print('Training data:')\n", "display(toy_train_ui.todense())\n", "\n", "print('Test data:')\n", "display(toy_test_ui.todense())\n", "\n", "print('Recommendations:')\n", "display(reco)\n", "\n", "print('Estimations:')\n", "display(estimations)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Sample recommendations" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Here is what user rated high:\n" ] }, { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
userratingtitlegenres
4043955Toy Story (1995)Animation, Children's, Comedy
141503955Powder (1995)Drama
640793955Alien: Resurrection (1997)Action, Horror, Sci-Fi
436443955Amistad (1997)Drama
408963955Miracle on 34th Street (1994)Drama
360883955Silence of the Lambs, The (1991)Drama, Thriller
318483955Shawshank Redemption, The (1994)Drama
272113955Empire Strikes Back, The (1980)Action, Adventure, Drama, Romance, Sci-Fi, War
218703955Men in Black (1997)Action, Adventure, Comedy, Sci-Fi
202573955Return of the Jedi (1983)Action, Adventure, Romance, Sci-Fi, War
166823955Blues Brothers, The (1980)Action, Comedy, Musical
159623955Godfather, The (1972)Action, Crime, Drama
154163955Blade Runner (1982)Film-Noir, Sci-Fi
150473955Indiana Jones and the Last Crusade (1989)Action, Adventure
230253955Apt Pupil (1998)Drama, Thriller
\n", "
" ], "text/plain": [ " user rating title \\\n", "404 395 5 Toy Story (1995) \n", "14150 395 5 Powder (1995) \n", "64079 395 5 Alien: Resurrection (1997) \n", "43644 395 5 Amistad (1997) \n", "40896 395 5 Miracle on 34th Street (1994) \n", "36088 395 5 Silence of the Lambs, The (1991) \n", "31848 395 5 Shawshank Redemption, The (1994) \n", "27211 395 5 Empire Strikes Back, The (1980) \n", "21870 395 5 Men in Black (1997) \n", "20257 395 5 Return of the Jedi (1983) \n", "16682 395 5 Blues Brothers, The (1980) \n", "15962 395 5 Godfather, The (1972) \n", "15416 395 5 Blade Runner (1982) \n", "15047 395 5 Indiana Jones and the Last Crusade (1989) \n", "23025 395 5 Apt Pupil (1998) \n", "\n", " genres \n", "404 Animation, Children's, Comedy \n", "14150 Drama \n", "64079 Action, Horror, Sci-Fi \n", "43644 Drama \n", "40896 Drama \n", "36088 Drama, Thriller \n", "31848 Drama \n", "27211 Action, Adventure, Drama, Romance, Sci-Fi, War \n", "21870 Action, Adventure, Comedy, Sci-Fi \n", "20257 Action, Adventure, Romance, Sci-Fi, War \n", "16682 Action, Comedy, Musical \n", "15962 Action, Crime, Drama \n", "15416 Film-Noir, Sci-Fi \n", "15047 Action, Adventure \n", "23025 Drama, Thriller " ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Here is what we recommend:\n" ] }, { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
userrec_nbtitlegenres
393395.01Great Day in Harlem, A (1994)Documentary
1335395.02Tough and Deadly (1995)Action, Drama, Thriller
2277395.03Aiqing wansui (1994)Drama
3219395.04Delta of Venus (1994)Drama
4162395.05Someone Else's America (1995)Drama
5103395.06Saint of Fort Washington, The (1993)Drama
6045395.07Celestial Clockwork (1994)Comedy
6988395.08Some Mother's Son (1996)Drama
8882395.09Maya Lin: A Strong Clear Vision (1994)Documentary
7928395.010Prefontaine (1997)Drama
\n", "
" ], "text/plain": [ " user rec_nb title \\\n", "393 395.0 1 Great Day in Harlem, A (1994) \n", "1335 395.0 2 Tough and Deadly (1995) \n", "2277 395.0 3 Aiqing wansui (1994) \n", "3219 395.0 4 Delta of Venus (1994) \n", "4162 395.0 5 Someone Else's America (1995) \n", "5103 395.0 6 Saint of Fort Washington, The (1993) \n", "6045 395.0 7 Celestial Clockwork (1994) \n", "6988 395.0 8 Some Mother's Son (1996) \n", "8882 395.0 9 Maya Lin: A Strong Clear Vision (1994) \n", "7928 395.0 10 Prefontaine (1997) \n", "\n", " genres \n", "393 Documentary \n", "1335 Action, Drama, Thriller \n", "2277 Drama \n", "3219 Drama \n", "4162 Drama \n", "5103 Drama \n", "6045 Comedy \n", "6988 Drama \n", "8882 Documentary \n", "7928 Drama " ] }, "execution_count": 18, "metadata": {}, "output_type": "execute_result" } ], "source": [ "train=pd.read_csv('./Datasets/ml-100k/train.csv', sep='\\t', header=None, names=['user', 'item', 'rating', 'timestamp'])\n", "items=pd.read_csv('./Datasets/ml-100k/movies.csv')\n", "\n", "user=random.choice(list(set(train['user'])))\n", "\n", "train_content=pd.merge(train, items, left_on='item', right_on='id')\n", "\n", "print('Here is what user rated high:')\n", "display(train_content[train_content['user']==user][['user', 'rating', 'title', 'genres']]\\\n", " .sort_values(by='rating', ascending=False)[:15])\n", "\n", "reco = np.loadtxt('Recommendations generated/ml-100k/Self_BaselineUI_reco.csv', delimiter=',')\n", "items=pd.read_csv('./Datasets/ml-100k/movies.csv')\n", "\n", "# Let's ignore scores - they are not used in evaluation: \n", "reco_users=reco[:,:1]\n", "reco_items=reco[:,1::2]\n", "# Let's put them into one array\n", "reco=np.concatenate((reco_users, reco_items), axis=1)\n", "\n", "# Let's rebuild it user-item dataframe\n", "recommended=[]\n", "for row in reco:\n", " for rec_nb, entry in enumerate(row[1:]):\n", " recommended.append((row[0], rec_nb+1, entry))\n", "recommended=pd.DataFrame(recommended, columns=['user','rec_nb', 'item'])\n", "\n", "recommended_content=pd.merge(recommended, items, left_on='item', right_on='id')\n", "\n", "print('Here is what we recommend:')\n", "recommended_content[recommended_content['user']==user][['user', 'rec_nb', 'title', 'genres']].sort_values(by='rec_nb')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# project task 3: implement some other evaluation measure" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [], "source": [ "# it may be your idea, modification of what we have already implemented \n", "# (for example Hit2 rate which would count as a success users whoreceived at least 2 relevant recommendations) \n", "# or something well-known\n", "# expected output: modification of evaluation_measures.py such that evaluate_all will also display your measure" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "943it [00:00, 6823.12it/s]\n", "943it [00:00, 7031.46it/s]\n", "943it [00:00, 6757.46it/s]\n", "943it [00:00, 4266.43it/s]\n", "943it [00:00, 6333.51it/s]\n", "943it [00:00, 7252.84it/s]\n", "943it [00:00, 5832.99it/s]\n" ] }, { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
ModelRMSEMAEprecisionrecallF_1F_05precision_superrecall_superNDCGmAPMRRLAUCHRHR2Reco in testTest coverageShannonGini
0Self_TopPop2.5082582.2179090.1888650.1169190.1187320.1415840.1304720.1374730.2146510.1117070.4009390.5555460.7656420.4920471.0000000.0389613.1590790.987317
0Ready_Baseline0.9494590.7524870.0914100.0376520.0460300.0612860.0796140.0564630.0959570.0431780.1981930.5155010.4379640.2396611.0000000.0339112.8365130.991139
0Self_GlobalAvg1.1257600.9435340.0611880.0259680.0313830.0413430.0405580.0321070.0676950.0274700.1711870.5095460.3849420.1421001.0000000.0259742.7117720.992003
0Ready_Random1.5175931.2201810.0460230.0190380.0231180.0307340.0292920.0216390.0508180.0199580.1266460.5060310.3054080.1113470.9885470.1746035.0823830.908434
0Self_TopRated2.5082582.2179090.0009540.0001880.0002980.0004810.0006440.0002230.0010430.0003350.0033480.4964330.0095440.0000000.6990460.0050511.9459100.995669
0Self_BaselineIU0.9581360.7540510.0009540.0001880.0002980.0004810.0006440.0002230.0010430.0003350.0033480.4964330.0095440.0000000.6990460.0050511.9459100.995669
0Self_BaselineUI0.9675850.7627400.0009540.0001700.0002780.0004630.0006440.0001890.0007520.0001680.0016770.4964240.0095440.0000000.6005300.0050511.8031260.996380
\n", "
" ], "text/plain": [ " Model RMSE MAE precision recall F_1 \\\n", "0 Self_TopPop 2.508258 2.217909 0.188865 0.116919 0.118732 \n", "0 Ready_Baseline 0.949459 0.752487 0.091410 0.037652 0.046030 \n", "0 Self_GlobalAvg 1.125760 0.943534 0.061188 0.025968 0.031383 \n", "0 Ready_Random 1.517593 1.220181 0.046023 0.019038 0.023118 \n", "0 Self_TopRated 2.508258 2.217909 0.000954 0.000188 0.000298 \n", "0 Self_BaselineIU 0.958136 0.754051 0.000954 0.000188 0.000298 \n", "0 Self_BaselineUI 0.967585 0.762740 0.000954 0.000170 0.000278 \n", "\n", " F_05 precision_super recall_super NDCG mAP MRR \\\n", "0 0.141584 0.130472 0.137473 0.214651 0.111707 0.400939 \n", "0 0.061286 0.079614 0.056463 0.095957 0.043178 0.198193 \n", "0 0.041343 0.040558 0.032107 0.067695 0.027470 0.171187 \n", "0 0.030734 0.029292 0.021639 0.050818 0.019958 0.126646 \n", "0 0.000481 0.000644 0.000223 0.001043 0.000335 0.003348 \n", "0 0.000481 0.000644 0.000223 0.001043 0.000335 0.003348 \n", "0 0.000463 0.000644 0.000189 0.000752 0.000168 0.001677 \n", "\n", " LAUC HR HR2 Reco in test Test coverage Shannon \\\n", "0 0.555546 0.765642 0.492047 1.000000 0.038961 3.159079 \n", "0 0.515501 0.437964 0.239661 1.000000 0.033911 2.836513 \n", "0 0.509546 0.384942 0.142100 1.000000 0.025974 2.711772 \n", "0 0.506031 0.305408 0.111347 0.988547 0.174603 5.082383 \n", "0 0.496433 0.009544 0.000000 0.699046 0.005051 1.945910 \n", "0 0.496433 0.009544 0.000000 0.699046 0.005051 1.945910 \n", "0 0.496424 0.009544 0.000000 0.600530 0.005051 1.803126 \n", "\n", " Gini \n", "0 0.987317 \n", "0 0.991139 \n", "0 0.992003 \n", "0 0.908434 \n", "0 0.995669 \n", "0 0.995669 \n", "0 0.996380 " ] }, "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ "dir_path=\"Recommendations generated/ml-100k/\"\n", "super_reactions=[4,5]\n", "test=pd.read_csv('./Datasets/ml-100k/test.csv', sep='\\t', header=None)\n", "\n", "ev.evaluate_all(test, dir_path, super_reactions)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.5" } }, "nbformat": 4, "nbformat_minor": 4 }