{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prepare test set"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"slideshow": {
"slide_type": "-"
}
},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"import scipy.sparse as sparse\n",
"from collections import defaultdict\n",
"from itertools import chain\n",
"import random\n",
"from tqdm import tqdm\n",
"\n",
"# In evaluation we do not load train set - it is not needed\n",
"test = pd.read_csv(\"./Datasets/ml-100k/test.csv\", sep=\"\\t\", header=None)\n",
"test.columns = [\"user\", \"item\", \"rating\", \"timestamp\"]\n",
"\n",
"test[\"user_code\"] = test[\"user\"].astype(\"category\").cat.codes\n",
"test[\"item_code\"] = test[\"item\"].astype(\"category\").cat.codes\n",
"\n",
"user_code_id = dict(enumerate(test[\"user\"].astype(\"category\").cat.categories))\n",
"user_id_code = dict((v, k) for k, v in user_code_id.items())\n",
"item_code_id = dict(enumerate(test[\"item\"].astype(\"category\").cat.categories))\n",
"item_id_code = dict((v, k) for k, v in item_code_id.items())\n",
"\n",
"test_ui = sparse.csr_matrix((test[\"rating\"], (test[\"user_code\"], test[\"item_code\"])))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Estimations metrics"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"estimations_df = pd.read_csv(\n",
" \"Recommendations generated/ml-100k/Ready_Baseline_estimations.csv\", header=None\n",
")\n",
"estimations_df.columns = [\"user\", \"item\", \"score\"]\n",
"\n",
"estimations_df[\"user_code\"] = [user_id_code[user] for user in estimations_df[\"user\"]]\n",
"estimations_df[\"item_code\"] = [item_id_code[item] for item in estimations_df[\"item\"]]\n",
"estimations = sparse.csr_matrix(\n",
" (\n",
" estimations_df[\"score\"],\n",
" (estimations_df[\"user_code\"], estimations_df[\"item_code\"]),\n",
" ),\n",
" shape=test_ui.shape,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def estimations_metrics(test_ui, estimations):\n",
" result = []\n",
"\n",
" RMSE = (np.sum((estimations.data - test_ui.data) ** 2) / estimations.nnz) ** (1 / 2)\n",
" result.append([\"RMSE\", RMSE])\n",
"\n",
" MAE = np.sum(abs(estimations.data - test_ui.data)) / estimations.nnz\n",
" result.append([\"MAE\", MAE])\n",
"\n",
" df_result = (pd.DataFrame(list(zip(*result))[1])).T\n",
" df_result.columns = list(zip(*result))[0]\n",
" return df_result"
]
},
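{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, the two estimation metrics computed above are (a sketch of the formulas; it assumes the estimations file covers exactly the user-item pairs present in the test set, so that `estimations.data` and `test_ui.data` are aligned):\n",
"\n",
"$$RMSE = \\sqrt{\\frac{1}{n}\\sum_{(u,i)}(\\hat{r}_{ui} - r_{ui})^2}, \\qquad MAE = \\frac{1}{n}\\sum_{(u,i)}|\\hat{r}_{ui} - r_{ui}|$$\n",
"\n",
"where $n$ is the number of estimated pairs (`estimations.nnz`), $\\hat{r}_{ui}$ is the estimated score and $r_{ui}$ the test rating."
]
},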
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>RMSE</th>\n",
" <th>MAE</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.949459</td>\n",
" <td>0.752487</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" RMSE MAE\n",
"0 0.949459 0.752487"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# in case of error (in the laboratories) you might have to switch to the other version of pandas\n",
"# try !pip3 install pandas=='1.0.3' (or pip if you use python 2) and restart the kernel\n",
"\n",
"estimations_metrics(test_ui, estimations)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Ranking metrics"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[663, 475, 62, ..., 472, 269, 503],\n",
" [ 48, 313, 475, ..., 591, 175, 466],\n",
" [351, 313, 475, ..., 591, 175, 466],\n",
" ...,\n",
" [259, 313, 475, ..., 11, 591, 175],\n",
" [ 33, 313, 475, ..., 11, 591, 175],\n",
" [ 77, 313, 475, ..., 11, 591, 175]])"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import numpy as np\n",
"\n",
"reco = np.loadtxt(\n",
" \"Recommendations generated/ml-100k/Ready_Baseline_reco.csv\", delimiter=\",\"\n",
")\n",
"# Let's ignore scores - they are not used in evaluation:\n",
"users = reco[:, :1]\n",
"items = reco[:, 1::2]\n",
"# Let's use inner ids instead of real ones\n",
"users = np.vectorize(lambda x: user_id_code.setdefault(x, -1))(users)\n",
"items = np.vectorize(lambda x: item_id_code.setdefault(x, -1))(items)\n",
"reco = np.concatenate((users, items), axis=1)\n",
"reco"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def ranking_metrics(test_ui, reco, super_reactions=[], topK=10):\n",
"\n",
" nb_items = test_ui.shape[1]\n",
" (\n",
" relevant_users,\n",
" super_relevant_users,\n",
" prec,\n",
" rec,\n",
" F_1,\n",
" F_05,\n",
" prec_super,\n",
" rec_super,\n",
" ndcg,\n",
" mAP,\n",
" MRR,\n",
" LAUC,\n",
" HR,\n",
" ) = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)\n",
"\n",
" cg = 1.0 / np.log2(np.arange(2, topK + 2))\n",
" cg_sum = np.cumsum(cg)\n",
"\n",
" for (nb_user, user) in tqdm(enumerate(reco[:, 0])):\n",
" u_rated_items = test_ui.indices[test_ui.indptr[user] : test_ui.indptr[user + 1]]\n",
" nb_u_rated_items = len(u_rated_items)\n",
" if (\n",
" nb_u_rated_items > 0\n",
" ): # skip users with no items in test set (still possible that there will be no super items)\n",
" relevant_users += 1\n",
"\n",
" u_super_items = u_rated_items[\n",
" np.vectorize(lambda x: x in super_reactions)(\n",
" test_ui.data[test_ui.indptr[user] : test_ui.indptr[user + 1]]\n",
" )\n",
" ]\n",
" # more natural seems u_super_items=[item for item in u_rated_items if test_ui[user,item] in super_reactions]\n",
" # but accesing test_ui[user,item] is expensive -we should avoid doing it\n",
" if len(u_super_items) > 0:\n",
" super_relevant_users += 1\n",
"\n",
" user_successes = np.zeros(topK)\n",
" nb_user_successes = 0\n",
" user_super_successes = np.zeros(topK)\n",
" nb_user_super_successes = 0\n",
"\n",
" # evaluation\n",
" for (item_position, item) in enumerate(reco[nb_user, 1 : topK + 1]):\n",
" if item in u_rated_items:\n",
" user_successes[item_position] = 1\n",
" nb_user_successes += 1\n",
" if item in u_super_items:\n",
" user_super_successes[item_position] = 1\n",
" nb_user_super_successes += 1\n",
"\n",
" prec_u = nb_user_successes / topK\n",
" prec += prec_u\n",
"\n",
" rec_u = nb_user_successes / nb_u_rated_items\n",
" rec += rec_u\n",
"\n",
" F_1 += 2 * (prec_u * rec_u) / (prec_u + rec_u) if prec_u + rec_u > 0 else 0\n",
" F_05 += (\n",
" (0.5 ** 2 + 1) * (prec_u * rec_u) / (0.5 ** 2 * prec_u + rec_u)\n",
" if prec_u + rec_u > 0\n",
" else 0\n",
" )\n",
"\n",
" prec_super += nb_user_super_successes / topK\n",
" rec_super += nb_user_super_successes / max(\n",
" len(u_super_items), 1\n",
" ) # to set 0 if no super items\n",
" ndcg += np.dot(user_successes, cg) / cg_sum[min(topK, nb_u_rated_items) - 1]\n",
"\n",
" cumsum_successes = np.cumsum(user_successes)\n",
" mAP += np.dot(\n",
" cumsum_successes / np.arange(1, topK + 1), user_successes\n",
" ) / min(topK, nb_u_rated_items)\n",
" MRR += (\n",
" 1 / (user_successes.nonzero()[0][0] + 1)\n",
" if user_successes.nonzero()[0].size > 0\n",
" else 0\n",
" )\n",
" LAUC += (\n",
" np.dot(cumsum_successes, 1 - user_successes)\n",
" + (nb_user_successes + nb_u_rated_items)\n",
" / 2\n",
" * ((nb_items - nb_u_rated_items) - (topK - nb_user_successes))\n",
" ) / ((nb_items - nb_u_rated_items) * nb_u_rated_items)\n",
"\n",
" HR += nb_user_successes > 0\n",
"\n",
" result = []\n",
" result.append((\"precision\", prec / relevant_users))\n",
" result.append((\"recall\", rec / relevant_users))\n",
" result.append((\"F_1\", F_1 / relevant_users))\n",
" result.append((\"F_05\", F_05 / relevant_users))\n",
" result.append((\"precision_super\", prec_super / super_relevant_users))\n",
" result.append((\"recall_super\", rec_super / super_relevant_users))\n",
" result.append((\"NDCG\", ndcg / relevant_users))\n",
" result.append((\"mAP\", mAP / relevant_users))\n",
" result.append((\"MRR\", MRR / relevant_users))\n",
" result.append((\"LAUC\", LAUC / relevant_users))\n",
" result.append((\"HR\", HR / relevant_users))\n",
"\n",
" df_result = (pd.DataFrame(list(zip(*result))[1])).T\n",
" df_result.columns = list(zip(*result))[0]\n",
" return df_result"
]
},
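{
"cell_type": "markdown",
"metadata": {},
"source": [
"Below is a minimal sanity-check of the per-user quantities accumulated in the loop above (a sketch on a hand-made success vector, not part of the evaluation pipeline): a user with 3 relevant items in the test set who got hits at positions 1 and 3 of a top-5 list."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: per-user precision@K, recall@K and NDCG@K for an arbitrary, hand-made success vector\n",
"import numpy as np\n",
"\n",
"topK = 5\n",
"nb_u_rated_items = 3  # the user has 3 relevant items in the test set\n",
"user_successes = np.array([1, 0, 1, 0, 0])  # hits at positions 1 and 3 of the top-5 list\n",
"nb_user_successes = int(user_successes.sum())\n",
"\n",
"prec_u = nb_user_successes / topK  # precision@5 = 0.4\n",
"rec_u = nb_user_successes / nb_u_rated_items  # recall@5 = 2/3\n",
"\n",
"cg = 1.0 / np.log2(np.arange(2, topK + 2))  # discounted gains 1/log2(rank+1)\n",
"cg_sum = np.cumsum(cg)\n",
"ndcg_u = np.dot(user_successes, cg) / cg_sum[min(topK, nb_u_rated_items) - 1]\n",
"\n",
"print(prec_u, rec_u, ndcg_u)"
]
},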
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"943it [00:00, 7955.25it/s]\n"
]
},
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>precision</th>\n",
" <th>recall</th>\n",
" <th>F_1</th>\n",
" <th>F_05</th>\n",
" <th>precision_super</th>\n",
" <th>recall_super</th>\n",
" <th>NDCG</th>\n",
" <th>mAP</th>\n",
" <th>MRR</th>\n",
" <th>LAUC</th>\n",
" <th>HR</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.09141</td>\n",
" <td>0.037652</td>\n",
" <td>0.04603</td>\n",
" <td>0.061286</td>\n",
" <td>0.079614</td>\n",
" <td>0.056463</td>\n",
" <td>0.095957</td>\n",
" <td>0.043178</td>\n",
" <td>0.198193</td>\n",
" <td>0.515501</td>\n",
" <td>0.437964</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" precision recall F_1 F_05 precision_super recall_super \\\n",
"0 0.09141 0.037652 0.04603 0.061286 0.079614 0.056463 \n",
"\n",
" NDCG mAP MRR LAUC HR \n",
"0 0.095957 0.043178 0.198193 0.515501 0.437964 "
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ranking_metrics(test_ui, reco, super_reactions=[4, 5], topK=10)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Diversity metrics"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"def diversity_metrics(test_ui, reco, topK=10):\n",
"\n",
" frequencies = defaultdict(int)\n",
"\n",
" # let's assign 0 to all items in test set\n",
" for item in list(set(test_ui.indices)):\n",
" frequencies[item] = 0\n",
"\n",
" # counting frequencies\n",
" for item in reco[:, 1:].flat:\n",
" frequencies[item] += 1\n",
"\n",
" nb_reco_outside_test = frequencies[-1]\n",
" del frequencies[-1]\n",
"\n",
" frequencies = np.array(list(frequencies.values()))\n",
"\n",
" nb_rec_items = len(frequencies[frequencies > 0])\n",
" nb_reco_inside_test = np.sum(frequencies)\n",
"\n",
" frequencies = frequencies / np.sum(frequencies)\n",
" frequencies = np.sort(frequencies)\n",
"\n",
" with np.errstate(\n",
" divide=\"ignore\"\n",
" ): # let's put zeros put items with 0 frequency and ignore division warning\n",
" log_frequencies = np.nan_to_num(np.log(frequencies), posinf=0, neginf=0)\n",
"\n",
" result = []\n",
" result.append(\n",
" (\n",
" \"Reco in test\",\n",
" nb_reco_inside_test / (nb_reco_inside_test + nb_reco_outside_test),\n",
" )\n",
" )\n",
" result.append((\"Test coverage\", nb_rec_items / test_ui.shape[1]))\n",
" result.append((\"Shannon\", -np.dot(frequencies, log_frequencies)))\n",
" result.append(\n",
" (\n",
" \"Gini\",\n",
" np.dot(frequencies, np.arange(1 - len(frequencies), len(frequencies), 2))\n",
" / (len(frequencies) - 1),\n",
" )\n",
" )\n",
"\n",
" df_result = (pd.DataFrame(list(zip(*result))[1])).T\n",
" df_result.columns = list(zip(*result))[0]\n",
" return df_result"
]
},
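{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick intuition check for the two concentration measures above (a sketch on hand-made count vectors, not part of the pipeline): recommending all items equally often gives maximal Shannon entropy and Gini 0, while always recommending the same single item gives entropy ~0 and Gini 1."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: the same Shannon / Gini expressions as above, applied to two extreme frequency vectors\n",
"import numpy as np\n",
"\n",
"def shannon_gini(counts):\n",
"    freq = np.sort(np.array(counts) / np.sum(counts))\n",
"    with np.errstate(divide=\"ignore\"):\n",
"        log_freq = np.nan_to_num(np.log(freq), posinf=0, neginf=0)\n",
"    shannon = -np.dot(freq, log_freq)\n",
"    gini = np.dot(freq, np.arange(1 - len(freq), len(freq), 2)) / (len(freq) - 1)\n",
"    return shannon, gini\n",
"\n",
"print(shannon_gini([10, 10, 10, 10]))  # uniform: entropy log(4) ~ 1.386, Gini 0.0\n",
"print(shannon_gini([40, 0, 0, 0]))  # fully concentrated: entropy ~0, Gini 1.0"
]
},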
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Reco in test</th>\n",
" <th>Test coverage</th>\n",
" <th>Shannon</th>\n",
" <th>Gini</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>1.0</td>\n",
" <td>0.033911</td>\n",
" <td>2.836513</td>\n",
" <td>0.991139</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" Reco in test Test coverage Shannon Gini\n",
"0 1.0 0.033911 2.836513 0.991139"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# in case of errors try !pip3 install numpy==1.18.4 (or pip if you use python 2) and restart the kernel\n",
"\n",
"x = diversity_metrics(test_ui, reco, topK=10)\n",
"x"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# To be used in other notebooks"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"943it [00:00, 7872.32it/s]\n"
]
},
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>RMSE</th>\n",
" <th>MAE</th>\n",
" <th>precision</th>\n",
" <th>recall</th>\n",
" <th>F_1</th>\n",
" <th>F_05</th>\n",
" <th>precision_super</th>\n",
" <th>recall_super</th>\n",
" <th>NDCG</th>\n",
" <th>mAP</th>\n",
" <th>MRR</th>\n",
" <th>LAUC</th>\n",
" <th>HR</th>\n",
" <th>H2R</th>\n",
" <th>Reco in test</th>\n",
" <th>Test coverage</th>\n",
" <th>Shannon</th>\n",
" <th>Gini</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.949459</td>\n",
" <td>0.752487</td>\n",
" <td>0.09141</td>\n",
" <td>0.037652</td>\n",
" <td>0.04603</td>\n",
" <td>0.061286</td>\n",
" <td>0.079614</td>\n",
" <td>0.056463</td>\n",
" <td>0.095957</td>\n",
" <td>0.043178</td>\n",
" <td>0.198193</td>\n",
" <td>0.515501</td>\n",
" <td>0.437964</td>\n",
" <td>0.239661</td>\n",
" <td>1.0</td>\n",
" <td>0.033911</td>\n",
" <td>2.836513</td>\n",
" <td>0.991139</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" RMSE MAE precision recall F_1 F_05 \\\n",
"0 0.949459 0.752487 0.09141 0.037652 0.04603 0.061286 \n",
"\n",
" precision_super recall_super NDCG mAP MRR LAUC \\\n",
"0 0.079614 0.056463 0.095957 0.043178 0.198193 0.515501 \n",
"\n",
" HR H2R Reco in test Test coverage Shannon Gini \n",
"0 0.437964 0.239661 1.0 0.033911 2.836513 0.991139 "
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import evaluation_measures as ev\n",
"\n",
"estimations_df = pd.read_csv(\n",
" \"Recommendations generated/ml-100k/Ready_Baseline_estimations.csv\", header=None\n",
")\n",
"reco = np.loadtxt(\n",
" \"Recommendations generated/ml-100k/Ready_Baseline_reco.csv\", delimiter=\",\"\n",
")\n",
"\n",
"ev.evaluate(\n",
" test=pd.read_csv(\"./Datasets/ml-100k/test.csv\", sep=\"\\t\", header=None),\n",
" estimations_df=estimations_df,\n",
" reco=reco,\n",
" super_reactions=[4, 5],\n",
")\n",
"# also you can just type ev.evaluate_all(estimations_df, reco) - I put above values as default"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"943it [00:00, 6795.25it/s]\n",
"943it [00:00, 7953.42it/s]\n",
"943it [00:00, 7915.55it/s]\n",
"943it [00:00, 8704.77it/s]\n",
"943it [00:00, 8266.93it/s]\n"
]
}
],
"source": [
"dir_path = \"Recommendations generated/ml-100k/\"\n",
"super_reactions = [4, 5]\n",
"test = pd.read_csv(\"./Datasets/ml-100k/test.csv\", sep=\"\\t\", header=None)\n",
"\n",
"df = ev.evaluate_all(test, dir_path, super_reactions)\n",
"# also you can just type ev.evaluate_all() - I put above values as default"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Model</th>\n",
" <th>RMSE</th>\n",
" <th>MAE</th>\n",
" <th>precision</th>\n",
" <th>recall</th>\n",
" <th>F_1</th>\n",
" <th>F_05</th>\n",
" <th>precision_super</th>\n",
" <th>recall_super</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Self_TopPop</td>\n",
" <td>2.508258</td>\n",
" <td>2.217909</td>\n",
" <td>0.188865</td>\n",
" <td>0.116919</td>\n",
" <td>0.118732</td>\n",
" <td>0.141584</td>\n",
" <td>0.130472</td>\n",
" <td>0.137473</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Ready_Baseline</td>\n",
" <td>0.949459</td>\n",
" <td>0.752487</td>\n",
" <td>0.091410</td>\n",
" <td>0.037652</td>\n",
" <td>0.046030</td>\n",
" <td>0.061286</td>\n",
" <td>0.079614</td>\n",
" <td>0.056463</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Ready_Random</td>\n",
" <td>1.521557</td>\n",
" <td>1.222653</td>\n",
" <td>0.046766</td>\n",
" <td>0.021357</td>\n",
" <td>0.024113</td>\n",
" <td>0.031441</td>\n",
" <td>0.027468</td>\n",
" <td>0.021247</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Self_TopRated</td>\n",
" <td>1.030712</td>\n",
" <td>0.820904</td>\n",
" <td>0.000954</td>\n",
" <td>0.000188</td>\n",
" <td>0.000298</td>\n",
" <td>0.000481</td>\n",
" <td>0.000644</td>\n",
" <td>0.000223</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Self_BaselineUI</td>\n",
" <td>0.967585</td>\n",
" <td>0.762740</td>\n",
" <td>0.000954</td>\n",
" <td>0.000170</td>\n",
" <td>0.000278</td>\n",
" <td>0.000463</td>\n",
" <td>0.000644</td>\n",
" <td>0.000189</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" Model RMSE MAE precision recall F_1 \\\n",
"0 Self_TopPop 2.508258 2.217909 0.188865 0.116919 0.118732 \n",
"0 Ready_Baseline 0.949459 0.752487 0.091410 0.037652 0.046030 \n",
"0 Ready_Random 1.521557 1.222653 0.046766 0.021357 0.024113 \n",
"0 Self_TopRated 1.030712 0.820904 0.000954 0.000188 0.000298 \n",
"0 Self_BaselineUI 0.967585 0.762740 0.000954 0.000170 0.000278 \n",
"\n",
" F_05 precision_super recall_super \n",
"0 0.141584 0.130472 0.137473 \n",
"0 0.061286 0.079614 0.056463 \n",
"0 0.031441 0.027468 0.021247 \n",
"0 0.000481 0.000644 0.000223 \n",
"0 0.000463 0.000644 0.000189 "
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df.iloc[:, :9]"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Model</th>\n",
" <th>NDCG</th>\n",
" <th>mAP</th>\n",
" <th>MRR</th>\n",
" <th>LAUC</th>\n",
" <th>HR</th>\n",
" <th>H2R</th>\n",
" <th>Reco in test</th>\n",
" <th>Test coverage</th>\n",
" <th>Shannon</th>\n",
" <th>Gini</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Self_TopPop</td>\n",
" <td>0.214651</td>\n",
" <td>0.111707</td>\n",
" <td>0.400939</td>\n",
" <td>0.555546</td>\n",
" <td>0.765642</td>\n",
" <td>0.492047</td>\n",
" <td>1.000000</td>\n",
" <td>0.038961</td>\n",
" <td>3.159079</td>\n",
" <td>0.987317</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Ready_Baseline</td>\n",
" <td>0.095957</td>\n",
" <td>0.043178</td>\n",
" <td>0.198193</td>\n",
" <td>0.515501</td>\n",
" <td>0.437964</td>\n",
" <td>0.239661</td>\n",
" <td>1.000000</td>\n",
" <td>0.033911</td>\n",
" <td>2.836513</td>\n",
" <td>0.991139</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Ready_Random</td>\n",
" <td>0.050715</td>\n",
" <td>0.019635</td>\n",
" <td>0.121185</td>\n",
" <td>0.507191</td>\n",
" <td>0.314952</td>\n",
" <td>0.109226</td>\n",
" <td>0.988547</td>\n",
" <td>0.188312</td>\n",
" <td>5.094569</td>\n",
" <td>0.908346</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Self_TopRated</td>\n",
" <td>0.001043</td>\n",
" <td>0.000335</td>\n",
" <td>0.003348</td>\n",
" <td>0.496433</td>\n",
" <td>0.009544</td>\n",
" <td>0.000000</td>\n",
" <td>0.699046</td>\n",
" <td>0.005051</td>\n",
" <td>1.945910</td>\n",
" <td>0.995669</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Self_BaselineUI</td>\n",
" <td>0.000752</td>\n",
" <td>0.000168</td>\n",
" <td>0.001677</td>\n",
" <td>0.496424</td>\n",
" <td>0.009544</td>\n",
" <td>0.000000</td>\n",
" <td>0.600530</td>\n",
" <td>0.005051</td>\n",
" <td>1.803126</td>\n",
" <td>0.996380</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" Model NDCG mAP MRR LAUC HR \\\n",
"0 Self_TopPop 0.214651 0.111707 0.400939 0.555546 0.765642 \n",
"0 Ready_Baseline 0.095957 0.043178 0.198193 0.515501 0.437964 \n",
"0 Ready_Random 0.050715 0.019635 0.121185 0.507191 0.314952 \n",
"0 Self_TopRated 0.001043 0.000335 0.003348 0.496433 0.009544 \n",
"0 Self_BaselineUI 0.000752 0.000168 0.001677 0.496424 0.009544 \n",
"\n",
" H2R Reco in test Test coverage Shannon Gini \n",
"0 0.492047 1.000000 0.038961 3.159079 0.987317 \n",
"0 0.239661 1.000000 0.033911 2.836513 0.991139 \n",
"0 0.109226 0.988547 0.188312 5.094569 0.908346 \n",
"0 0.000000 0.699046 0.005051 1.945910 0.995669 \n",
"0 0.000000 0.600530 0.005051 1.803126 0.996380 "
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df.iloc[:, np.append(0, np.arange(9, df.shape[1]))]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Check metrics on toy dataset"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"3it [00:00, 4983.33it/s]\n",
"3it [00:00, 5262.61it/s]\n"
]
},
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Model</th>\n",
" <th>RMSE</th>\n",
" <th>MAE</th>\n",
" <th>precision</th>\n",
" <th>recall</th>\n",
" <th>F_1</th>\n",
" <th>F_05</th>\n",
" <th>precision_super</th>\n",
" <th>recall_super</th>\n",
" <th>NDCG</th>\n",
" <th>mAP</th>\n",
" <th>MRR</th>\n",
" <th>LAUC</th>\n",
" <th>HR</th>\n",
" <th>H2R</th>\n",
" <th>Reco in test</th>\n",
" <th>Test coverage</th>\n",
" <th>Shannon</th>\n",
" <th>Gini</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Self_BaselineUI</td>\n",
" <td>1.612452</td>\n",
" <td>1.400</td>\n",
" <td>0.444444</td>\n",
" <td>0.888889</td>\n",
" <td>0.555556</td>\n",
" <td>0.478632</td>\n",
" <td>0.333333</td>\n",
" <td>0.75</td>\n",
" <td>0.676907</td>\n",
" <td>0.574074</td>\n",
" <td>0.611111</td>\n",
" <td>0.638889</td>\n",
" <td>1.0</td>\n",
" <td>0.333333</td>\n",
" <td>0.888889</td>\n",
" <td>0.8</td>\n",
" <td>1.386294</td>\n",
" <td>0.250000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Self_BaselineIU</td>\n",
" <td>1.648337</td>\n",
" <td>1.575</td>\n",
" <td>0.444444</td>\n",
" <td>0.888889</td>\n",
" <td>0.555556</td>\n",
" <td>0.478632</td>\n",
" <td>0.333333</td>\n",
" <td>0.75</td>\n",
" <td>0.720550</td>\n",
" <td>0.629630</td>\n",
" <td>0.666667</td>\n",
" <td>0.722222</td>\n",
" <td>1.0</td>\n",
" <td>0.333333</td>\n",
" <td>0.777778</td>\n",
" <td>0.8</td>\n",
" <td>1.351784</td>\n",
" <td>0.357143</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" Model RMSE MAE precision recall F_1 F_05 \\\n",
"0 Self_BaselineUI 1.612452 1.400 0.444444 0.888889 0.555556 0.478632 \n",
"0 Self_BaselineIU 1.648337 1.575 0.444444 0.888889 0.555556 0.478632 \n",
"\n",
" precision_super recall_super NDCG mAP MRR LAUC HR \\\n",
"0 0.333333 0.75 0.676907 0.574074 0.611111 0.638889 1.0 \n",
"0 0.333333 0.75 0.720550 0.629630 0.666667 0.722222 1.0 \n",
"\n",
" H2R Reco in test Test coverage Shannon Gini \n",
"0 0.333333 0.888889 0.8 1.386294 0.250000 \n",
"0 0.333333 0.777778 0.8 1.351784 0.357143 "
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Training data:\n"
]
},
{
"data": {
"text/plain": [
"matrix([[3, 4, 0, 0, 5, 0, 0, 4],\n",
" [0, 1, 2, 3, 0, 0, 0, 0],\n",
" [0, 0, 0, 5, 0, 3, 4, 0]])"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Test data:\n"
]
},
{
"data": {
"text/plain": [
"matrix([[0, 0, 0, 0, 0, 0, 3, 0],\n",
" [0, 0, 0, 0, 5, 0, 0, 0],\n",
" [5, 0, 4, 0, 0, 0, 0, 2]])"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Recommendations:\n"
]
},
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>0</th>\n",
" <th>1</th>\n",
" <th>2</th>\n",
" <th>3</th>\n",
" <th>4</th>\n",
" <th>5</th>\n",
" <th>6</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0</td>\n",
" <td>30</td>\n",
" <td>5.0</td>\n",
" <td>20</td>\n",
" <td>4.0</td>\n",
" <td>60</td>\n",
" <td>4.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>10</td>\n",
" <td>40</td>\n",
" <td>3.0</td>\n",
" <td>60</td>\n",
" <td>2.0</td>\n",
" <td>70</td>\n",
" <td>2.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>20</td>\n",
" <td>40</td>\n",
" <td>5.0</td>\n",
" <td>20</td>\n",
" <td>4.0</td>\n",
" <td>70</td>\n",
" <td>4.0</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" 0 1 2 3 4 5 6\n",
"0 0 30 5.0 20 4.0 60 4.0\n",
"1 10 40 3.0 60 2.0 70 2.0\n",
"2 20 40 5.0 20 4.0 70 4.0"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Estimations:\n"
]
},
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>user</th>\n",
" <th>item</th>\n",
" <th>est_score</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0</td>\n",
" <td>60</td>\n",
" <td>4.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>10</td>\n",
" <td>40</td>\n",
" <td>3.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>20</td>\n",
" <td>0</td>\n",
" <td>3.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>20</td>\n",
" <td>20</td>\n",
" <td>4.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>20</td>\n",
" <td>70</td>\n",
" <td>4.0</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" user item est_score\n",
"0 0 60 4.0\n",
"1 10 40 3.0\n",
"2 20 0 3.0\n",
"3 20 20 4.0\n",
"4 20 70 4.0"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import helpers\n",
"\n",
"dir_path = \"Recommendations generated/toy-example/\"\n",
"super_reactions = [4, 5]\n",
"test = pd.read_csv(\"./Datasets/toy-example/test.csv\", sep=\"\\t\", header=None)\n",
"\n",
"display(ev.evaluate_all(test, dir_path, super_reactions, topK=3))\n",
"# also you can just type ev.evaluate_all() - I put above values as default\n",
"\n",
"toy_train_read = pd.read_csv(\n",
" \"./Datasets/toy-example/train.csv\",\n",
" sep=\"\\t\",\n",
" header=None,\n",
" names=[\"user\", \"item\", \"rating\", \"timestamp\"],\n",
")\n",
"toy_test_read = pd.read_csv(\n",
" \"./Datasets/toy-example/test.csv\",\n",
" sep=\"\\t\",\n",
" header=None,\n",
" names=[\"user\", \"item\", \"rating\", \"timestamp\"],\n",
")\n",
"reco = pd.read_csv(\n",
" \"Recommendations generated/toy-example/Self_BaselineUI_reco.csv\", header=None\n",
")\n",
"estimations = pd.read_csv(\n",
" \"Recommendations generated/toy-example/Self_BaselineUI_estimations.csv\",\n",
" names=[\"user\", \"item\", \"est_score\"],\n",
")\n",
"(\n",
" toy_train_ui,\n",
" toy_test_ui,\n",
" toy_user_code_id,\n",
" toy_user_id_code,\n",
" toy_item_code_id,\n",
" toy_item_id_code,\n",
") = helpers.data_to_csr(toy_train_read, toy_test_read)\n",
"\n",
"print(\"Training data:\")\n",
"display(toy_train_ui.todense())\n",
"\n",
"print(\"Test data:\")\n",
"display(toy_test_ui.todense())\n",
"\n",
"print(\"Recommendations:\")\n",
"display(reco)\n",
"\n",
"print(\"Estimations:\")\n",
"display(estimations)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Sample recommendations"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Here is what user rated high:\n"
]
},
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>user</th>\n",
" <th>rating</th>\n",
" <th>title</th>\n",
" <th>genres</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>37537</th>\n",
" <td>506</td>\n",
" <td>5</td>\n",
" <td>Aladdin (1992)</td>\n",
" <td>Animation, Children's, Comedy, Musical</td>\n",
" </tr>\n",
" <tr>\n",
" <th>29233</th>\n",
" <td>506</td>\n",
" <td>5</td>\n",
" <td>Usual Suspects, The (1995)</td>\n",
" <td>Crime, Thriller</td>\n",
" </tr>\n",
" <tr>\n",
" <th>68329</th>\n",
" <td>506</td>\n",
" <td>5</td>\n",
" <td>Babe (1995)</td>\n",
" <td>Children's, Comedy, Drama</td>\n",
" </tr>\n",
" <tr>\n",
" <th>31142</th>\n",
" <td>506</td>\n",
" <td>5</td>\n",
" <td>GoodFellas (1990)</td>\n",
" <td>Crime, Drama</td>\n",
" </tr>\n",
" <tr>\n",
" <th>30354</th>\n",
" <td>506</td>\n",
" <td>5</td>\n",
" <td>Manchurian Candidate, The (1962)</td>\n",
" <td>Film-Noir, Thriller</td>\n",
" </tr>\n",
" <tr>\n",
" <th>50796</th>\n",
" <td>506</td>\n",
" <td>5</td>\n",
" <td>It's a Wonderful Life (1946)</td>\n",
" <td>Drama</td>\n",
" </tr>\n",
" <tr>\n",
" <th>67161</th>\n",
" <td>506</td>\n",
" <td>5</td>\n",
" <td>Little Princess, A (1995)</td>\n",
" <td>Children's, Drama</td>\n",
" </tr>\n",
" <tr>\n",
" <th>66726</th>\n",
" <td>506</td>\n",
" <td>5</td>\n",
" <td>Deer Hunter, The (1978)</td>\n",
" <td>Drama, War</td>\n",
" </tr>\n",
" <tr>\n",
" <th>66672</th>\n",
" <td>506</td>\n",
" <td>5</td>\n",
" <td>Bringing Up Baby (1938)</td>\n",
" <td>Comedy</td>\n",
" </tr>\n",
" <tr>\n",
" <th>66201</th>\n",
" <td>506</td>\n",
" <td>5</td>\n",
" <td>In the Line of Fire (1993)</td>\n",
" <td>Action, Thriller</td>\n",
" </tr>\n",
" <tr>\n",
" <th>65397</th>\n",
" <td>506</td>\n",
" <td>5</td>\n",
" <td>Speed (1994)</td>\n",
" <td>Action, Romance, Thriller</td>\n",
" </tr>\n",
" <tr>\n",
" <th>47566</th>\n",
" <td>506</td>\n",
" <td>5</td>\n",
" <td>Alien (1979)</td>\n",
" <td>Action, Horror, Sci-Fi, Thriller</td>\n",
" </tr>\n",
" <tr>\n",
" <th>44744</th>\n",
" <td>506</td>\n",
" <td>5</td>\n",
" <td>Ransom (1996)</td>\n",
" <td>Drama, Thriller</td>\n",
" </tr>\n",
" <tr>\n",
" <th>60460</th>\n",
" <td>506</td>\n",
" <td>5</td>\n",
" <td>Singin' in the Rain (1952)</td>\n",
" <td>Musical, Romance</td>\n",
" </tr>\n",
" <tr>\n",
" <th>16736</th>\n",
" <td>506</td>\n",
" <td>5</td>\n",
" <td>Man Who Would Be King, The (1975)</td>\n",
" <td>Adventure</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" user rating title \\\n",
"37537 506 5 Aladdin (1992) \n",
"29233 506 5 Usual Suspects, The (1995) \n",
"68329 506 5 Babe (1995) \n",
"31142 506 5 GoodFellas (1990) \n",
"30354 506 5 Manchurian Candidate, The (1962) \n",
"50796 506 5 It's a Wonderful Life (1946) \n",
"67161 506 5 Little Princess, A (1995) \n",
"66726 506 5 Deer Hunter, The (1978) \n",
"66672 506 5 Bringing Up Baby (1938) \n",
"66201 506 5 In the Line of Fire (1993) \n",
"65397 506 5 Speed (1994) \n",
"47566 506 5 Alien (1979) \n",
"44744 506 5 Ransom (1996) \n",
"60460 506 5 Singin' in the Rain (1952) \n",
"16736 506 5 Man Who Would Be King, The (1975) \n",
"\n",
" genres \n",
"37537 Animation, Children's, Comedy, Musical \n",
"29233 Crime, Thriller \n",
"68329 Children's, Comedy, Drama \n",
"31142 Crime, Drama \n",
"30354 Film-Noir, Thriller \n",
"50796 Drama \n",
"67161 Children's, Drama \n",
"66726 Drama, War \n",
"66672 Comedy \n",
"66201 Action, Thriller \n",
"65397 Action, Romance, Thriller \n",
"47566 Action, Horror, Sci-Fi, Thriller \n",
"44744 Drama, Thriller \n",
"60460 Musical, Romance \n",
"16736 Adventure "
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Here is what we recommend:\n"
]
},
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>user</th>\n",
" <th>rec_nb</th>\n",
" <th>title</th>\n",
" <th>genres</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>504</th>\n",
" <td>506.0</td>\n",
" <td>1</td>\n",
" <td>Great Day in Harlem, A (1994)</td>\n",
" <td>Documentary</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1446</th>\n",
" <td>506.0</td>\n",
" <td>2</td>\n",
" <td>Tough and Deadly (1995)</td>\n",
" <td>Action, Drama, Thriller</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2388</th>\n",
" <td>506.0</td>\n",
" <td>3</td>\n",
" <td>Aiqing wansui (1994)</td>\n",
" <td>Drama</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3330</th>\n",
" <td>506.0</td>\n",
" <td>4</td>\n",
" <td>Delta of Venus (1994)</td>\n",
" <td>Drama</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4272</th>\n",
" <td>506.0</td>\n",
" <td>5</td>\n",
" <td>Someone Else's America (1995)</td>\n",
" <td>Drama</td>\n",
" </tr>\n",
" <tr>\n",
" <th>5214</th>\n",
" <td>506.0</td>\n",
" <td>6</td>\n",
" <td>Saint of Fort Washington, The (1993)</td>\n",
" <td>Drama</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6156</th>\n",
" <td>506.0</td>\n",
" <td>7</td>\n",
" <td>Celestial Clockwork (1994)</td>\n",
" <td>Comedy</td>\n",
" </tr>\n",
" <tr>\n",
" <th>7099</th>\n",
" <td>506.0</td>\n",
" <td>8</td>\n",
" <td>Some Mother's Son (1996)</td>\n",
" <td>Drama</td>\n",
" </tr>\n",
" <tr>\n",
" <th>8993</th>\n",
" <td>506.0</td>\n",
" <td>9</td>\n",
" <td>Maya Lin: A Strong Clear Vision (1994)</td>\n",
" <td>Documentary</td>\n",
" </tr>\n",
" <tr>\n",
" <th>8039</th>\n",
" <td>506.0</td>\n",
" <td>10</td>\n",
" <td>Prefontaine (1997)</td>\n",
" <td>Drama</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" user rec_nb title \\\n",
"504 506.0 1 Great Day in Harlem, A (1994) \n",
"1446 506.0 2 Tough and Deadly (1995) \n",
"2388 506.0 3 Aiqing wansui (1994) \n",
"3330 506.0 4 Delta of Venus (1994) \n",
"4272 506.0 5 Someone Else's America (1995) \n",
"5214 506.0 6 Saint of Fort Washington, The (1993) \n",
"6156 506.0 7 Celestial Clockwork (1994) \n",
"7099 506.0 8 Some Mother's Son (1996) \n",
"8993 506.0 9 Maya Lin: A Strong Clear Vision (1994) \n",
"8039 506.0 10 Prefontaine (1997) \n",
"\n",
" genres \n",
"504 Documentary \n",
"1446 Action, Drama, Thriller \n",
"2388 Drama \n",
"3330 Drama \n",
"4272 Drama \n",
"5214 Drama \n",
"6156 Comedy \n",
"7099 Drama \n",
"8993 Documentary \n",
"8039 Drama "
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train = pd.read_csv(\n",
" \"./Datasets/ml-100k/train.csv\",\n",
" sep=\"\\t\",\n",
" header=None,\n",
" names=[\"user\", \"item\", \"rating\", \"timestamp\"],\n",
")\n",
"items = pd.read_csv(\"./Datasets/ml-100k/movies.csv\")\n",
"\n",
"user = random.choice(list(set(train[\"user\"])))\n",
"\n",
"train_content = pd.merge(train, items, left_on=\"item\", right_on=\"id\")\n",
"\n",
"print(\"Here is what user rated high:\")\n",
"display(\n",
" train_content[train_content[\"user\"] == user][\n",
" [\"user\", \"rating\", \"title\", \"genres\"]\n",
" ].sort_values(by=\"rating\", ascending=False)[:15]\n",
")\n",
"\n",
"reco = np.loadtxt(\n",
" \"Recommendations generated/ml-100k/Self_BaselineUI_reco.csv\", delimiter=\",\"\n",
")\n",
"items = pd.read_csv(\"./Datasets/ml-100k/movies.csv\")\n",
"\n",
"# Let's ignore scores - they are not used in evaluation:\n",
"reco_users = reco[:, :1]\n",
"reco_items = reco[:, 1::2]\n",
"# Let's put them into one array\n",
"reco = np.concatenate((reco_users, reco_items), axis=1)\n",
"\n",
"# Let's rebuild it user-item dataframe\n",
"recommended = []\n",
"for row in reco:\n",
" for rec_nb, entry in enumerate(row[1:]):\n",
" recommended.append((row[0], rec_nb + 1, entry))\n",
"recommended = pd.DataFrame(recommended, columns=[\"user\", \"rec_nb\", \"item\"])\n",
"\n",
"recommended_content = pd.merge(recommended, items, left_on=\"item\", right_on=\"id\")\n",
"\n",
"print(\"Here is what we recommend:\")\n",
"recommended_content[recommended_content[\"user\"] == user][\n",
" [\"user\", \"rec_nb\", \"title\", \"genres\"]\n",
"].sort_values(by=\"rec_nb\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# project task 2: implement some other evaluation measure"
]
},
{
"cell_type": "code",
"execution_count": 38,
"metadata": {},
"outputs": [],
"source": [
"# it may be your idea, modification of what we have already implemented\n",
"# (for example Hit2 rate which would count as a success users whoreceived at least 2 relevant recommendations)\n",
"# or something well-known\n",
"# expected output: modification of evaluation_measures.py such that evaluate_all will also display your measure\n",
"\n",
"# Hit2Rate - implemented."
]
},
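{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of the Hit2Rate (H2R) idea described above - the fraction of relevant users who got at least 2 relevant recommendations in their top-K list. The actual measure lives in `evaluation_measures.py` (it already appears as the `H2R` column in the tables above); the per-user success counts below are made up purely for illustration."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch of the Hit2Rate rule on made-up per-user success counts\n",
"nb_successes_per_user = [0, 1, 2, 5, 1, 3]  # relevant items each user got in their top-K list\n",
"\n",
"HR = sum(nb > 0 for nb in nb_successes_per_user) / len(nb_successes_per_user)  # at least 1 hit\n",
"H2R = sum(nb > 1 for nb in nb_successes_per_user) / len(nb_successes_per_user)  # at least 2 hits\n",
"\n",
"print(HR, H2R)"
]
},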
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}