diff --git a/project_2_recommender_and_evaluation-0_116.ipynb b/project_2_recommender_and_evaluation-0_116.ipynb deleted file mode 100644 index ab5b05d..0000000 --- a/project_2_recommender_and_evaluation-0_116.ipynb +++ /dev/null @@ -1,1873 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 302, - "id": "alike-morgan", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The autoreload extension is already loaded. To reload it, use:\n", - " %reload_ext autoreload\n" - ] - } - ], - "source": [ - "%matplotlib inline\n", - "%load_ext autoreload\n", - "%autoreload 2\n", - "\n", - "import numpy as np\n", - "import pandas as pd\n", - "import matplotlib.pyplot as plt\n", - "import seaborn as sns\n", - "from IPython.display import Markdown, display, HTML\n", - "from collections import defaultdict\n", - "\n", - "import torch\n", - "import torch.nn as nn\n", - "import torch.optim as optim\n", - "from livelossplot import PlotLosses\n", - "\n", - "# Fix the dying kernel problem (only a problem in some installations - you can remove it, if it works without it)\n", - "import os\n", - "os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'" - ] - }, - { - "cell_type": "markdown", - "id": "blessed-knitting", - "metadata": {}, - "source": [ - "# Load the dataset for recommenders" - ] - }, - { - "cell_type": "code", - "execution_count": 303, - "id": "victorian-bottom", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
user_iditem_idtermlength_of_stay_bucketrate_planroom_segmentn_people_bucketweekend_stay
010WinterVacation[2-3]Standard[260-360][5-inf]True
121WinterVacation[2-3]Standard[160-260][3-4]True
232WinterVacation[2-3]Standard[160-260][2-2]False
343WinterVacation[4-7]Standard[160-260][3-4]True
454WinterVacation[4-7]Standard[0-160][2-2]True
565Easter[4-7]Standard[260-360][5-inf]True
676OffSeason[2-3]Standard[260-360][5-inf]True
787HighSeason[2-3]Standard[160-260][1-1]True
898HighSeason[2-3]Standard[0-160][1-1]True
987HighSeason[2-3]Standard[160-260][1-1]True
1087HighSeason[2-3]Standard[160-260][1-1]True
11109HighSeason[2-3]Standard[160-260][3-4]True
12119HighSeason[2-3]Standard[160-260][3-4]True
131210HighSeason[8-inf]Standard[160-260][3-4]True
141411HighSeason[2-3]Standard[0-160][3-4]True
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "data_path = os.path.join(\"data\", \"hotel_data\")\n", - "\n", - "interactions_df = pd.read_csv(os.path.join(data_path, \"hotel_data_interactions_df.csv\"), index_col=0)\n", - "\n", - "base_item_features = ['term', 'length_of_stay_bucket', 'rate_plan', 'room_segment', 'n_people_bucket', 'weekend_stay']\n", - "\n", - "column_values_dict = {\n", - " 'term': ['WinterVacation', 'Easter', 'OffSeason', 'HighSeason', 'LowSeason', 'MayLongWeekend', 'NewYear', 'Christmas'],\n", - " 'length_of_stay_bucket': ['[0-1]', '[2-3]', '[4-7]', '[8-inf]'],\n", - " 'rate_plan': ['Standard', 'Nonref'],\n", - " 'room_segment': ['[0-160]', '[160-260]', '[260-360]', '[360-500]', '[500-900]'],\n", - " 'n_people_bucket': ['[1-1]', '[2-2]', '[3-4]', '[5-inf]'],\n", - " 'weekend_stay': ['True', 'False']\n", - "}\n", - "\n", - "interactions_df.loc[:, 'term'] = pd.Categorical(\n", - " interactions_df['term'], categories=column_values_dict['term'])\n", - "interactions_df.loc[:, 'length_of_stay_bucket'] = pd.Categorical(\n", - " interactions_df['length_of_stay_bucket'], categories=column_values_dict['length_of_stay_bucket'])\n", - "interactions_df.loc[:, 'rate_plan'] = pd.Categorical(\n", - " interactions_df['rate_plan'], categories=column_values_dict['rate_plan'])\n", - "interactions_df.loc[:, 'room_segment'] = pd.Categorical(\n", - " interactions_df['room_segment'], categories=column_values_dict['room_segment'])\n", - "interactions_df.loc[:, 'n_people_bucket'] = pd.Categorical(\n", - " interactions_df['n_people_bucket'], categories=column_values_dict['n_people_bucket'])\n", - "interactions_df.loc[:, 'weekend_stay'] = interactions_df['weekend_stay'].astype('str')\n", - "interactions_df.loc[:, 'weekend_stay'] = pd.Categorical(\n", - " interactions_df['weekend_stay'], categories=column_values_dict['weekend_stay'])\n", - "\n", - "display(HTML(interactions_df.head(15).to_html()))" - ] - }, - { - "cell_type": "markdown", - "id": "realistic-third", - "metadata": {}, - "source": [ - "# (Optional) Prepare numerical user features\n", - "\n", - "The method below is left here for convenience if you want to experiment with content-based user features as an input for your neural network." 
- ] - }, - { - "cell_type": "code", - "execution_count": 304, - "id": "variable-jaguar", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['user_term_WinterVacation', 'user_term_Easter', 'user_term_OffSeason', 'user_term_HighSeason', 'user_term_LowSeason', 'user_term_MayLongWeekend', 'user_term_NewYear', 'user_term_Christmas', 'user_length_of_stay_bucket_[0-1]', 'user_length_of_stay_bucket_[2-3]', 'user_length_of_stay_bucket_[4-7]', 'user_length_of_stay_bucket_[8-inf]', 'user_rate_plan_Standard', 'user_rate_plan_Nonref', 'user_room_segment_[0-160]', 'user_room_segment_[160-260]', 'user_room_segment_[260-360]', 'user_room_segment_[360-500]', 'user_room_segment_[500-900]', 'user_n_people_bucket_[1-1]', 'user_n_people_bucket_[2-2]', 'user_n_people_bucket_[3-4]', 'user_n_people_bucket_[5-inf]', 'user_weekend_stay_True', 'user_weekend_stay_False']\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
user_iduser_term_WinterVacationuser_term_Easteruser_term_OffSeasonuser_term_HighSeasonuser_term_LowSeasonuser_term_MayLongWeekenduser_term_NewYearuser_term_Christmasuser_length_of_stay_bucket_[0-1]user_length_of_stay_bucket_[2-3]user_length_of_stay_bucket_[4-7]user_length_of_stay_bucket_[8-inf]user_rate_plan_Standarduser_rate_plan_Nonrefuser_room_segment_[0-160]user_room_segment_[160-260]user_room_segment_[260-360]user_room_segment_[360-500]user_room_segment_[500-900]user_n_people_bucket_[1-1]user_n_people_bucket_[2-2]user_n_people_bucket_[3-4]user_n_people_bucket_[5-inf]user_weekend_stay_Trueuser_weekend_stay_False
010.1304350.00.6521740.0869570.1304350.0000000.0000000.0000000.0000000.6086960.3913040.0000000.5217390.4782610.0000000.8695650.1304350.0000000.00.0000000.7391300.1739130.0869570.7826090.217391
47500.0434780.00.4347830.3043480.2173910.0000000.0000000.0000000.0000000.9130430.0869570.0000000.2608700.7391300.0000000.5652170.4347830.0000000.00.0000000.1739130.5217390.3043480.7826090.217391
92960.0833330.00.7083330.1250000.0416670.0416670.0000000.0000000.2500000.6666670.0416670.0416670.2916670.7083330.1250000.7916670.0833330.0000000.00.0416670.3333330.5416670.0833330.7500000.250000
1111150.7272730.00.2727270.0000000.0000000.0000000.0000000.0000000.5000000.3636360.1363640.0000001.0000000.0000000.0000000.8181820.1818180.0000000.00.8181820.0909090.0454550.0454550.3636360.636364
6757060.0919880.00.4510390.1899110.2077150.0385760.0118690.0089020.1691390.4599410.2729970.0979230.9940650.0059350.0207720.8397630.1305640.0089020.00.0415430.0949550.7388720.1246290.6765580.323442
169917360.0344830.00.4827590.2068970.2758620.0000000.0000000.0000000.2413790.5517240.2068970.0000000.1724140.8275860.0000000.9310340.0689660.0000000.00.3793100.4137930.2068970.0000000.4482760.551724
763977790.0370370.00.2962960.2592590.3703700.0000000.0000000.0370370.1111110.2962960.4814810.1111111.0000000.0000000.0000000.8148150.1851850.0000000.00.0000000.0370370.7407410.2222220.8148150.185185
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "def n_to_p(l):\n", - " n = sum(l)\n", - " return [x / n for x in l] if n > 0 else l\n", - "\n", - "def calculate_p(x, values):\n", - " counts = [0]*len(values)\n", - " for v in x:\n", - " counts[values.index(v)] += 1\n", - "\n", - " return n_to_p(counts)\n", - "\n", - "def prepare_users_df(interactions_df):\n", - "\n", - " users_df = interactions_df.loc[:, [\"user_id\"]]\n", - " users_df = users_df.groupby(\"user_id\").first().reset_index(drop=False)\n", - " \n", - " user_features = []\n", - "\n", - " for column in base_item_features:\n", - "\n", - " column_values = column_values_dict[column]\n", - " df = interactions_df.loc[:, ['user_id', column]]\n", - " df = df.groupby('user_id').aggregate(lambda x: list(x)).reset_index(drop=False)\n", - "\n", - " def calc_p(x):\n", - " return calculate_p(x, column_values)\n", - "\n", - " df.loc[:, column] = df[column].apply(lambda x: calc_p(x))\n", - "\n", - " p_columns = []\n", - " for i in range(len(column_values)):\n", - " p_columns.append(\"user_\" + column + \"_\" + column_values[i])\n", - " df.loc[:, p_columns[i]] = df[column].apply(lambda x: x[i])\n", - " user_features.append(p_columns[i])\n", - "\n", - " users_df = pd.merge(users_df, df.loc[:, ['user_id'] + p_columns], on=[\"user_id\"])\n", - " \n", - " return users_df, user_features\n", - " \n", - "\n", - "users_df, user_features = prepare_users_df(interactions_df)\n", - "\n", - "print(user_features)\n", - "\n", - "display(HTML(users_df.loc[users_df['user_id'].isin([706, 1736, 7779, 96, 1, 50, 115])].head(15).to_html()))" - ] - }, - { - "cell_type": "markdown", - "id": "amino-keyboard", - "metadata": {}, - "source": [ - "# (Optional) Prepare numerical item features\n", - "\n", - "The method below is left here for convenience if you want to experiment with content-based item features as an input for your neural network." 
- ] - }, - { - "cell_type": "code", - "execution_count": 305, - "id": "formal-munich", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['term_WinterVacation', 'term_Easter', 'term_OffSeason', 'term_HighSeason', 'term_LowSeason', 'term_MayLongWeekend', 'term_NewYear', 'term_Christmas', 'length_of_stay_bucket_[0-1]', 'length_of_stay_bucket_[2-3]', 'length_of_stay_bucket_[4-7]', 'length_of_stay_bucket_[8-inf]', 'rate_plan_Standard', 'rate_plan_Nonref', 'room_segment_[0-160]', 'room_segment_[160-260]', 'room_segment_[260-360]', 'room_segment_[360-500]', 'room_segment_[500-900]', 'n_people_bucket_[1-1]', 'n_people_bucket_[2-2]', 'n_people_bucket_[3-4]', 'n_people_bucket_[5-inf]', 'weekend_stay_True', 'weekend_stay_False']\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
item_idterm_WinterVacationterm_Easterterm_OffSeasonterm_HighSeasonterm_LowSeasonterm_MayLongWeekendterm_NewYearterm_Christmaslength_of_stay_bucket_[0-1]length_of_stay_bucket_[2-3]length_of_stay_bucket_[4-7]length_of_stay_bucket_[8-inf]rate_plan_Standardrate_plan_Nonrefroom_segment_[0-160]room_segment_[160-260]room_segment_[260-360]room_segment_[360-500]room_segment_[500-900]n_people_bucket_[1-1]n_people_bucket_[2-2]n_people_bucket_[3-4]n_people_bucket_[5-inf]weekend_stay_Trueweekend_stay_False
001000000001001000100000110
111000000001001001000001010
221000000001001001000010001
331000000000101001000001010
441000000000101010000010010
550100000000101000100000110
660010000001001000100000110
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "def map_items_to_onehot(df):\n", - " one_hot = pd.get_dummies(df.loc[:, base_item_features])\n", - " df = df.drop(base_item_features, axis = 1)\n", - " df = df.join(one_hot)\n", - " \n", - " return df, list(one_hot.columns)\n", - "\n", - "def prepare_items_df(interactions_df):\n", - " items_df = interactions_df.loc[:, [\"item_id\"] + base_item_features].drop_duplicates()\n", - " \n", - " items_df, item_features = map_items_to_onehot(items_df)\n", - " \n", - " return items_df, item_features\n", - "\n", - "\n", - "items_df, item_features = prepare_items_df(interactions_df)\n", - "\n", - "print(item_features)\n", - "\n", - "display(HTML(items_df.loc[items_df['item_id'].isin([0, 1, 2, 3, 4, 5, 6])].head(15).to_html()))" - ] - }, - { - "cell_type": "markdown", - "id": "figured-imaging", - "metadata": {}, - "source": [ - "# Neural network recommender\n", - "\n", - "**Task:**
\n", - "Code a recommender based on a neural network model. You are free to choose any network architecture you find appropriate. The network can use the interaction vectors for users and items, embeddings of users and items, as well as user and item features (you can use the features you developed in the first project).\n", - "\n", - "Remember to keep control over randomness - in the init method add the seed as a parameter and initialize the random seed generator with that seed (both for numpy and pytorch):\n", - "\n", - "```python\n", - "self.seed = seed\n", - "self.rng = np.random.RandomState(seed=seed)\n", - "```\n", - "in the network model:\n", - "```python\n", - "self.seed = torch.manual_seed(seed)\n", - "```\n", - "\n", - "You are encouraged to experiment with:\n", - " - the number of layers in the network, the number of neurons and different activation functions,\n", - " - different optimizers and their parameters,\n", - " - batch size and the number of epochs,\n", - " - embedding layers,\n", - " - content-based features of both users and items." - ] - }, - { - "cell_type": "code", - "execution_count": 427, - "id": "unlike-recipient", - "metadata": {}, - "outputs": [], - "source": [ - "from recommenders.recommender import Recommender\n", - "\n", - "\n", - "# HR10 = 0.07\n", - "# class Net(nn.Module):\n", - "# def __init__(self, features_len, output_len):\n", - "# super(Net, self).__init__()\n", - " \n", - "# self.fc1 = nn.Linear(features_len, 150)\n", - "# self.fc2 = nn.Linear(150, 100)\n", - "# self.fc3 = nn.Linear(100, output_len)\n", - "# self.fc4 = nn.Linear(output_len, output_len+200)\n", - " \n", - "# self.dropout = nn.Dropout(p=0.5)\n", - " \n", - "# def forward(self, x):\n", - "# x = F.relu(self.fc1(x))\n", - "# x = self.dropout(x)\n", - "# x = F.relu(self.fc2(x))\n", - "# x = self.dropout(x)\n", - "# x = F.relu(self.fc3(x))\n", - "# return self.fc4(x)\n", - "\n", - "# HR10 = 0.06\n", - "# class Net(nn.Module):\n", - "# def __init__(self, features_len, output_len):\n", - "# super(Net, self).__init__()\n", - " \n", - "# self.fc1 = nn.Linear(features_len, 150)\n", - "# self.fc2 = nn.Linear(150, 100)\n", - "# self.fc3 = nn.Linear(100, output_len)\n", - "# self.fc4 = nn.Linear(output_len, output_len+150)\n", - "\n", - "# self.dropout = nn.Dropout(p=0.5)\n", - " \n", - "# def forward(self, x):\n", - "# x = F.relu(self.fc1(x))\n", - "# x = self.dropout(x)\n", - "# x = F.relu(self.fc2(x))\n", - "# x = self.dropout(x)\n", - "# x = F.relu(self.fc3(x))\n", - "# x = self.dropout(x)\n", - "# return self.fc4(x)\n", - "\n", - "# Softmax very bad choice for multiclassification\n", - "# class Net(nn.Module):\n", - "# def __init__(self, features_len, output_len):\n", - "# super(Net, self).__init__()\n", - " \n", - "# self.fc1 = nn.Linear(features_len, 150)\n", - "# self.fc2 = nn.Linear(150, 100)\n", - "# self.fc3 = nn.Linear(100, output_len)\n", - "# self.fc4 = nn.Linear(output_len, output_len+200)\n", - " \n", - "# self.dropout = nn.Dropout(p=0.5)\n", - "# self.softmax = nn.Softmax()\n", - " \n", - "# def forward(self, x):\n", - "# x = F.relu(self.fc1(x))\n", - "# x = self.dropout(x)\n", - "# x = F.relu(self.fc2(x))\n", - "# x = self.dropout(x)\n", - "# x = F.relu(self.fc3(x))\n", - "# x = self.fc4(x)\n", - "# x = self.softmax(x)\n", - "# return x\n", - " \n", - "# HR10 = 0.083\n", - "class Net(nn.Module):\n", - " def __init__(self, features_len, output_len):\n", - " super(Net, self).__init__()\n", - " \n", - " self.fc1 = nn.Linear(features_len, 150)\n", - " self.fc2 = nn.Linear(150, 
100)\n", - " self.fc3 = nn.Linear(100, output_len)\n", - " self.fc4 = nn.Linear(output_len, output_len+200)\n", - " \n", - " self.dropout = nn.Dropout(p=0.5)\n", - " \n", - " def forward(self, x):\n", - " x = F.relu(self.fc1(x))\n", - " x = self.dropout(x)\n", - " x = F.relu(self.fc2(x))\n", - " x = self.dropout(x)\n", - " x = F.relu(self.fc3(x))\n", - " return self.fc4(x)\n", - " \n", - "class NNRecommender(Recommender):\n", - " \"\"\"\n", - " Linear recommender class based on user and item features.\n", - " \"\"\"\n", - " \n", - " def __init__(self, seed=6789, n_neg_per_pos=5, n_epochs=5000, lr=0.01,):\n", - " \"\"\"\n", - " Initialize base recommender params and variables.\n", - " \"\"\"\n", - " self.model = None\n", - " self.n_neg_per_pos = n_neg_per_pos\n", - " \n", - " self.recommender_df = pd.DataFrame(columns=['user_id', 'item_id', 'score'])\n", - " self.users_df = None\n", - " self.user_features = None\n", - " \n", - " self.seed = seed\n", - " self.rng = np.random.RandomState(seed=seed)\n", - " \n", - " self.n_epochs = n_epochs\n", - " self.lr = lr\n", - " \n", - " def calculate_accuracy(self, y_true, y_pred):\n", - " predictions=(y_pred.argmax(1))\n", - " return (predictions == y_true).sum().float() / len(y_true)\n", - " \n", - " def round_tensor(self, t, decimal_places=3):\n", - " return round(t.item(), decimal_places)\n", - " \n", - " def fit(self, interactions_df, users_df, items_df):\n", - " \"\"\"\n", - " Training of the recommender.\n", - " \n", - " :param pd.DataFrame interactions_df: DataFrame with recorded interactions between users and items \n", - " defined by user_id, item_id and features of the interaction.\n", - " :param pd.DataFrame users_df: DataFrame with users and their features defined by user_id and the user feature columns.\n", - " :param pd.DataFrame items_df: DataFrame with items and their features defined by item_id and the item feature columns.\n", - " \"\"\"\n", - " \n", - " interactions_df = interactions_df.copy()\n", - " # Prepare users_df and items_df \n", - " # (optional - use only if you want to train a hybrid model with content-based features)\n", - " \n", - " users_df, user_features = prepare_users_df(interactions_df)\n", - " \n", - " self.users_df = users_df\n", - " self.user_features = user_features\n", - " \n", - " items_df, item_features = prepare_items_df(interactions_df)\n", - " items_df = items_df.loc[:, ['item_id'] + item_features]\n", - " \n", - " X = items_df[['term_WinterVacation', 'term_Easter', 'term_OffSeason', 'term_HighSeason', 'term_LowSeason', 'term_MayLongWeekend', 'term_NewYear', 'term_Christmas', 'rate_plan_Standard', 'rate_plan_Nonref', 'room_segment_[0-160]', 'room_segment_[160-260]', 'room_segment_[260-360]', 'room_segment_[360-500]', 'room_segment_[500-900]', 'n_people_bucket_[1-1]', 'n_people_bucket_[2-2]', 'n_people_bucket_[3-4]', 'n_people_bucket_[5-inf]', 'weekend_stay_True', 'weekend_stay_False']]\n", - " y = items_df[['item_id']]\n", - " X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=self.seed)\n", - " \n", - " X_train = torch.from_numpy(X_train.to_numpy()).float()\n", - " y_train = torch.squeeze(torch.from_numpy(y_train.to_numpy()).long())\n", - " X_test = torch.from_numpy(X_test.to_numpy()).float()\n", - " y_test = torch.squeeze(torch.from_numpy(y_test.to_numpy()).long())\n", - " \n", - " self.net = Net(X_train.shape[1], items_df['item_id'].unique().size)\n", - " \n", - " optimizer = optim.Adam(self.net.parameters(), lr=self.lr)\n", - " criterion = nn.CrossEntropyLoss()\n", - 
" \n", - " for epoch in range(self.n_epochs):\n", - " y_pred = self.net(X_train)\n", - " y_pred = torch.squeeze(y_pred)\n", - " train_loss = criterion(y_pred, y_train)\n", - " \n", - "# if epoch % 100 == 0:\n", - "# train_acc = self.calculate_accuracy(y_train, y_pred)\n", - "# y_test_pred = self.net(X_test)\n", - "# y_test_pred = torch.squeeze(y_test_pred)\n", - "# test_loss = criterion(y_test_pred, y_test)\n", - "# test_acc = self.calculate_accuracy(y_test, y_test_pred)\n", - "# print(\n", - "# f'''epoch {epoch}\n", - "# Train set - loss: {self.round_tensor(train_loss)}, accuracy: {self.round_tensor(train_acc)}\n", - "# Test set - loss: {self.round_tensor(test_loss)}, accuracy: {self.round_tensor(test_acc)}\n", - "# ''')\n", - " \n", - " optimizer.zero_grad()\n", - " train_loss.backward()\n", - " optimizer.step()\n", - " \n", - " def recommend(self, users_df, items_df, n_recommendations=1):\n", - " \"\"\"\n", - " Serving of recommendations. Scores items in items_df for each user in users_df and returns \n", - " top n_recommendations for each user.\n", - " \n", - " :param pd.DataFrame users_df: DataFrame with users and their features for which recommendations should be generated.\n", - " :param pd.DataFrame items_df: DataFrame with items and their features which should be scored.\n", - " :param int n_recommendations: Number of recommendations to be returned for each user.\n", - " :return: DataFrame with user_id, item_id and score as columns returning n_recommendations top recommendations \n", - " for each user.\n", - " :rtype: pd.DataFrame\n", - " \"\"\"\n", - " \n", - " # Clean previous recommendations (iloc could be used alternatively)\n", - " self.recommender_df = self.recommender_df[:0]\n", - " \n", - " # Prepare users_df and items_df\n", - " # (optional - use only if you want to train a hybrid model with content-based features)\n", - " \n", - " users_df = users_df.loc[:, 'user_id']\n", - " users_df = pd.merge(users_df, self.users_df, on=['user_id'], how='left').fillna(0)\n", - " \n", - " # items_df, item_features = prepare_items_df(items_df)\n", - " # items_df = items_df.loc[:, ['item_id'] + item_features]\n", - " \n", - " # Score the items\n", - " \n", - " recommendations = pd.DataFrame(columns=['user_id', 'item_id', 'score'])\n", - " \n", - " for ix, user in users_df.iterrows():\n", - " prep_user = torch.from_numpy(user[['user_term_WinterVacation', 'user_term_Easter', 'user_term_OffSeason', 'user_term_HighSeason', 'user_term_LowSeason', 'user_term_MayLongWeekend', 'user_term_NewYear', 'user_term_Christmas', 'user_rate_plan_Standard', 'user_rate_plan_Nonref', 'user_room_segment_[0-160]', 'user_room_segment_[160-260]', 'user_room_segment_[260-360]', 'user_room_segment_[360-500]', 'user_room_segment_[500-900]', 'user_n_people_bucket_[1-1]', 'user_n_people_bucket_[2-2]', 'user_n_people_bucket_[3-4]', 'user_n_people_bucket_[5-inf]', 'user_weekend_stay_True', 'user_weekend_stay_False']].to_numpy()).float()\n", - " \n", - " scores = self.net(prep_user).detach().numpy()\n", - " \n", - " chosen_ids = np.argsort(-scores)[:n_recommendations]\n", - " \n", - " recommendations = []\n", - " for item_id in chosen_ids:\n", - " recommendations.append(\n", - " {\n", - " 'user_id': user['user_id'],\n", - " 'item_id': item_id,\n", - " 'score': scores[item_id]\n", - " }\n", - " )\n", - " \n", - " user_recommendations = pd.DataFrame(recommendations)\n", - " \n", - " self.recommender_df = pd.concat([self.recommender_df, user_recommendations])\n", - " \n", - " return self.recommender_df\n", - "\n", - "# Fit 
method\n", - "# nn_recommender = NNRecommender(10000, 0.02)\n", - "# nn_recommender.fit(interactions_df.head(1000), None, None)\n", - "# nn_recommender.fit(interactions_df, None, None)" - ] - }, - { - "cell_type": "markdown", - "id": "copyrighted-relative", - "metadata": {}, - "source": [ - "# Quick test of the recommender" - ] - }, - { - "cell_type": "code", - "execution_count": 412, - "id": "greatest-canon", - "metadata": {}, - "outputs": [], - "source": [ - "items_df = interactions_df.loc[:, ['item_id'] + base_item_features].drop_duplicates()" - ] - }, - { - "cell_type": "code", - "execution_count": 413, - "id": "initial-capital", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "epoch 0\n", - " Train set - loss: 6.042, accuracy: 0.011\n", - " Test set - loss: 6.025, accuracy: 0.0\n", - " \n", - "epoch 100\n", - " Train set - loss: 1.162, accuracy: 0.506\n", - " Test set - loss: 36.526, accuracy: 0.0\n", - " \n" - ] - } - ], - "source": [ - "# Fit method\n", - "nn_recommender = NNRecommender(n_epochs=200, lr=0.01)\n", - "nn_recommender.fit(interactions_df.head(1000), None, None)\n", - "# nn_recommender.fit(interactions_df, None, None)" - ] - }, - { - "cell_type": "code", - "execution_count": 414, - "id": "digital-consolidation", - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
user_iditem_idscoretermlength_of_stay_bucketrate_planroom_segmentn_people_bucketweekend_stay
01.01195.364058Easter[2-3]Standard[160-260][2-2]True
11.0885.033441WinterVacation[0-1]Standard[160-260][2-2]True
21.0574.771185WinterVacation[2-3]Standard[160-260][2-2]True
33.0211.286193WinterVacation[2-3]Standard[160-260][2-2]False
43.07410.848604WinterVacation[4-7]Standard[160-260][2-2]False
53.08110.656947WinterVacation[0-1]Standard[160-260][2-2]False
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# Recommender method\n", - "\n", - "recommendations = nn_recommender.recommend(pd.DataFrame([[1],[3]], columns=['user_id']), items_df, 3)\n", - "\n", - "recommendations = pd.merge(recommendations, items_df, on='item_id', how='left')\n", - "display(HTML(recommendations.to_html()))" - ] - }, - { - "cell_type": "markdown", - "id": "advanced-eleven", - "metadata": {}, - "source": [ - "# Tuning method" - ] - }, - { - "cell_type": "code", - "execution_count": 310, - "id": "strange-alaska", - "metadata": {}, - "outputs": [], - "source": [ - "from evaluation_and_testing.testing import evaluate_train_test_split_implicit\n", - "\n", - "seed = 6789" - ] - }, - { - "cell_type": "code", - "execution_count": 311, - "id": "stable-theta", - "metadata": {}, - "outputs": [], - "source": [ - "from hyperopt import hp, fmin, tpe, Trials\n", - "import traceback\n", - "\n", - "def tune_recommender(recommender_class, interactions_df, items_df, \n", - " param_space, max_evals=1, show_progressbar=True, seed=6789):\n", - " # Split into train_validation and test sets\n", - "\n", - " shuffle = np.arange(len(interactions_df))\n", - " rng = np.random.RandomState(seed=seed)\n", - " rng.shuffle(shuffle)\n", - " shuffle = list(shuffle)\n", - "\n", - " train_test_split = 0.8\n", - " split_index = int(len(interactions_df) * train_test_split)\n", - "\n", - " train_validation = interactions_df.iloc[shuffle[:split_index]]\n", - " test = interactions_df.iloc[shuffle[split_index:]]\n", - "\n", - " # Tune\n", - "\n", - " def loss(tuned_params):\n", - " recommender = recommender_class(seed=seed, **tuned_params)\n", - " hr1, hr3, hr5, hr10, ndcg1, ndcg3, ndcg5, ndcg10 = evaluate_train_test_split_implicit(\n", - " recommender, train_validation, items_df, seed=seed)\n", - " return -hr10\n", - "\n", - " n_tries = 1\n", - " succeded = False\n", - " try_id = 0\n", - " while not succeded and try_id < n_tries:\n", - " try:\n", - " trials = Trials()\n", - " best_param_set = fmin(loss, space=param_space, algo=tpe.suggest, \n", - " max_evals=max_evals, show_progressbar=show_progressbar, trials=trials, verbose=True)\n", - " succeded = True\n", - " except:\n", - " traceback.print_exc()\n", - " try_id += 1\n", - " \n", - " if not succeded:\n", - " return None\n", - " \n", - " # Validate\n", - " \n", - " recommender = recommender_class(seed=seed, **best_param_set)\n", - "\n", - " results = [[recommender_class.__name__] + list(evaluate_train_test_split_implicit(\n", - " recommender, {'train': train_validation, 'test': test}, items_df, seed=seed))]\n", - "\n", - " results = pd.DataFrame(results, \n", - " columns=['Recommender', 'HR@1', 'HR@3', 'HR@5', 'HR@10', 'NDCG@1', 'NDCG@3', 'NDCG@5', 'NDCG@10'])\n", - "\n", - " display(HTML(results.to_html()))\n", - " \n", - " return best_param_set" - ] - }, - { - "cell_type": "markdown", - "id": "reliable-switzerland", - "metadata": {}, - "source": [ - "## Tuning of the recommender\n", - "\n", - "**Task:**
\n", - "Tune your model using the code below. You only need to put the class name of your recommender and choose an appropriate parameter space." - ] - }, - { - "cell_type": "code", - "execution_count": 428, - "id": "obvious-astrology", - "metadata": { - "scrolled": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " 0%| | 0/10 [00:01\", line 33, in tune_recommender\n", - " best_param_set = fmin(loss, space=param_space, algo=tpe.suggest,\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/fmin.py\", line 507, in fmin\n", - " return trials.fmin(\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/base.py\", line 682, in fmin\n", - " return fmin(\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/fmin.py\", line 553, in fmin\n", - " rval.exhaust()\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/fmin.py\", line 356, in exhaust\n", - " self.run(self.max_evals - n_done, block_until_done=self.asynchronous)\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/fmin.py\", line 292, in run\n", - " self.serial_evaluate()\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/fmin.py\", line 170, in serial_evaluate\n", - " result = self.domain.evaluate(spec, ctrl)\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/base.py\", line 907, in evaluate\n", - " rval = self.fn(pyll_rval)\n", - " File \"\", line 23, in loss\n", - " hr1, hr3, hr5, hr10, ndcg1, ndcg3, ndcg5, ndcg10 = evaluate_train_test_split_implicit(\n", - " File \"/home/jovyan/REK/evaluation_and_testing/testing.py\", line 93, in evaluate_train_test_split_implicit\n", - " recommender.fit(interactions_df_train, None, items_df)\n", - " File \"\", line 131, in fit\n", - " users_df, user_features = prepare_users_df(interactions_df)\n", - " File \"\", line 15, in prepare_users_df\n", - " users_df = users_df.groupby(\"user_id\").first().reset_index(drop=False)\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/groupby/groupby.py\", line 1698, in first\n", - " return self._agg_general(\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/groupby/groupby.py\", line 1044, in _agg_general\n", - " result = self.aggregate(lambda x: npfunc(x, axis=self.axis))\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/groupby/generic.py\", line 977, in aggregate\n", - " result = self._aggregate_frame(func)\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/groupby/generic.py\", line 1135, in _aggregate_frame\n", - " fres = func(data, *args, **kwargs)\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/groupby/groupby.py\", line 1044, in \n", - " result = self.aggregate(lambda x: npfunc(x, axis=self.axis))\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/groupby/groupby.py\", line 1692, in first_compat\n", - " return obj.apply(first, axis=axis)\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/frame.py\", line 7768, in apply\n", - " return op.get_result()\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/apply.py\", line 185, in get_result\n", - " return self.apply_standard()\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/apply.py\", line 276, in apply_standard\n", - " results, res_index = 
self.apply_series_generator()\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/apply.py\", line 288, in apply_series_generator\n", - " for i, v in enumerate(series_gen):\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/apply.py\", line 330, in \n", - " return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/frame.py\", line 2964, in _ixs\n", - " values = self._mgr.iget(i)\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/internals/managers.py\", line 1006, in iget\n", - " return SingleBlockManager(\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/pandas/core/internals/managers.py\", line 1555, in __init__\n", - " if fastpath is not lib.no_default:\n", - "KeyboardInterrupt\n" - ] - } - ], - "source": [ - "param_space = {\n", - " 'n_neg_per_pos': hp.quniform('n_neg_per_pos', 1, 10, 1)\n", - "}\n", - "items_df['item_id'].unique().size\n", - "\n", - "best_param_set = tune_recommender(NNRecommender, interactions_df, items_df,\n", - " param_space, max_evals=10, show_progressbar=True, seed=seed)\n", - "\n", - "print(\"Best parameters:\")\n", - "print(best_param_set)" - ] - }, - { - "cell_type": "markdown", - "id": "accredited-strap", - "metadata": {}, - "source": [ - "# Final evaluation\n", - "\n", - "**Task:**
\n", - "Run the final evaluation of your recommender and present its results against the Amazon and Netflix recommenders' results. You just need to give the class name of your recommender and its tuned parameters below." - ] - }, - { - "cell_type": "code", - "execution_count": 434, - "id": "given-homework", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
RecommenderHR@1HR@3HR@5HR@10NDCG@1NDCG@3NDCG@5NDCG@10
0NNRecommender0.0250080.0352090.0664690.1168150.0250080.03110.0436970.059459
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "nn_recommender = NNRecommender(n_neg_per_pos=6, n_epochs=20000) # Initialize your recommender here\n", - "\n", - "# Give the name of your recommender in the line below\n", - "nn_tts_results = [['NNRecommender'] + list(evaluate_train_test_split_implicit(\n", - " nn_recommender, interactions_df, items_df))]\n", - "\n", - "nn_tts_results = pd.DataFrame(\n", - " nn_tts_results, columns=['Recommender', 'HR@1', 'HR@3', 'HR@5', 'HR@10', 'NDCG@1', 'NDCG@3', 'NDCG@5', 'NDCG@10'])\n", - "\n", - "display(HTML(nn_tts_results.to_html()))" - ] - }, - { - "cell_type": "code", - "execution_count": 314, - "id": "suited-nomination", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
RecommenderHR@1HR@3HR@5HR@10NDCG@1NDCG@3NDCG@5NDCG@10
0AmazonRecommender0.0421190.104640.1405070.1994080.0421190.0768260.0917970.110711
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "from recommenders.amazon_recommender import AmazonRecommender\n", - "\n", - "amazon_recommender = AmazonRecommender()\n", - "\n", - "amazon_tts_results = [['AmazonRecommender'] + list(evaluate_train_test_split_implicit(\n", - " amazon_recommender, interactions_df, items_df))]\n", - "\n", - "amazon_tts_results = pd.DataFrame(\n", - " amazon_tts_results, columns=['Recommender', 'HR@1', 'HR@3', 'HR@5', 'HR@10', 'NDCG@1', 'NDCG@3', 'NDCG@5', 'NDCG@10'])\n", - "\n", - "display(HTML(amazon_tts_results.to_html()))" - ] - }, - { - "cell_type": "code", - "execution_count": 315, - "id": "conservative-remedy", - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAbwAAAI4CAYAAAAReVyMAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAsTAAALEwEAmpwYAABE1UlEQVR4nO3de3ycdZ33/9cnM5NMzkmTtCk90II9AKVALYcV5SDggq6iLmoR+YmKiKu3h929f6K/XVH3dm93f6y3694gIuJhZUFEVG4F8bCgshxsOQgU2lJKoaG0TZO2ac6nz/3HdU06SSfJpJlkkrnez8djnJnrumbmmzHNm+/3+l6fr7k7IiIiha4o3w0QERGZDgo8ERGJBAWeiIhEggJPREQiQYEnIiKRoMATEZFIUOCJiEgkKPBE8szMtpvZ+fluh0ihU+CJiEgkKPBEZiAzKzGzr5nZzvD2NTMrCffVm9nPzWy/mbWa2R/MrCjc9xkze8XMDprZZjM7L78/icjMEc93A0Qko/8POAM4GXDgZ8DfAX8P/A3QBDSEx54BuJmtAD4OnOruO81sCRCb3maLzFzq4YnMTJcBX3L3Pe7eDHwRuDzc1wfMB4529z53/4MHRXEHgBLgeDNLuPt2d38hL60XmYEUeCIz01HAS2nPXwq3Afz/wFbgV2a2zcyuAXD3rcCngC8Ae8zsdjM7ChEBFHgiM9VO4Oi054vDbbj7QXf/G3c/Bngr8Nepc3Xu/h/u/vrwtQ780/Q2W2TmUuCJzAwJM0umbsBtwN+ZWYOZ1QOfB34AYGZ/YWavMTMD2giGMgfMbIWZvTGc3NINdIX7RAQFnshMcQ9BQKVuSWAD8BTwNPA48D/CY5cBvwHagYeBG9z9AYLzd18B9gK7gLnA56btJxCZ4UwLwIqISBSohyciIpGgwBMRkUhQ4ImISCQo8EREJBJmZGmx+vp6X7JkSb6bISIis9Bjjz22190bRm6fkYG3ZMkSNmzYkO9miIjILGRmL2XariFNERGJBAWeiIhEggJPREQiQYEnIiKRoMATEZFIUOCJiEgkKPBERCQSFHgiIhIJCjwREYkEBZ6IiESCAk9ERCJBgSciIpGgwBMRkUhQ4ImISCQo8EREJBIUeCIiEgkKPBERiQQFnoiIRIICT0REIkGBJyIikVC4gffCf8Ke5/LdChERmSEKN/B+eDk89r18t0JERGaIwg28ZA107893K0REZIYo3MArrYGu/fluhYiIzBCFG3jq4YmISJrCDTz18EREJE3hBp56eCIikqZwA089PBERSVO4gZesgf4u6O/Jd0tERGQGKNzAK60J7rsP5LUZIiIyMxRu4CVrgnsNa4qICFkGnpldaGabzWyrmV2TYf9lZvZUeHvIzE4asT9mZk+Y2c9z1fBxDfXw9k/bR4qIyMw1buCZWQy4HrgIOB641MyOH3HYi8DZ7r4a+AfgphH7PwlMb2FL9fBERCRNNj2804Ct7r7N3XuB24GL0w9w94fcfV/49BFgYWqfmS0E3gLcnJsmZylZHdyrhyciImQXeAuAHWnPm8Jto/kQcG/a868B/y8wONaHmNlVZrbBzDY0Nzdn0axxpIY01cMTERGyCzzLsM0zHmh2LkHgfSZ8/hfAHnd/bLwPcfeb3H2tu69taGjIolnjSA1pqocnIiJAPItjmoBFac8XAjtHHmRmqwmGLS9y95Zw85nA28zszUASqDKzH7j7+ybX7CzEiyFRph6eiIgA2fXw1gPLzGypmRUD64C70w8ws8XAXcDl7r4ltd3dP+vuC919Sfi6/5yWsEtReTEREQmN28Nz934z+zhwHxADbnH3jWZ2dbj/RuDzQB1wg5kB9Lv72qlrdpZUXkxERELZDGni7vcA94zYdmPa4yuBK8d5jweABybcwslQD09EREKFW2kFgh6eSouJiAiFHnjJGg1piogIUOiBV1qjIU0REQEKPfCSNdDbDgN9+W6JiIjkWWEHnpYIEhGRUGEHXqqeps7jiYhEXoEHXk1wr/N4IiKRV9iBpwLSIiISKuzAUw9PRERChR14Qz28fWMeJiIiha+wA089PBERCRV24CWSEE/qHJ6IiBR44EFYQFrX4YmIRF3hB57Ki4mICFEIPBWQFhERohB46uGJiAhRCLxkDXTpHJ6ISNRFIPCq1cMTEZEIBF5pDfS0weBAvlsiIiJ5VPiBN3TxuYY1RUSirPADT+XFRESEKASeyouJiAhRCDwtESQiIkQh8NTDExERohB4qR6eJq2IiERa4QdeqoenIU0RkUgr/MBLlEKsWEOaIiIRV/iBZ6YC0iIiEoHAAxWQFhGRiAReslo9PBGRiItI4NWohyciEnHRCLzSGvXwREQiLhqBpx6eiEjkRSPwSmuguw0GB/PdEhERyZNoBF6yBnDoUbUVEZGoikbgqYC0iEjkRSPwtAisiEjkRSPwhgpI789nK0REJI+iEXgqIC0iEnnRCDz18EREIi8agacenohI5EUj8IrLoSiuHp6ISIRFI/DMVEBaRCTiohF4oPJiIiIRF53AUwFpEZFIi07gqYcnIhJp0Qk89fBERCItq8AzswvNbLOZbTWzazLsv8zMngpvD5nZSeH2pJn90cz+ZGYbzeyLuf4BspasUWkxEZEIi493gJnFgOuBC4AmYL2Z3e3uz6Yd9iJwtrvvM7OLgJuA04Ee4I3u3m5mCeBBM7vX3R/J+U8yntKaIPDcg1mbIiISKdn08E4Dtrr7NnfvBW4HLk4/wN0fcvd94dNH
gIXhdnf39nB7Irx5Tlo+Ucka8AHoOZiXjxcRkfzKJvAWADvSnjeF20bzIeDe1BMzi5nZk8Ae4Nfu/ugRtHPyVF5MRCTSsgm8TON/GXtpZnYuQeB9ZuhA9wF3P5mg13eama0a5bVXmdkGM9vQ3NycRbMmSOXFREQiLZvAawIWpT1fCOwceZCZrQZuBi5295aR+919P/AAcGGmD3H3m9x9rbuvbWhoyKJZE6QenohIpGUTeOuBZWa21MyKgXXA3ekHmNli4C7gcnffkra9wcxqwselwPnAphy1fWLUwxMRibRxZ2m6e7+ZfRy4D4gBt7j7RjO7Otx/I/B5oA64wYIZkP3uvhaYD3wvnOlZBNzh7j+fmh9lHMnq4F49PBGRSBo38ADc/R7gnhHbbkx7fCVwZYbXPQWcMsk25kZqSFM9PBGRSIpOpZXiSrAi9fBERCIqOoFXVKQlgkREIiw6gQcqIC0iEmHRCrxUeTEREYmcgg28x17ax4t7O4ZvTNZoSFNEJKIKNvDef8sf+feHXxq+sbRGQ5oiIhFVsIFXlYzT1t03fKN6eCIikVW4gVea4EDXiMBL9fA8Pws2iIhI/hR04LWNDLxkDQz2Q29HxteIiEjhKtjAqx6thwc6jyciEkEFG3hVyQQHu/uHb0zV09R5PBGRyCnYwMvYw0utmKAenohI5BRs4FWVxmnv6ad/YPDQRhWQFhGJrIINvOrSBMDwYU318EREIqtgA68qGQTesGFN9fBERCKrYAMv1cMbdvF5STVgqqcpIhJBhRt4ZRl6eEVFkKzSkKaISAQVbOClhjTbukZemlCjIU0RkQgq2MBLDWmOWl5MREQipWADr6o0DqAC0iIiAhRw4JUmYiRiph6eiIgABRx4ZkZVcpQC0urhiYhETsEGHoxWXqxaSwSJiERQQQdeZWmCtpEFpEtrYKAX+rry0iYREcmPgg48FZAWEZGUgg68qmT88HN4Ki8mIhJJBR141aOteg7q4YmIRExBB15VOKTp6RNUhlY9Vz1NEZEoKejAqy5N0D/odPUNHNqY6uFpSFNEJFIKOvAyLxFUG9xrSFNEJFIKOvCGlghKLyCdrA7u1cMTEYmUSATe8CWCYlCiJYJERKKmoANvqIC0youJiEReQQfe6EsEVauHJyISMQUdeEOLwGqJIBGRyCvowKtMBkOaoxaQFhGRyCjowIvHiqgoiQ+fpQnBxefq4YmIREpBBx6MUUBaPTwRkUgp+MCrTMYzr3re3w193Xlpk4iITL+CD7zq0kTmSSugepoiIhFS8IFXlWnFBJUXExGJnIIPvDGXCNLEFRGRyCj4wKtKZpi0MrRE0P7pbo6IiORJwQdedWmCjt4B+gcGD21UD09EJHIKPvCG6ml2p12Lpx6eiEjkFHzgHVoiKG1YU0sEiYhETmQCb9h5vFgCEuXq4YmIREjBB15V6SgFpFVeTEQkUrIKPDO70Mw2m9lWM7smw/7LzOyp8PaQmZ0Ubl9kZveb2XNmttHMPpnrH2A8oy4RpPJiIiKREh/vADOLAdcDFwBNwHozu9vdn0077EXgbHffZ2YXATcBpwP9wN+4++NmVgk8Zma/HvHaKTW0RJAKSIuIRFo2PbzTgK3uvs3de4HbgYvTD3D3h9x9X/j0EWBhuP1Vd388fHwQeA5YkKvGZ2PsHp5Ki4mIREU2gbcA2JH2vImxQ+tDwL0jN5rZEuAU4NEJtG/SkokiEjHLfA5PQ5oiIpEx7pAmYBm2ecYDzc4lCLzXj9heAfwY+JS7t43y2quAqwAWL16cRbOyY2ajLxGkIU0RkcjIpofXBCxKe74Q2DnyIDNbDdwMXOzuLWnbEwRhd6u73zXah7j7Te6+1t3XNjQ0ZNv+rIxaXqyvAwb6Mr5GREQKSzaBtx5YZmZLzawYWAfcnX6AmS0G7gIud/ctadsN+DbwnLt/NXfNnpiMKyaovJiISKSMG3ju3g98HLiPYNLJHe6+0cyuNrOrw8M+D9QBN5jZk2a2Idx+JnA58MZw+5Nm9ubc/xhjy7xEUE1wr/N4IiKRkM05PNz9HuCeEdtuTHt8JXBlhtc9SOZzgNOqujTBjtbO4RvVwxMRiZSCr7QCUJWMa4kgEZGIi0TgpRaBdU+bXKoC0iIikRKJwKsqTdA/6HT2DhzamBrSVA9PRCQSIhF41ZkKSKeGNNXDExGJhEgEXqqe5rDzePESiJeqhyciEhGRCLxDi8BmKCCtwBMRiYRIBZ7Ki4mIRFckAq+qNLjcMOPF51oxQUQkEiIReOrhiYhIJAKvMtOkFdA5PBGRCIlE4MWKjMqS+OFr4qmHJyISGZEIPAguPs/Yw+s9CAP9GV8jIiKFI1KBd9hlCUPVVjRxRUSk0EUn8JLxDGvihfU0dR5PRKTgRSbwqksTh5/DU3kxEZHIiEzgZTyHNzSkuW/a2yMiItMrMoFXPdaq5+rhiYgUvMgEXlUyQUfvAH0Dg4c2atKKiEhkRCbwqsPyYge702ZqatVzEZHIiEzgVWUqL5YohViJhjRFRCIgMoF3aIkglRcTEYmiyAWeCkiLiERTZAIvNaSZ8Vo89fBERApeZAJPPTwRkWiLTOBVaYkgEZFIi0zgJRNFFMeKMhSQroYuXYcnIlLoIhN4ZkZVaTzzkGbPARgcyEu7RERkekQm8CBcImi0AtKqtiIiUtCiFXjJDPU0h8qL7Z/u5oiIyDSKVOCNWUBaPTwRkYIWqcAbc4kgXZogIlLQIhV41aVx2rpHzNJUAWkRkUiIVOBVJYMenrsf2qgenohIJEQq8KpLEwwMOp29aZcgqIcnIhIJkQq8zEsElUFRQj08EZECF6nAq85UQNpM5cVERCIgkoF3oFMFpEVEoiZSgTdqAelktXp4IiIFLlKBd2hIM8OlCerhiYgUtEgFXlVpHBhlTTz18EREClqkAq8yHNLMWF5MPTwRkYIWqcCLFRmVJaMtEdQGg4N5aZeIiEy9SAUejLFEkA9C78G8tElERKZeNANPBaRFRCIncoFXXRqnrUsFpEVEoiZygZcqID2MengiIgUvcoFXPdo5PFAPT0SkgEUu8LQIrIhINGUVeGZ2oZltNrOtZnZNhv2XmdlT4e0hMzspbd8tZrbHzJ7JZcOPVHVpgs7eAfoG0i5BUA9PRKTgjRt4ZhYDrgcuAo4HLjWz40cc9iJwtruvBv4BuClt33eBC3PS2hyoSgbVVobN1CyuAIuphyciUsCy6eGdBmx1923u3gvcDlycfoC7P+Tu+8KnjwAL0/b9HmjNUXsnrbosQz1NMxWQFhEpcNkE3gJgR9rzpnDbaD4E3DvRhpjZVWa2wcw2NDc3T/TlWavOtAgsqLyYiEiByybwLMM2z3ig2bkEgfeZiTbE3W9y97XuvrahoWGiL8/a6EsE1UD3gSn7XBERya94Fsc0AYvSni8Edo48yMxWAzcDF7l7S26al3tDSwRl6uFpSFNEpGBl08NbDywzs6VmVgysA+5OP8DMFgN3AZe7+5bcNzN3qkYb0tSq5yIiBW3cwHP3fuDjwH3Ac8Ad7r7RzK42s6vDwz4P1AE
3mNmTZrYh9Xozuw14GFhhZk1m9qGc/xQTcGgRWPXwRESiJJshTdz9HuCeEdtuTHt8JXDlKK+9dDINzLWSeBHFsaLRe3juwaxNEREpKJGrtGJm4YoJGQpI+wD0tuelXSIiMrUiF3gAVaVxLREkIhIxkQw8FZAWEYmeSAaelggSEYmeSAZedaZVz9XDExEpaJEMvKrSeIYeXnVwrx6eiEhBimTgBefw+nFPq5CWGtJUD09EpCBFMvCqkgkGBp2O3oFDG0uqAFM9TRGRAhXJwMu4YkJRUTCsqSFNEZGCFOnAUwFpEZHoiGTgqYC0iEj0RDLw1MMTEYmeSAbemIvAqocnIlKQIhl4h5YIylBAWj08EZGCFMnAq0gGqyKNuUSQiIgUlEgGXqzIqExmWDGhtAYG+6CvMy/tEhGRqRPJwIPgPJ6WCBIRiY7IBl7GJYJS9TR1Hk9EpOBENvAyFpBOrZigHp6ISMGJbOAFSwSNmKU5VEBa9TRFRApNZAMv4yKwWhNPRKRgRTbwMp/DqwnuNaQpIlJwIht4VaUJOnsH6BsYPLRRk1ZERApWZAMv8xJBsSD0Olvz1CoREZkqkQ+8w67FqzkaWrfloUUiIjKVIht4VaWjlBdrWAF7n89Di0REZCrF892AfBm1gHT9cnj6R9DbCcVleWiZiBSivr4+mpqa6O7uzndTCkYymWThwoUkEomsjo9s4I26RFD9suC+5XmYf9I0t0pEClVTUxOVlZUsWbIEM8t3c2Y9d6elpYWmpiaWLl2a1WsiO6Q56jm8+hXBvYY1RSSHuru7qaurU9jliJlRV1c3oR5zZAOvKtMsTYA5x4AVwd4teWiViBQyhV1uTfT7jGzgJRMxiuNFh198nkgGMzWbN+enYSIiU2T//v3ccMMNE37dm9/8Zvbv3z/mMZ///Of5zW9+c4Qtmx6RDTwYZYkg0ExNESlIowXewMDAmK+75557qKmpGfOYL33pS5x//vmTad6Ui3TgVZfGDy8gDcHElZatMDj2L4GIyGxyzTXX8MILL3DyySdz6qmncu655/Le976XE088EYC3v/3tvPa1r+WEE07gpptuGnrdkiVL2Lt3L9u3b+e4447jwx/+MCeccAJvetOb6OrqAuCKK67gzjvvHDr+2muvZc2aNZx44ols2rQJgObmZi644ALWrFnDRz7yEY4++mj27t07bT9/ZGdpQnAe77BzeBBcmjDQA/tfCs7piYjk0Bf/z0ae3dmW0/c8/qgqrn3rCWMe85WvfIVnnnmGJ598kgceeIC3vOUtPPPMM0OzHG+55RbmzJlDV1cXp556Kn/5l39JXV3dsPd4/vnnue222/jWt77Fu9/9bn784x/zvve977DPqq+v5/HHH+eGG27guuuu4+abb+aLX/wib3zjG/nsZz/LL3/5y2GhOh0i3sPLUEAaNFNTRCLhtNNOGzal/+tf/zonnXQSZ5xxBjt27OD55w//G7h06VJOPvlkAF772teyffv2jO/9zne+87BjHnzwQdatWwfAhRdeSG1tbe5+mCxEu4eXTPDi3o7Dd6Suxdu7BZb/+fQ2SkQK3ng9selSXl4+9PiBBx7gN7/5DQ8//DBlZWWcc845Gaf8l5SUDD2OxWJDQ5qjHReLxejvD04duXsumz9hke/hZRzSLJsDZfWaqSkiBaWyspKDBw9m3HfgwAFqa2spKytj06ZNPPLIIzn//Ne//vXccccdAPzqV79i3759Of+MsUS7h1cap62rD3c//HoOzdQUkQJTV1fHmWeeyapVqygtLWXevHlD+y688EJuvPFGVq9ezYoVKzjjjDNy/vnXXnstl156KT/84Q85++yzmT9/PpWVlTn/nNFYvruYmaxdu9Y3bNgw5Z9z0+9f4B/v2cTTX3gTlckRtdj+zyfh2bvhMy9OeTtEpPA999xzHHfccfluRl719PQQi8WIx+M8/PDDfPSjH+XJJ5+c1Htm+l7N7DF3Xzvy2Ej38NILSB8WePXLoasVOvZCeX0eWiciUlhefvll3v3udzM4OEhxcTHf+ta3pvXzIx14QwWkO/tYUFM6fOfQTM0tCjwRkRxYtmwZTzzxRN4+P/KTVoBRLk1Im6kpIiKzXqQDb9QC0gDViyBeCs0KPBGRQhDpwBt1iSCAoiKof416eCIiBSLSgTfqIrAp9csVeCIiBSLSgVeZjGMWzNLMqH4F7H8Z+jJXEhARKWQVFRUA7Ny5k0suuSTjMeeccw7jXUb2ta99jc7OzqHn2Sw3NBUiHXhFRUZFSTzzkCaEE1c8WDlBRCSijjrqqKGVEI7EyMDLZrmhqRDpwIOwgPRYQ5qgYU0RKQif+cxnhq2H94UvfIEvfvGLnHfeeUNL+fzsZz877HXbt29n1apVAHR1dbFu3TpWr17Ne97znmG1ND/60Y+ydu1aTjjhBK699logKEi9c+dOzj33XM4991zg0HJDAF/96ldZtWoVq1at4mtf+9rQ5422DNFkZHUdnpldCPwrEANudvevjNh/GfCZ8Gk78FF3/1M2r823quQo9TQB6l4DmGZqikhu3XsN7Ho6t+/ZeCJcNPaf13Xr1vGpT32Kv/qrvwLgjjvu4Je//CWf/vSnqaqqYu/evZxxxhm87W1vO7zcYugb3/gGZWVlPPXUUzz11FOsWbNmaN+Xv/xl5syZw8DAAOeddx5PPfUUn/jEJ/jqV7/K/fffT3398GuaH3vsMb7zne/w6KOP4u6cfvrpnH322dTW1ma9DNFEjNvDM7MYcD1wEXA8cKmZHT/isBeBs919NfAPwE0TeG1ejbpEEEAiCbVHq4cnIgXhlFNOYc+ePezcuZM//elP1NbWMn/+fD73uc+xevVqzj//fF555RV279496nv8/ve/Hwqe1atXs3r16qF9d9xxB2vWrOGUU05h48aNPPvss2O258EHH+Qd73gH5eXlVFRU8M53vpM//OEPQPbLEE1ENj2804Ct7r4NwMxuBy4Ghn4Sd38o7fhHgIXZvjbfqkrjmZcISqlfriLSIpJb4/TEptIll1zCnXfeya5du1i3bh233norzc3NPPbYYyQSCZYsWZJxWaB0mXp/L774Itdddx3r16+ntraWK664Ytz3GauWc7bLEE1ENufwFgA70p43hdtG8yHg3om+1syuMrMNZrahubk5i2blxqhLBKXUL4eW52FwYNraJCIyVdatW8ftt9/OnXfeySWXXMKBAweYO3cuiUSC+++/n5deemnM15911lnceuutADzzzDM89dRTALS1tVFeXk51dTW7d+/m3nvvHXrNaMsSnXXWWfz0pz+ls7OTjo4OfvKTn/CGN7whhz/tcNn08DIN5GaMZTM7lyDwXj/R17r7TYRDoWvXrp22JRyqkgnauka5LAGCwOvvhgM7oHbJdDVLRGRKnHDCCRw8eJAFCxYwf/58LrvsMt761reydu1aTj75ZFauXDnm6z/60Y/ygQ98gNWrV3PyySdz2mmnAXDSSSdxyimncMIJJ3DMMcdw5plnDr3mqquu4qKLLmL+/Pncf//9Q9vXrFnDFVdcMfQeV1
55JaecckpOhi8zGXd5IDP7M+AL7v7n4fPPArj7/xxx3GrgJ8BF7r5lIq8dabqWBwL4t98+z7/8egtb/sdFFMczdHhfehi+cyFcdicsu2Ba2iQihUfLA02NiSwPlM2Q5npgmZktNbNiYB1w94g3XwzcBVyeCrtsX5tvVWMVkIZgIVjQ6uciIrPcuEOa7t5vZh8H7iO4tOAWd99oZleH+28EPg/UATeEJzP73X3taK+dop/liFSnFZCuryg5/ICyOVBWp5maIiKzXFbX4bn7PcA9I7bdmPb4SuDKbF87k4xZQDpFMzVFRGa9yFdaqSoNMn/cmZp7NaQpIpMz3pwJmZiJfp+RD7xDi8COM1OzswU6WqapVSJSaJLJJC0tLQq9HHF3WlpaSCaTWb8mqyHNQjbuEkFwqKZmy/NQXjcNrRKRQrNw4UKampqYzuuMC10ymWThwoXjHxhS4GVzDq8hDLzmzbD4jGlolYgUmkQiwdKlS/PdjEiL/JBmMhGjOF40duBVL4J4UjM1RURmscgHHoxTQBqgKBasnKCZmiIis5YCD6hKxsc+hweaqSkiMssp8MiigDQEgbfvJegbu/q3iIjMTAo8gokrYxaQBqhfBji0vjAtbRIRkdxS4JFlD081NUVEZjUFHuESQWNNWgGYcyxgmrgiIjJLKfAIZ2l29TE4OEYFhOIyqFmkSxNERGYpBR5BPc1Bh47e8c7jrdBMTRGRWUqBx/AlgsZUvxz2boXBwWlolYiI5JICj/Qlgsbp4TUsh/4uaGuahlaJiEguKfDIsoA0HCoi3azzeCIis40Cj7QC0uPN1EwFniauiIjMOgo8JnAOr7weSuco8EREZiEFHlkuEZRSv1yBJyIyCynwgMqSOGbZBt4yBZ6IyCykwAOKiozKkixWTICgxFhHM3S2Tn3DREQkZxR4oarSBG3d41yWAGkTV1RiTERkNlHghbIqIA3hqgloWFNEZJZR4IWqkonszuHVHA2xEgWeiMgso8ALZd3DK4pB3WsUeCIis4wCL1RVGh//wvMUzdQUEZl1FHihrHt4EMzU3Lcd+numtE0iIpI7CrxQVTJBd98gPf0D4x9cvxx8EFpemPqGiYhITijwQtVlWa6YAJqpKSIyCynwQtXZFpAGqEsFnq7FExGZLRR4oayXCAIoLoPqxVr9XERkFlHghSZUQBo0U1NEZJZR4IWqS+NAlj08CGZq7n0eBgensFUiIpIrCrzQoUVgs5i0AkEPr68T2l6ZwlaJiEiuKPBCqXN42Q9pavVzEZHZRIEXSiZilMSLsh/SrF8R3GumpojIrKDAS1NVmmUBaYDyekjWaKamiMgsocBLM6HyYmbBsKZ6eCIis4ICL01VcgIFpAEaluscnojILKHASzOhHh4EPbz23dC1f8raJCIiuaHASxOcw8vysgTQxBURkVlEgZdm4j08FZEWEZktFHhpqpIJDnb3MTjo2b2g5miIFWumpojILKDAS1NdmmDQob03y2HNWBzqXqMhTRGRWUCBl6Z6ogWkQUWkRURmCQVemqqJFpCGYKZm64vQ3ztFrRIRkVxQ4KU5tETQBGdq+gC0bpuiVomISC4o8NJMaBHYlKGZmpq4IiIyk2UVeGZ2oZltNrOtZnZNhv0rzexhM+sxs78dse+TZvaMmW00s0/lqN1T4ojP4YHO44mIzHDjBp6ZxYDrgYuA44FLzez4EYe1Ap8Arhvx2lXAh4HTgJOAvzCzZTlo95Q4tCbeBAKvuByqF2mmpojIDJdND+80YKu7b3P3XuB24OL0A9x9j7uvB0YmxXHAI+7e6e79wO+Ad+Sg3VOisiSO2QSHNCHo5TVrSFNEZCbLJvAWADvSnjeF27LxDHCWmdWZWRnwZmBRpgPN7Coz22BmG5qbm7N8+9wqKjKqkglaOyY44zK1aoJnecG6iIhMu2wCzzJsy+ovu7s/B/wT8Gvgl8CfgIxTIN39Jndf6+5rGxoasnn7KbG0vpwXmtsn9qL65dDXAW07p6ZRIiIyadkEXhPDe2ULgaz/srv7t919jbufRXCub0af7FrZWMnmXQfxifTW6pcH95qpKSIyY2UTeOuBZWa21MyKgXXA3dl+gJnNDe8XA+8EbjuShk6XFY2V7Ovso/lgT/YvGgq8GZ3lIiKRFh/vAHfvN7OPA/cBMeAWd99oZleH+280s0ZgA1AFDIaXHxzv7m3Aj82sjmBCy8fcfd8U/Sw5saKxEoBNuw4ytyqZ3Ysq5kKyGnY9PYUtExGRyRg38ADc/R7gnhHbbkx7vItgqDPTa98wmQZOt5WNVQBs3nWQs5ZneS7RDI59I2y+Bwb6IJaYwhaKiMiRUKWVEeaUF9NQWcKmXQcn9sIT3w2dLfDC/VPTMBERmRQFXgYrGyvZvLttYi96zfmQrIGnfzQlbRIRkclR4GWwYl4lz+9uZyDbhWAB4sVwwtth0y+gt2PK2iYiIkdGgZfBisZKevoH2d4yweA68V3B9Xib752ahomIyBFT4GWQPnFlQha/DqoWaFhTRGQGUuBlsGxeBUXGxCeuFBXBqr+Erb+BjpapaZyIiBwRBV4GyUSMJXXlbN41wYkrEAxrDvbDsz/NebtEROTIKfBGsSIsMTZhjSdCw0oNa4qIzDAKvFGsaKzkpdZOOnsz1roenRmceAm8/DDsf3lqGiciIhOmwBvFysZK3OH53RNcOQGCYU2Ap+/MbaNEROSIKfBGseJIZ2oC1C6Bhacp8EREZhAF3igWzykjmSia+EzNlNXvhj0bYffG3DZMRESOiAJvFLEiY/m8IygxlnL828FimrwiIjJDKPDGsGLeEc7UBKhoCFZQePpOGBzMbcNERGTCFHhjWNFYyd72Xva2T2Ax2HQnvgsO7IAdj+a2YSIiMmEKvDGkSoxtOdJe3sq3QLwUnr4jh60SEZEjocAbw/LGCuAISoyllFTAyjfDxp9Af28OWyYiIhOlwBtDQ0UJc8qLj/w8HgQLw3btg21aGFZEJJ8UeGMwM1bMq2TT7kkE3rFvhNJaeErDmiIi+aTAG8eKxkqe332QwYksBpsuXgwnvAM23wM9R1C1RUREckKBN46VjZV09g6wY1/nkb/Jie+Cvs4g9EREJC8UeONY0VgJTGLiCsCiM6B6kS5CFxHJIwXeOJbPCwJvUhNXhhaG/S107M1Ry0REZCIUeOMoL4mzeE7Z5AIPgmFNHwguURARkWmnwMvCisZKNh3J6ufpGlfB3OM1rCkikicKvCysbKxke0sn3X0Dk3ujEy8Jyozt256TdomISPYUeFlY0VjJwKCzdc8kLytYdUlwr3XyRESmnQIvCysbczBxBaD26GDG5tM/Aj/C6/pEROSIKPCysKSunOJ4EZsnU3ElZfW7oHkT7H5m8u8lIiJZU+BlIR4r4jUNFZO7Fi/l+HdAUVyTV0REppkCL0srGyvZPNmZmgDldXDsefD0j7UwrIjINFLgZWlFYyW723rY35mDZX5OfBe0NcHLD0/+vUREJCsKvCzlpMRYyso3Q6JMw5oiItNIgZel1Ornk56pCVBcHqyG/uxPtTCsi
Mg0UeBlaV5VCdWlidz08ODQwrAv/DY37yciImNS4GXJzFiRq4krAMeeC2V1WhhWRGSaKPAmYGVjJVt2t+O5uGg8lggXhr0XenLUaxQRkVEp8CZgRWMl7T39NO3rys0bnvgu6O+CTb/IzfuJiMioFHgTkLMSYymLTofapfC7f4LuHA2ViohIRgq8CViWWgw2FyXGAMzg7TfAvpfg7v+m+poiIlNIgTcBVckEC2pKczdTE+Do18F5fx9corD+5ty9r4iIDKPAm6CcztRMed0nYdmb4L7PwSuP5/a9RUQEUOBN2IrGSrY1d9Dbn8M6mEVF8I5vQvlc+NEV0LU/d+8tIiKAAm/CVjZW0j/obNs7ycVgRyqbA+/6LrS9Aj/7mM7niYjkmAJvglbkeqZmukWnwgVfgk0/h0duyP37i4hEmAJvgo6pryBeZLmduJLujL+ClX8Bv/487Fg/NZ8hIhJBCrwJKo4XcWxDxdT08CC4VOHi66FqQXA+r7N1aj5HRCRiFHhHIJipOYXlwEprgvN5HXvgJx/RQrEiIjmQVeCZ2YVmttnMtprZNRn2rzSzh82sx8z+dsS+T5vZRjN7xsxuM7NkrhqfLysaK3llfxdt3X1T9yEL1sCf/yM8/yt46F+n7nNERCJi3MAzsxhwPXARcDxwqZkdP+KwVuATwHUjXrsg3L7W3VcBMWBdDtqdV6kSY1umspcHcOqVQYHp3/4DbP+vqf0sEZECl00P7zRgq7tvc/de4Hbg4vQD3H2Pu68HMnV54kCpmcWBMmDnJNucdzld/XwsZvDWr0PtErjzg9DePLWfJyJSwLIJvAXAjrTnTeG2cbn7KwS9vpeBV4ED7v6rTMea2VVmtsHMNjQ3z+w/7AtqSqksiU/tebyUZBW8+3vQvR/uuhIGB6b+M0VEClA2gWcZtmV1VbSZ1RL0BpcCRwHlZva+TMe6+03uvtbd1zY0NGTz9nljZiyf6okr6RpPhIv+GbY9AL+/btzDRUTkcNkEXhOwKO35QrIfljwfeNHdm929D7gLeN3EmjgzrWisZNOuttwsBpuNNf8PrH4PPPA/g+ATEZEJySbw1gPLzGypmRUTTDq5O8v3fxk4w8zKzMyA84DnjqypM8vKxkrauvvZ1dY9PR9oBm/5KtQvhx9fCQd3Tc/niogUiHEDz937gY8D9xGE1R3uvtHMrjazqwHMrNHMmoC/Bv7OzJrMrMrdHwXuBB4Hng4/76Yp+lmm1Yp50zRxJV1JRXA+r7cD7vwQDPRP32eLiMxyWV2H5+73uPtydz/W3b8cbrvR3W8MH+9y94XuXuXuNeHjtnDfte6+0t1Xufvl7t4zdT/O9FnZWAVMUU3Nscw9Dt7yL/DSg8GisT3T/PkiIrOUKq0coeqyBI1VyekPPICT3wtv+Fv4021w/emw+ZfT3wYRkVlGgTcJwcSVPPWwzvt7+NCvoaQKbnsP/OgD0L4nP20REZkFFHiTsLKxkhf2tNM3kKdal4tOhY/8Hs79u2BJof99KjzxA62lJyKSgQJvElY0VtI7MMj2vR35a0S8GM7+73D1fwXn9372Mfj+xdC6LX9tEhGZgRR4k7A8HzM1R9OwHK64J7h0YecTcMPr4MGvaSaniEhIgTcJr5lbQazI8jNxJZOiIjj1Q/CxR+HYN8JvroVvnQs7n8x3y0RE8k6BNwnJRIwldWUzo4eXruooWHcrvPv70L4bvvVG+NXfQ29nvlsmIpI3CrxJWtlYxebdbfluxuHM4PiLg97eKe+Dh74O3/gzlSUTkchS4E3SisZKdrR20d4zQ8+VldbC274O7/85WCyY0PKDS+Dx70PH3ny3TkRk2ijwJim1Nt6W3TNsWHOkpW+Aj/4XnPNZ2Ls5qNJy3TL4zpvh4Rtg30v5bqGIyJRS4E1SavXzGTNxZSyJUjjnGvjkU/CRP8BZ/x269sN9n4V/XQ03vgF+98+w+1ldyyciBSee7wbMdotqyygrjs2OwEsxg/mrg9u5n4OWF2DTL4KL1+//R7j/yzDnGFj5F8Ft4anBDFAREXcY7If+bujvDe+7ob8nuB/oHf78sPvU457D953/BZizdMqarsCbpKIiY9m8YG28WavuWDjzE8Ht4G7Y/At47ufwyDeCyS4V82DFm2HpWdCwAuYcC4lkvlstEi0jg2agZ8Tj8Jb+eOh5GEID6SHTOyKgRj5PD7QRYeaTrS5lEE9CvGT4fd/UziRX4OXAynmV/OrZXbg7wbJ/s1jlPFj7weDWfQC2/Ao2/R946g547DvhQQa1Rwdr89Uvh/plUL8ieFxel9fmi+TMQH/wBz4VGAPhLRUiA33DA2XYfU/asWn3w7Z1j338Ye/dA+TiVEMqbIqD+1hJWvCE24oroKwu3Ja2PXXcsNeUpN2SECsOTp8M7Q+3DXt9IhhpmmYKvBxY0VjJDzfsoLm9h7mVBdTzSVbD6ncFt/4e2LslvD0f3DdvgRd/H/zDTSmdkxaCy4MeYfUiKJsT7IsX5+/nkZkj1VtJhcBA3/DH/T2HbxvoOfzY9CAaOqZveMAMBclY+0cGW08OejFpiuJhSBSPuC8JwyAMgmR1uG3EvqH7tHAZCpHUe6WHWHHacSP2FcXzEjYzgQIvB9InrhRU4KWLl0DjicEt3eAgHNhxKARTgbjll/DEvx/+PsWVUFYb/Ndj6ZwgCIc9DoOxrA5KKg//L8WI/kPNaHAQBvuCP+CDfTA4cOjxQPh86HH/8H3Djus/FBjDHvcd6uUMva730OvH2j70+gxhlno8FYoS4R/5RPCHPlYcBkLaLV4CyapwfyJDqCTSAicxPKCG3m/E40wBlh5SOgc+IyjwcmBFWuC9YVlDnlszzYqKguHN2qNh2fnD93W2BuF3cGfwuKs1uO9shc6W4HnrtuB5z4HsPi9WEpw/jKffStKCsTQcLikKbkWx8HF4X1Q04nn6fgtnp/qIezJsS9vng+ADQQANPR5Iu/fh23wwfDwYBMzQbSDD84EM+8NgycnwVhYsFny3RYlDAREb8bgoLThKKsNt8UN/+IeOLR7xHiO3FY94XfHhx8RLMry2JG/DZDJ7KPByoK6ihHlVJTyyrZUr33BMvpszc5TNgcWnZ3fsQB907UsLxhboaYf+rmD4qS+8P+x596FbXzd0twXvlQqe9HBJ3Yaep/an7TMDLO0ewv/JsC/t3mJp4ZkK0vG2xYKgLoqn3WJZPg8DJZb+OHHouNjIbYlDx6bCKj3Aho7NsF8hIgVCgZcjf7lmITf+7gV2tHayaE5Zvpsz+8QSUDE3uImITAENLOfI+844GjPjB4+oYomIyEykwMuRo2pKufCERm7748t09s7QupoiIhGmwMuhK85cQlt3Pz99Yme+myIiIiMo8HJo7dG1nHBUFd996EVctShFRGYUBV4OmRlXvG4JW3a389ALLflujoiIpFHg5dhbTzqKOeXFfOe/tue7KSIikkaBl2PJRIz3nraY327azcstU1sIVUREsqfAmwLvO+NoYmZ8/+Ht+W6KiIiEFHhToLE6yUUnzueHG3bQ0aNLFEREZgIF3hS5
4nVLONjdz11PvJLvpoiICAq8KbNmcQ2rF1bz3f/SJQoiIjOBAm+KpC5ReKG5gwe37s13c0REIk+BN4Xesno+9RXFfFeXKIiI5J0CbwqVxGO89/Sj+c/Ne9i+tyPfzRERiTQF3hR73+mLw0sUtIqCiEg+KfCm2NyqJG9ZPZ8fbdhBuy5REBHJGwXeNLjidUs42NPPjx9ryndTREQiS4E3DU5ZXMtJi2r43kPbGRzUJQoiIvmgwJsmHzxzCdv2dvD755vz3RQRkUhS4E2Ti1bNp6GyhO8+tD3fTRERiSQF3jQpjhfxvtOP5oHNzWxrbs93c0REIkeBN43ee/piEjFdoiAikg8KvGnUUFnCW1cfxY827OBgd1++myMiEikKvGn2/tctoaN3gDt1iYKIyLRS4E2zkxbVsGaxLlEQEZluCrw8uOLMpWxv6eSBLXvy3RQRkchQ4OXBRasamVdVwne0ioKIyLRR4OVBIlbE5WcczR+e38vWPQfz3RwRkUhQ4OXJpactpjhexPce0iUKIiLTIavAM7MLzWyzmW01s2sy7F9pZg+bWY+Z/W3a9hVm9mTarc3MPpXD9s9adRUlvO2ko/jx400c6NIlCiIiU23cwDOzGHA9cBFwPHCpmR0/4rBW4BPAdekb3X2zu5/s7icDrwU6gZ/koN0F4YrXLaGzd4AfPKJenojIVMumh3casNXdt7l7L3A7cHH6Ae6+x93XA2N1Vc4DXnB3/XUPrVpQzfnHzeVffrWZXzz1ar6bIyJS0LIJvAXAjrTnTeG2iVoH3DbaTjO7ysw2mNmG5uborCjw9UtPYc3iWj55+xP89rnd+W6OiEjByibwLMO2CV0xbWbFwNuAH412jLvf5O5r3X1tQ0PDRN5+VisrjnPLB07l+KOq+OgPHucPWj5IRGRKZBN4TcCitOcLgZ0T/JyLgMfdXV2YDKqSCb7/wdM4pqGcD39/A49ua8l3k0RECk42gbceWGZmS8Oe2jrg7gl+zqWMMZwpUFNWzA+uPJ0FNaV88LvreeLlfflukohIQRk38Ny9H/g4cB/wHHCHu280s6vN7GoAM2s0sybgr4G/M7MmM6sK95UBFwB3TdUPUSjqK0q49cozqKso4f23/JGNOw/ku0kiIgXD3GdeAeO1a9f6hg0b8t2MvNnR2sl7vvkw3f2D/PCqM1g2rzLfTRIRmTXM7DF3XztyuyqtzECL5pRx64fPIFZkXHbzo2zf25HvJomIzHoKvBlqaX05t155On0Dg1x286M07evMd5NERGY1Bd4MtnxeJf/+odNp6+7jspsfZXdbd76bJCIyaynwZrhVC6r53gdPY+/BHt77rUfY296T7yaJiMxKCrxZYM3iWr59xam8sr+Ly7/9R/Z39ua7SSIis44Cb5Y445g6vnn5Wl7Y0877b/kjB7u1woKIyEQo8GaRs5c3cP1la9i4s40PfXcDnb39+W6SiMisocCbZS44fh7/6z0ns+GlVt76bw/yi6deZXBw5l1LKSIy0yjwZqG3nnQU337/qZgZH/uPx3nLvz3Ib57dzUwsIiAiMlMo8Gapc1fO5b5PncX/es9JdPb2c+X3N/D2Gx7i91uaFXwiIhmotFgB6BsY5K7Hm/j6b7fyyv4uTlsyh79503JOP6Yu300TEZl2o5UWU+AVkJ7+AX64fgf/+z+3sudgD69/TT1//ablrFlcm++miYhMGwVehHT3DfCDR17ihgdeoLWjl/NWzuXTFyxn1YLqfDdNRGTKKfAiqL2nn+89tJ1v/u4F2rr7uWhVI5++YDnLtfqCiBQwBV6EHejq49sPvsi3/7CNzr4BzlrWwLkrGjhnxVyW1Jfnu3kiIjmlwBNaO3q5+Q/buOfpV9neEqy+sKSujHNWzOXsFQ382TF1JBOxPLdSRGRyFHgyzPa9HTyweQ8PbGnm4Rda6OkfpCRexBnH1HFO2Ptbqt6fiMxCCjwZVXffAI9sa+GBzc38bkszL4YLzh5dV8Y5yxs4Z+Vc9f5EZNZQ4EnWXmrp4IHNzTyweQ8Pb2uhuy/o/Z20sIbj5leycn4VKxsrWdFYSVlxPN/NFREZRoEnR6S7b4BHX2zld5ub+VPTfja92kZH7wAAZnD0nDJWNlaxcn4lx82v4rjGKhbWllJUZHluuYhE1WiBp/88lzElEzHOXt7A2csbABgcdF7Z38Vzr7axadfBofv7nt1F6r+dyotjrGgMeoLHNVZy7NwKFtWWMb86STymanYikh/q4UlOdPb2s2V3O5vSgvC5V9to6z60hFG8yDiqppRFc0pZPKeMhbVlLJ5TxqI5wX1tWQIz9QxFZHLUw5MpVVYc5+RFNZy8qGZom7vz6oFuXtzbwY7WTl5u7WTHvi5ebu3kVxt309IxfOX28uIYi8IAXFRbxoLaUhqrkjRWlzCvKsncyiTFcfUQReTIKPBkypgFPbqjakoz7u/o6WfHvk52tHYNBWLTvk5eaungwef30tU3cNhr6iuKmVeVpLEqybzqJPPD+yAYk8yrSlKVjKunKCKHUeBJ3pSXxIMJL41Vh+1zd/Z39rGrrZtdbd3sPhDet3Xz6oFuXtnfxeMv72NfZ99hr00mimioLKGhoiS4ryyhoSJ56HF4q68opiSuSy1EokKBJzOSmVFbXkxteTHHzT88EFO6+wbY09YzFIy7DnSxp62Hve09NLf38OLeDv74YmvGYASoLk0MhWN9ZQl15cU0hPd1FUEo1leUUF9RQmmxwlFkNlPgyayWTMRYXFfG4rqyMY/r7R+kpaOH5oMjbu3B/Z6DPTzdtJ+W9l4O9vRnfI+y4hh1YQDWlR8Kw7qKYuaUB4/nlBcHz8uKNSNVZIZR4EkkFMeLmF9dyvzqzOcT03X3DdDS0UtLe9BT3NveS0t7L3vbe2hp76Glo5emfZ38qWk/rR29DAxmnulcU5YIgrD8UBDWlReHj4Ne5JwwLBWQIlNPgScyQjIRY0FNKQtGmWyTbnDQ2d/VR2tHDy3tvUFQhmHZ2tEbbuvhheZ21m/vpbWzl9GuBEoFZCoU55SXpAVkalsxdWGAasaqyMQo8EQmoajIhoLoNXPHP35g0NnX2UtrR9BjbO3oHQrGoccdwbnHx17aR2tHL6N0IKksiQ/1EOvKi6ktC3qMdSPCMnUrK45p9qpEmgJPZBrFimxoEkw2C/EODjoHuvpo6UgFYjCk2hr2JlMhuXN/N8+80kZrRy+9A4MZ36skXjQ0jFpblhaMFcODMRWgVcmESsRJQVHgicxgRUWHZqtmw91p7+kPe4pBMA497uihtaMvvO/lxb0d7OvoHaqNOlKsyIJeY3li2FBqaoh1KDR1HlJmCQWeSAExMyqTCSqTCY6uy249w+6+gbTh1MODMTXc+tyuoAe5f5RLPCC4zGNkT/Hw3mPJ0NCrlpyS6aTAE4m4ZCI2ZkWckfoHBtnX2Td0vnFfGI4taaG5r6OXHa2dPLljP/s6eukf5URkaSJ22KScOSPORQ4FZ0UxlSWqoiNHToEnIhMSjxUNVauB8c9DujttXf1BOHYe6jGmAnLf0MzWXp7f3U5
LRw/dfZnPQxbHiqgtTxw2KScViCNDsrpU5yHlEAWeiEwpM6O6LEF1WSLr13T29h+audqZ6Vxk8HjHvk5axygWEJyHTAwbTh0ZmIfCsoTasoTOQxYwBZ6IzDhlxXHK5sRZNGfsCjopvf2DI3qPPSN6kj1Zn4cc73rIurRCArVluh5yNlHgicisVxwvYl5VsFpGNkaeh8x0PeTe9h62NXewYfs+9nWOcT1kMj5UVi4VlME5ycxBqYDMHwWeiETORM9DDoTXQ7am9Rz3Dl32cWjCzsstnTzx8n72dY5ecq6yJD40SWeoxFza46F9CsicU+CJiIwjNsGKOoODTlt3H3vbhxcMGDnMmprJOlZN1pE9yPq0QEzvOdZXBNdrJnQOclQKPBGRHCsqMmrKiqkpy65gQCoghy7taB9eUSdVnzWbgKwuTQwfSk31HIf1IkuGzkHGIjSLVYEnIpJn6QF5bMP4x4/sQba09wwNsbYM9SaDmqxjnYM0I63M3PDlruoqSqhPC8fUZR6z+TpIBZ6IyCwz0R7kwKCzv/PQ9Y4t6at7tPcMq6bT0t7Lga7Ms1jj4dBuanHk9Hqs9RWHhllTwVlWPLMiZma1RkREci5WZGFPrQTmjX9838DgsOWtglmrh8KxpSNYJ/Kllk5a2ntGrcdamogNDavWjxhira84dA5yus4/KvBERGSYRGxil3l09Q6k9RoP7z3u7ehlV1s3G3e20dLRQ99A5vOPd/3V61izuDaXP8owCjwREZmU0uIYC4vLWFg7fqEAd6etu39oYk5Le0/Ye+xlURavnwwFnoiITBszo7o0QXVpgmOymKCTS7pgQ0REIiGrwDOzC81ss5ltNbNrMuxfaWYPm1mPmf3tiH01ZnanmW0ys+fM7M9y1XgREZFsjTukaWYx4HrgAqAJWG9md7v7s2mHtQKfAN6e4S3+Ffilu19iZsXA1A7SioiIZJBND+80YKu7b3P3XuB24OL0A9x9j7uvB4ZdvGFmVcBZwLfD43rdfX8uGi4iIjIR2QTeAmBH2vOmcFs2jgGage+Y2RNmdrOZlWc60MyuMrMNZrahubk5y7cXERHJTjaBl6mOzCgLZRwmDqwBvuHupwAdwGHnAAHc/SZ3X+vuaxsapnnqjoiIFLxsAq8JWJT2fCGwM8v3bwKa3P3R8PmdBAEoIiIyrbIJvPXAMjNbGk46WQfcnc2bu/suYIeZrQg3nQc8O8ZLREREpsS4szTdvd/MPg7cB8SAW9x9o5ldHe6/0cwagQ1AFTBoZp8Cjnf3NuC/AbeGYbkN+MDU/CgiIiKjy6rSirvfA9wzYtuNaY93EQx1Znrtk8DaI2+iiIjI5KnSioiIRIICT0REIkGBJyIikaDAExGRSFDgiYhIJCjwREQkEhR4IiISCQo8ERGJBAWeiIhEggJPREQiQYEnIiKRYO7ZLm03fcysGXgpB29VD+zNwfsUGn0vmel7yUzfS2b6XjKbCd/L0e5+2MKqMzLwcsXMNri7ClePoO8lM30vmel7yUzfS2Yz+XvRkKaIiESCAk9ERCKh0APvpnw3YIbS95KZvpfM9L1kpu8lsxn7vRT0OTwREZGUQu/hiYiIAAo8ERGJiIIMPDO70Mw2m9lWM7sm3+2ZScxsu5k9bWZPmtmGfLcnX8zsFjPbY2bPpG2bY2a/NrPnw/vafLYxH0b5Xr5gZq+EvzNPmtmb89nGfDCzRWZ2v5k9Z2YbzeyT4fZI/86M8b3MyN+ZgjuHZ2YxYAtwAdAErAcudfdn89qwGcLMtgNr3T3fF4bmlZmdBbQD33f3VeG2fwZa3f0r4X8o1br7Z/LZzuk2yvfyBaDd3a/LZ9vyyczmA/Pd/XEzqwQeA94OXEGEf2fG+F7ezQz8nSnEHt5pwFZ33+buvcDtwMV5bpPMMO7+e6B1xOaLge+Fj79H8A83Ukb5XiLP3V9198fDxweB54AFRPx3ZozvZUYqxMBbAOxIe97EDP4/IA8c+JWZPWZmV+W7MTPMPHd/FYJ/yMDcPLdnJvm4mT0VDnlGathuJDNbApwCPIp+Z4aM+F5gBv7OFGLgWYZthTVuOzlnuvsa4CLgY+EQlshYvgEcC5wMvAr8S15bk0dmVgH8GPiUu7fluz0zRYbvZUb+zhRi4DUBi9KeLwR25qktM4677wzv9wA/IRgClsDu8JxE6tzEnjy3Z0Zw993uPuDug8C3iOjvjJklCP6o3+rud4WbI/87k+l7mam/M4UYeOuBZWa21MyKgXXA3Xlu04xgZuXhiWXMrBx4E/DM2K+KlLuB94eP3w/8LI9tmTFSf9BD7yCCvzNmZsC3gefc/atpuyL9OzPa9zJTf2cKbpYmQDgF9mtADLjF3b+c3xbNDGZ2DEGvDiAO/EdUvxszuw04h2Apk93AtcBPgTuAxcDLwLvcPVITOEb5Xs4hGJpyYDvwkdR5q6gws9cDfwCeBgbDzZ8jOF8V2d+ZMb6XS5mBvzMFGXgiIiIjFeKQpoiIyGEUeCIiEgkKPBERiQQFnoiIRIICT0REIkGBJ1KAzOwcM/t5vtshMpMo8EREJBIUeCJ5ZGbvM7M/hmuGfdPMYmbWbmb/YmaPm9lvzawhPPZkM3skLMj7k1RBXjN7jZn9xsz+FL7m2PDtK8zsTjPbZGa3hlUxRCJLgSeSJ2Z2HPAegoLeJwMDwGVAOfB4WOT7dwTVTgC+D3zG3VcTVLZIbb8VuN7dTwJeR1CsF4LK9Z8CjgeOAc6c4h9JZEaL57sBIhF2HvBaYH3Y+SolKD48CPwwPOYHwF1mVg3UuPvvwu3fA34U1kZd4O4/AXD3boDw/f7o7k3h8yeBJcCDU/5TicxQCjyR/DHge+7+2WEbzf5+xHFj1f8ba5iyJ+3xAPr3LhGnIU2R/PktcImZzQUwszlmdjTBv8tLwmPeCzzo7geAfWb2hnD75cDvwrXHmszs7eF7lJhZ2XT+ECKzhf6LTyRP3P1ZM/s7ghXoi4A+4GNAB3CCmT0GHCA4zwfB8jM3hoG2DfhAuP1y4Jtm9qXwPd41jT+GyKyh1RJEZhgza3f3iny3Q6TQaEhTREQiQT08ERGJBPXwREQkEhR4IiISCQo8ERGJBAWeiIhEggJPREQi4f8CZ11/KLbip9gAAAAASUVORK5CYII=\n", - "text/plain": [ - "
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loss\n", - "\ttraining \t (min: 0.161, max: 0.228, cur: 0.161)\n", - "\tvalidation \t (min: 0.176, max: 0.242, cur: 0.177)\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
idx | Recommender | HR@1 | HR@3 | HR@5 | HR@10 | NDCG@1 | NDCG@3 | NDCG@5 | NDCG@10
0 | NetflixRecommender | 0.042777 | 0.106614 | 0.143139 | 0.200395 | 0.042777 | 0.078228 | 0.093483 | 0.111724
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "from recommenders.netflix_recommender import NetflixRecommender\n", - "\n", - "netflix_recommender = NetflixRecommender(n_epochs=30, print_type='live')\n", - "\n", - "netflix_tts_results = [['NetflixRecommender'] + list(evaluate_train_test_split_implicit(\n", - " netflix_recommender, interactions_df, items_df))]\n", - "\n", - "netflix_tts_results = pd.DataFrame(\n", - " netflix_tts_results, columns=['Recommender', 'HR@1', 'HR@3', 'HR@5', 'HR@10', 'NDCG@1', 'NDCG@3', 'NDCG@5', 'NDCG@10'])\n", - "\n", - "display(HTML(netflix_tts_results.to_html()))" - ] - }, - { - "cell_type": "code", - "execution_count": 435, - "id": "moderate-printing", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
idx | Recommender | HR@1 | HR@3 | HR@5 | HR@10 | NDCG@1 | NDCG@3 | NDCG@5 | NDCG@10
0 | NNRecommender | 0.025008 | 0.035209 | 0.066469 | 0.116815 | 0.025008 | 0.031100 | 0.043697 | 0.059459
1 | AmazonRecommender | 0.042119 | 0.104640 | 0.140507 | 0.199408 | 0.042119 | 0.076826 | 0.091797 | 0.110711
2 | NetflixRecommender | 0.042777 | 0.106614 | 0.143139 | 0.200395 | 0.042777 | 0.078228 | 0.093483 | 0.111724
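The comparison above ranks the recommenders by HR@k and NDCG@k. As a reference, the snippet below is a standard formulation of these metrics for implicit-feedback evaluation with a single held-out relevant item per test case; it is an assumption about the metric definitions, not the code of `evaluate_train_test_split_implicit`, and the function names are illustrative only.

```python
import numpy as np

def hit_rate_at_k(recommended_items, relevant_item, k):
    # 1 if the held-out item appears anywhere in the top-k list, else 0
    return int(relevant_item in recommended_items[:k])

def ndcg_at_k(recommended_items, relevant_item, k):
    # With a single relevant item, DCG is 1 / log2(rank + 2) for a 0-based rank
    # and the ideal DCG is 1, so NDCG reduces to that single term.
    top_k = list(recommended_items[:k])
    if relevant_item in top_k:
        rank = top_k.index(relevant_item)
        return 1.0 / np.log2(rank + 2)
    return 0.0

# Example: the relevant item sits at 0-based rank 2 of the recommendation list
print(hit_rate_at_k([5, 8, 3, 1], relevant_item=3, k=10))  # 1
print(ndcg_at_k([5, 8, 3, 1], relevant_item=3, k=10))      # 1 / log2(4) = 0.5
```

Under this formulation NDCG@1 equals HR@1, which is why those two columns coincide in every results table in these notebooks.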
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "tts_results = pd.concat([nn_tts_results, amazon_tts_results, netflix_tts_results]).reset_index(drop=True)\n", - "display(HTML(tts_results.to_html()))" - ] - }, - { - "cell_type": "markdown", - "id": "uniform-vegetable", - "metadata": {}, - "source": [ - "# Summary\n", - "\n", - "**Task:**
\n", - "Write a summary of your experiments. What worked well and what did not? What are your thoughts how could you possibly further improve the model?" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8f5451ec", - "metadata": {}, - "outputs": [], - "source": [ - "Na początku bezmyślnie użyłem BCELoss, \n", - "to był duży błąd, który kosztował mnie godzinę szukania w internecie, dlaczego ciągle zwraca mi tylko item-id=0\n", - "\n", - "Wyższe \"accuracy\" w testach != lepszy wynik w predykcjach \n", - "\n", - "Fitting nie zawsze znajduje najlepszy możliwy parametr. Miałem przypadek gdzie został wybrany 5, a dawał HR 0.05, podczas gdy 6 dawał 0.08\n", - "\n", - "Dodanie dropout potrawfi znacząco zwiększyć wyniki. Dropout podniósł HR10 z 0.035 do 0.11 \n", - "(niestety, w trakcie dalszych prób udoskonalenia, gdzieś zagubiłem to rozwiązanie)\n", - "\n", - "Podsumowanie:\n", - "\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "rek_uno", - "language": "python", - "name": "rek_uno" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.8" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/project_2_recommender_and_evaluation-Copy1.ipynb b/project_2_recommender_and_evaluation-Copy1.ipynb deleted file mode 100644 index 529fb99..0000000 --- a/project_2_recommender_and_evaluation-Copy1.ipynb +++ /dev/null @@ -1,1687 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 17, - "id": "alike-morgan", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The autoreload extension is already loaded. 
To reload it, use:\n", - " %reload_ext autoreload\n" - ] - } - ], - "source": [ - "%matplotlib inline\n", - "%load_ext autoreload\n", - "%autoreload 2\n", - "\n", - "import numpy as np\n", - "import pandas as pd\n", - "import matplotlib.pyplot as plt\n", - "import seaborn as sns\n", - "from IPython.display import Markdown, display, HTML\n", - "from collections import defaultdict\n", - "\n", - "import torch\n", - "import torch.nn as nn\n", - "import torch.optim as optim\n", - "from livelossplot import PlotLosses\n", - "\n", - "# Fix the dying kernel problem (only a problem in some installations - you can remove it, if it works without it)\n", - "import os\n", - "os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'" - ] - }, - { - "cell_type": "markdown", - "id": "blessed-knitting", - "metadata": {}, - "source": [ - "# Load the dataset for recommenders" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "victorian-bottom", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
idx | user_id | item_id | term | length_of_stay_bucket | rate_plan | room_segment | n_people_bucket | weekend_stay
0 | 1 | 0 | WinterVacation | [2-3] | Standard | [260-360] | [5-inf] | True
1 | 2 | 1 | WinterVacation | [2-3] | Standard | [160-260] | [3-4] | True
2 | 3 | 2 | WinterVacation | [2-3] | Standard | [160-260] | [2-2] | False
3 | 4 | 3 | WinterVacation | [4-7] | Standard | [160-260] | [3-4] | True
4 | 5 | 4 | WinterVacation | [4-7] | Standard | [0-160] | [2-2] | True
5 | 6 | 5 | Easter | [4-7] | Standard | [260-360] | [5-inf] | True
6 | 7 | 6 | OffSeason | [2-3] | Standard | [260-360] | [5-inf] | True
7 | 8 | 7 | HighSeason | [2-3] | Standard | [160-260] | [1-1] | True
8 | 9 | 8 | HighSeason | [2-3] | Standard | [0-160] | [1-1] | True
9 | 8 | 7 | HighSeason | [2-3] | Standard | [160-260] | [1-1] | True
10 | 8 | 7 | HighSeason | [2-3] | Standard | [160-260] | [1-1] | True
11 | 10 | 9 | HighSeason | [2-3] | Standard | [160-260] | [3-4] | True
12 | 11 | 9 | HighSeason | [2-3] | Standard | [160-260] | [3-4] | True
13 | 12 | 10 | HighSeason | [8-inf] | Standard | [160-260] | [3-4] | True
14 | 14 | 11 | HighSeason | [2-3] | Standard | [0-160] | [3-4] | True
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "data_path = os.path.join(\"data\", \"hotel_data\")\n", - "\n", - "interactions_df = pd.read_csv(os.path.join(data_path, \"hotel_data_interactions_df.csv\"), index_col=0)\n", - "\n", - "base_item_features = ['term', 'length_of_stay_bucket', 'rate_plan', 'room_segment', 'n_people_bucket', 'weekend_stay']\n", - "\n", - "column_values_dict = {\n", - " 'term': ['WinterVacation', 'Easter', 'OffSeason', 'HighSeason', 'LowSeason', 'MayLongWeekend', 'NewYear', 'Christmas'],\n", - " 'length_of_stay_bucket': ['[0-1]', '[2-3]', '[4-7]', '[8-inf]'],\n", - " 'rate_plan': ['Standard', 'Nonref'],\n", - " 'room_segment': ['[0-160]', '[160-260]', '[260-360]', '[360-500]', '[500-900]'],\n", - " 'n_people_bucket': ['[1-1]', '[2-2]', '[3-4]', '[5-inf]'],\n", - " 'weekend_stay': ['True', 'False']\n", - "}\n", - "\n", - "interactions_df.loc[:, 'term'] = pd.Categorical(\n", - " interactions_df['term'], categories=column_values_dict['term'])\n", - "interactions_df.loc[:, 'length_of_stay_bucket'] = pd.Categorical(\n", - " interactions_df['length_of_stay_bucket'], categories=column_values_dict['length_of_stay_bucket'])\n", - "interactions_df.loc[:, 'rate_plan'] = pd.Categorical(\n", - " interactions_df['rate_plan'], categories=column_values_dict['rate_plan'])\n", - "interactions_df.loc[:, 'room_segment'] = pd.Categorical(\n", - " interactions_df['room_segment'], categories=column_values_dict['room_segment'])\n", - "interactions_df.loc[:, 'n_people_bucket'] = pd.Categorical(\n", - " interactions_df['n_people_bucket'], categories=column_values_dict['n_people_bucket'])\n", - "interactions_df.loc[:, 'weekend_stay'] = interactions_df['weekend_stay'].astype('str')\n", - "interactions_df.loc[:, 'weekend_stay'] = pd.Categorical(\n", - " interactions_df['weekend_stay'], categories=column_values_dict['weekend_stay'])\n", - "\n", - "display(HTML(interactions_df.head(15).to_html()))" - ] - }, - { - "cell_type": "markdown", - "id": "realistic-third", - "metadata": {}, - "source": [ - "# (Optional) Prepare numerical user features\n", - "\n", - "The method below is left here for convenience if you want to experiment with content-based user features as an input for your neural network." 
- ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "variable-jaguar", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['user_term_WinterVacation', 'user_term_Easter', 'user_term_OffSeason', 'user_term_HighSeason', 'user_term_LowSeason', 'user_term_MayLongWeekend', 'user_term_NewYear', 'user_term_Christmas', 'user_length_of_stay_bucket_[0-1]', 'user_length_of_stay_bucket_[2-3]', 'user_length_of_stay_bucket_[4-7]', 'user_length_of_stay_bucket_[8-inf]', 'user_rate_plan_Standard', 'user_rate_plan_Nonref', 'user_room_segment_[0-160]', 'user_room_segment_[160-260]', 'user_room_segment_[260-360]', 'user_room_segment_[360-500]', 'user_room_segment_[500-900]', 'user_n_people_bucket_[1-1]', 'user_n_people_bucket_[2-2]', 'user_n_people_bucket_[3-4]', 'user_n_people_bucket_[5-inf]', 'user_weekend_stay_True', 'user_weekend_stay_False']\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
user_iduser_term_WinterVacationuser_term_Easteruser_term_OffSeasonuser_term_HighSeasonuser_term_LowSeasonuser_term_MayLongWeekenduser_term_NewYearuser_term_Christmasuser_length_of_stay_bucket_[0-1]user_length_of_stay_bucket_[2-3]user_length_of_stay_bucket_[4-7]user_length_of_stay_bucket_[8-inf]user_rate_plan_Standarduser_rate_plan_Nonrefuser_room_segment_[0-160]user_room_segment_[160-260]user_room_segment_[260-360]user_room_segment_[360-500]user_room_segment_[500-900]user_n_people_bucket_[1-1]user_n_people_bucket_[2-2]user_n_people_bucket_[3-4]user_n_people_bucket_[5-inf]user_weekend_stay_Trueuser_weekend_stay_False
010.1304350.00.6521740.0869570.1304350.0000000.0000000.0000000.0000000.6086960.3913040.0000000.5217390.4782610.0000000.8695650.1304350.0000000.00.0000000.7391300.1739130.0869570.7826090.217391
47500.0434780.00.4347830.3043480.2173910.0000000.0000000.0000000.0000000.9130430.0869570.0000000.2608700.7391300.0000000.5652170.4347830.0000000.00.0000000.1739130.5217390.3043480.7826090.217391
92960.0833330.00.7083330.1250000.0416670.0416670.0000000.0000000.2500000.6666670.0416670.0416670.2916670.7083330.1250000.7916670.0833330.0000000.00.0416670.3333330.5416670.0833330.7500000.250000
1111150.7272730.00.2727270.0000000.0000000.0000000.0000000.0000000.5000000.3636360.1363640.0000001.0000000.0000000.0000000.8181820.1818180.0000000.00.8181820.0909090.0454550.0454550.3636360.636364
6757060.0919880.00.4510390.1899110.2077150.0385760.0118690.0089020.1691390.4599410.2729970.0979230.9940650.0059350.0207720.8397630.1305640.0089020.00.0415430.0949550.7388720.1246290.6765580.323442
169917360.0344830.00.4827590.2068970.2758620.0000000.0000000.0000000.2413790.5517240.2068970.0000000.1724140.8275860.0000000.9310340.0689660.0000000.00.3793100.4137930.2068970.0000000.4482760.551724
763977790.0370370.00.2962960.2592590.3703700.0000000.0000000.0370370.1111110.2962960.4814810.1111111.0000000.0000000.0000000.8148150.1851850.0000000.00.0000000.0370370.7407410.2222220.8148150.185185
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "def n_to_p(l):\n", - " n = sum(l)\n", - " return [x / n for x in l] if n > 0 else l\n", - "\n", - "def calculate_p(x, values):\n", - " counts = [0]*len(values)\n", - " for v in x:\n", - " counts[values.index(v)] += 1\n", - "\n", - " return n_to_p(counts)\n", - "\n", - "def prepare_users_df(interactions_df):\n", - "\n", - " users_df = interactions_df.loc[:, [\"user_id\"]]\n", - " users_df = users_df.groupby(\"user_id\").first().reset_index(drop=False)\n", - " \n", - " user_features = []\n", - "\n", - " for column in base_item_features:\n", - "\n", - " column_values = column_values_dict[column]\n", - " df = interactions_df.loc[:, ['user_id', column]]\n", - " df = df.groupby('user_id').aggregate(lambda x: list(x)).reset_index(drop=False)\n", - "\n", - " def calc_p(x):\n", - " return calculate_p(x, column_values)\n", - "\n", - " df.loc[:, column] = df[column].apply(lambda x: calc_p(x))\n", - "\n", - " p_columns = []\n", - " for i in range(len(column_values)):\n", - " p_columns.append(\"user_\" + column + \"_\" + column_values[i])\n", - " df.loc[:, p_columns[i]] = df[column].apply(lambda x: x[i])\n", - " user_features.append(p_columns[i])\n", - "\n", - " users_df = pd.merge(users_df, df.loc[:, ['user_id'] + p_columns], on=[\"user_id\"])\n", - " \n", - " return users_df, user_features\n", - " \n", - "\n", - "users_df, user_features = prepare_users_df(interactions_df)\n", - "\n", - "print(user_features)\n", - "\n", - "display(HTML(users_df.loc[users_df['user_id'].isin([706, 1736, 7779, 96, 1, 50, 115])].head(15).to_html()))" - ] - }, - { - "cell_type": "markdown", - "id": "amino-keyboard", - "metadata": {}, - "source": [ - "# (Optional) Prepare numerical item features\n", - "\n", - "The method below is left here for convenience if you want to experiment with content-based item features as an input for your neural network." 
- ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "formal-munich", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['term_WinterVacation', 'term_Easter', 'term_OffSeason', 'term_HighSeason', 'term_LowSeason', 'term_MayLongWeekend', 'term_NewYear', 'term_Christmas', 'length_of_stay_bucket_[0-1]', 'length_of_stay_bucket_[2-3]', 'length_of_stay_bucket_[4-7]', 'length_of_stay_bucket_[8-inf]', 'rate_plan_Standard', 'rate_plan_Nonref', 'room_segment_[0-160]', 'room_segment_[160-260]', 'room_segment_[260-360]', 'room_segment_[360-500]', 'room_segment_[500-900]', 'n_people_bucket_[1-1]', 'n_people_bucket_[2-2]', 'n_people_bucket_[3-4]', 'n_people_bucket_[5-inf]', 'weekend_stay_True', 'weekend_stay_False']\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
item_idterm_WinterVacationterm_Easterterm_OffSeasonterm_HighSeasonterm_LowSeasonterm_MayLongWeekendterm_NewYearterm_Christmaslength_of_stay_bucket_[0-1]length_of_stay_bucket_[2-3]length_of_stay_bucket_[4-7]length_of_stay_bucket_[8-inf]rate_plan_Standardrate_plan_Nonrefroom_segment_[0-160]room_segment_[160-260]room_segment_[260-360]room_segment_[360-500]room_segment_[500-900]n_people_bucket_[1-1]n_people_bucket_[2-2]n_people_bucket_[3-4]n_people_bucket_[5-inf]weekend_stay_Trueweekend_stay_False
001000000001001000100000110
111000000001001001000001010
221000000001001001000010001
331000000000101001000001010
441000000000101010000010010
550100000000101000100000110
660010000001001000100000110
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "def map_items_to_onehot(df):\n", - " one_hot = pd.get_dummies(df.loc[:, base_item_features])\n", - " df = df.drop(base_item_features, axis = 1)\n", - " df = df.join(one_hot)\n", - " \n", - " return df, list(one_hot.columns)\n", - "\n", - "def prepare_items_df(interactions_df):\n", - " items_df = interactions_df.loc[:, [\"item_id\"] + base_item_features].drop_duplicates()\n", - " \n", - " items_df, item_features = map_items_to_onehot(items_df)\n", - " \n", - " return items_df, item_features\n", - "\n", - "\n", - "items_df, item_features = prepare_items_df(interactions_df)\n", - "\n", - "print(item_features)\n", - "\n", - "display(HTML(items_df.loc[items_df['item_id'].isin([0, 1, 2, 3, 4, 5, 6])].head(15).to_html()))" - ] - }, - { - "cell_type": "markdown", - "id": "figured-imaging", - "metadata": {}, - "source": [ - "# Neural network recommender\n", - "\n", - "**Task:**
\n", - "Code a recommender based on a neural network model. You are free to choose any network architecture you find appropriate. The network can use the interaction vectors for users and items, embeddings of users and items, as well as user and item features (you can use the features you developed in the first project).\n", - "\n", - "Remember to keep control over randomness - in the init method add the seed as a parameter and initialize the random seed generator with that seed (both for numpy and pytorch):\n", - "\n", - "```python\n", - "self.seed = seed\n", - "self.rng = np.random.RandomState(seed=seed)\n", - "```\n", - "in the network model:\n", - "```python\n", - "self.seed = torch.manual_seed(seed)\n", - "```\n", - "\n", - "You are encouraged to experiment with:\n", - " - the number of layers in the network, the number of neurons and different activation functions,\n", - " - different optimizers and their parameters,\n", - " - batch size and the number of epochs,\n", - " - embedding layers,\n", - " - content-based features of both users and items." - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "unlike-recipient", - "metadata": {}, - "outputs": [], - "source": [ - "from recommenders.recommender import Recommender\n", - "\n", - "\n", - "class Net(nn.Module):\n", - " def __init__(self, features_len, output_len):\n", - " super(Net, self).__init__()\n", - " \n", - " print(\"IN:\", features_len, \"OUT:\", output_len)\n", - " \n", - " self.fc1 = nn.Linear(features_len, 150)\n", - " self.fc2 = nn.Linear(150, 50)\n", - " self.fc3 = nn.Linear(50, 25)\n", - " self.fc4 = nn.Linear(25, output_len+500)\n", - " \n", - " def forward(self, x):\n", - " x = F.relu(self.fc1(x))\n", - " x = F.relu(self.fc2(x))\n", - " x = F.relu(self.fc3(x))\n", - " return self.fc4(x)\n", - "\n", - "# class Net(nn.Module):\n", - "# def __init__(self, features_len):\n", - "# super(Net, self).__init__()\n", - "# self.hid1 = nn.Linear(features_len, 10)\n", - "# self.hid2 = nn.Linear(10, 10)\n", - "# self.oupt = nn.Linear(10, 1)\n", - "\n", - "# nn.init.xavier_uniform_(self.hid1.weight)\n", - "# nn.init.zeros_(self.hid1.bias)\n", - "# nn.init.xavier_uniform_(self.hid2.weight)\n", - "# nn.init.zeros_(self.hid2.bias)\n", - "# nn.init.xavier_uniform_(self.oupt.weight)\n", - "# nn.init.zeros_(self.oupt.bias)\n", - "\n", - "# def forward(self, x):\n", - "# z = torch.tanh(self.hid1(x))\n", - "# z = torch.tanh(self.hid2(z))\n", - "# z = torch.sigmoid(self.oupt(z))\n", - "# return z\n", - " \n", - " \n", - "class NNRecommender(Recommender):\n", - " \"\"\"\n", - " Linear recommender class based on user and item features.\n", - " \"\"\"\n", - " \n", - " def generate_negative_interaction(self):\n", - " user_ids = interactions_df['user_id']\n", - " item_ids = interactions_df['item_id']\n", - " \n", - " user_id = user_ids.sample().item()\n", - " item_id = item_ids.sample().item()\n", - " positive_interactions = interactions_df.loc[\n", - " (interactions_df['item_id'] == item_id) & (interactions_df['user_id'] == user_id)]\n", - " \n", - " while not positive_interactions.empty:\n", - " user_id = user_ids.sample().item()\n", - " item_id = item_ids.sample().item()\n", - " positive_interactions = interactions_df.loc[\n", - " (interactions_df['item_id'] == item_id) & (interactions_df['user_id'] == user_id)]\n", - " \n", - " return (user_id, item_id, 0)\n", - " \n", - " def generate_negative_interactions(self, n, interactions_df, cross_df):\n", - " combined_dfs = pd.concat([cross_df, interactions_df[['user_id', 
'item_id']]])\n", - " return combined_dfs.drop_duplicates(keep=False).sample(n=n)\n", - " \n", - " \n", - " def __init__(self, seed=6789, n_neg_per_pos=5):\n", - " \"\"\"\n", - " Initialize base recommender params and variables.\n", - " \"\"\"\n", - " self.model = None\n", - " self.n_neg_per_pos = n_neg_per_pos\n", - " \n", - " self.recommender_df = pd.DataFrame(columns=['user_id', 'item_id', 'score'])\n", - " self.users_df = None\n", - " self.user_features = None\n", - " \n", - " self.seed = seed\n", - " self.rng = np.random.RandomState(seed=seed)\n", - " \n", - " def calculate_accuracy(self, y_true, y_pred):\n", - " predicted = y_pred.ge(.5).view(-1)\n", - " return (y_true == predicted).sum().float() / len(y_true)\n", - " \n", - " def round_tensor(self, t, decimal_places=3):\n", - " return round(t.item(), decimal_places)\n", - " \n", - " def fit(self, interactions_df, users_df, items_df):\n", - " \"\"\"\n", - " Training of the recommender.\n", - " \n", - " :param pd.DataFrame interactions_df: DataFrame with recorded interactions between users and items \n", - " defined by user_id, item_id and features of the interaction.\n", - " :param pd.DataFrame users_df: DataFrame with users and their features defined by user_id and the user feature columns.\n", - " :param pd.DataFrame items_df: DataFrame with items and their features defined by item_id and the item feature columns.\n", - " \"\"\"\n", - " \n", - " interactions_df = interactions_df.copy()\n", - " # Prepare users_df and items_df \n", - " # (optional - use only if you want to train a hybrid model with content-based features)\n", - " \n", - " users_df, user_features = prepare_users_df(interactions_df)\n", - " \n", - " self.users_df = users_df\n", - " self.user_features = user_features\n", - " \n", - " items_df, item_features = prepare_items_df(interactions_df)\n", - " items_df = items_df.loc[:, ['item_id'] + item_features]\n", - " \n", - " n_epochs = 51\n", - "\n", - " X = items_df[['term_WinterVacation', 'term_Easter', 'term_OffSeason', 'term_HighSeason', 'term_LowSeason', 'term_MayLongWeekend', 'term_NewYear', 'term_Christmas', 'rate_plan_Standard', 'rate_plan_Nonref', 'room_segment_[0-160]', 'room_segment_[160-260]', 'room_segment_[260-360]', 'room_segment_[360-500]', 'room_segment_[500-900]', 'n_people_bucket_[1-1]', 'n_people_bucket_[2-2]', 'n_people_bucket_[3-4]', 'n_people_bucket_[5-inf]', 'weekend_stay_True', 'weekend_stay_False']]\n", - " y = items_df[['item_id']]\n", - " X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=self.seed)\n", - " \n", - " X_train = torch.from_numpy(X_train.to_numpy()).float()\n", - " y_train = torch.squeeze(torch.from_numpy(y_train.to_numpy()).long())\n", - " X_test = torch.from_numpy(X_test.to_numpy()).float()\n", - " y_test = torch.squeeze(torch.from_numpy(y_test.to_numpy()).long())\n", - " \n", - " self.net = Net(X_train.shape[1], items_df['item_id'].unique().size)\n", - " \n", - " optimizer = optim.Adam(self.net.parameters(), lr=0.05)\n", - " criterion = nn.CrossEntropyLoss()\n", - " \n", - " for epoch in range(n_epochs):\n", - " y_pred = self.net(X_train)\n", - " y_pred = torch.squeeze(y_pred)\n", - " train_loss = criterion(y_pred, y_train)\n", - " \n", - "# if epoch % 5000 == 0:\n", - "# train_acc = self.calculate_accuracy(y_train, y_pred)\n", - "# y_test_pred = self.net(X_test)\n", - "# y_test_pred = torch.squeeze(y_test_pred)\n", - "# test_loss = criterion(y_test_pred, y_test)\n", - "# test_acc = self.calculate_accuracy(y_test, y_test_pred)\n", - "# 
print(\n", - "# f'''epoch {epoch}\n", - "# Train set - loss: {self.round_tensor(train_loss)}, accuracy: {self.round_tensor(train_acc)}\n", - "# Test set - loss: {self.round_tensor(test_loss)}, accuracy: {self.round_tensor(test_acc)}\n", - "# ''')\n", - " \n", - " optimizer.zero_grad()\n", - " train_loss.backward()\n", - " optimizer.step()\n", - " \n", - " def recommend(self, users_df, items_df, n_recommendations=1):\n", - " \"\"\"\n", - " Serving of recommendations. Scores items in items_df for each user in users_df and returns \n", - " top n_recommendations for each user.\n", - " \n", - " :param pd.DataFrame users_df: DataFrame with users and their features for which recommendations should be generated.\n", - " :param pd.DataFrame items_df: DataFrame with items and their features which should be scored.\n", - " :param int n_recommendations: Number of recommendations to be returned for each user.\n", - " :return: DataFrame with user_id, item_id and score as columns returning n_recommendations top recommendations \n", - " for each user.\n", - " :rtype: pd.DataFrame\n", - " \"\"\"\n", - " \n", - " # Clean previous recommendations (iloc could be used alternatively)\n", - " self.recommender_df = self.recommender_df[:0]\n", - " \n", - " # Prepare users_df and items_df\n", - " # (optional - use only if you want to train a hybrid model with content-based features)\n", - " \n", - " users_df = users_df.loc[:, 'user_id']\n", - " users_df = pd.merge(users_df, self.users_df, on=['user_id'], how='left').fillna(0)\n", - " \n", - " # items_df, item_features = prepare_items_df(items_df)\n", - " # items_df = items_df.loc[:, ['item_id'] + item_features]\n", - " \n", - " # Score the items\n", - " \n", - " recommendations = pd.DataFrame(columns=['user_id', 'item_id', 'score'])\n", - " \n", - " for ix, user in users_df.iterrows():\n", - " prep_user = torch.from_numpy(user[['user_term_WinterVacation', 'user_term_Easter', 'user_term_OffSeason', 'user_term_HighSeason', 'user_term_LowSeason', 'user_term_MayLongWeekend', 'user_term_NewYear', 'user_term_Christmas', 'user_rate_plan_Standard', 'user_rate_plan_Nonref', 'user_room_segment_[0-160]', 'user_room_segment_[160-260]', 'user_room_segment_[260-360]', 'user_room_segment_[360-500]', 'user_room_segment_[500-900]', 'user_n_people_bucket_[1-1]', 'user_n_people_bucket_[2-2]', 'user_n_people_bucket_[3-4]', 'user_n_people_bucket_[5-inf]', 'user_weekend_stay_True', 'user_weekend_stay_False']].to_numpy()).float()\n", - " \n", - " scores = self.net(prep_user).detach().numpy()\n", - " \n", - " chosen_ids = np.argsort(-scores)[:n_recommendations]\n", - " \n", - " recommendations = []\n", - " for item_id in chosen_ids:\n", - " recommendations.append(\n", - " {\n", - " 'user_id': user['user_id'],\n", - " 'item_id': item_id,\n", - " 'score': scores[item_id]\n", - " }\n", - " )\n", - " \n", - " user_recommendations = pd.DataFrame(recommendations)\n", - " \n", - " self.recommender_df = pd.concat([self.recommender_df, user_recommendations])\n", - " \n", - " return self.recommender_df" - ] - }, - { - "cell_type": "markdown", - "id": "copyrighted-relative", - "metadata": {}, - "source": [ - "# Quick test of the recommender" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "greatest-canon", - "metadata": {}, - "outputs": [], - "source": [ - "items_df = interactions_df.loc[:, ['item_id'] + base_item_features].drop_duplicates()" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "initial-capital", - "metadata": {}, - "outputs": [ - { - "ename": 
"NameError", - "evalue": "name 'train_test_split' is not defined", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# Fit method\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0mnn_recommender\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mNNRecommender\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mnn_recommender\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minteractions_df\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhead\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m1000\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4\u001b[0m \u001b[0;31m# nn_recommender.fit(interactions_df, None, None)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, interactions_df, users_df, items_df)\u001b[0m\n\u001b[1;32m 114\u001b[0m \u001b[0mX\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mitems_df\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'term_WinterVacation'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'term_Easter'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'term_OffSeason'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'term_HighSeason'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'term_LowSeason'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'term_MayLongWeekend'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'term_NewYear'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'term_Christmas'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'rate_plan_Standard'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'rate_plan_Nonref'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'room_segment_[0-160]'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'room_segment_[160-260]'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'room_segment_[260-360]'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'room_segment_[360-500]'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'room_segment_[500-900]'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'n_people_bucket_[1-1]'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'n_people_bucket_[2-2]'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'n_people_bucket_[3-4]'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'n_people_bucket_[5-inf]'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'weekend_stay_True'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'weekend_stay_False'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0my\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mitems_df\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'item_id'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 116\u001b[0;31m \u001b[0mX_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX_test\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_test\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mtrain_test_split\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtest_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0.2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrandom_state\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mseed\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 117\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 118\u001b[0m \u001b[0mX_train\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_train\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_numpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfloat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mNameError\u001b[0m: name 'train_test_split' is not defined" - ] - } - ], - "source": [ - "# Fit method\n", - "nn_recommender = NNRecommender()\n", - "nn_recommender.fit(interactions_df.head(1000), None, None)\n", - "# nn_recommender.fit(interactions_df, None, None)" - ] - }, - { - "cell_type": "code", - "execution_count": 193, - "id": "digital-consolidation", - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
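The `NameError` in the traceback above is raised because `fit` calls `train_test_split`, which is never imported in this notebook. Presumably the missing line is the scikit-learn import below, added alongside the other imports in the first cell; with it, the quick test of the recommender should get past this point.

```python
# Presumed missing import for the train_test_split(X, y, test_size=..., random_state=...) call in fit
from sklearn.model_selection import train_test_split
```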
user_iditem_idscoretermlength_of_stay_bucketrate_planroom_segmentn_people_bucketweekend_stay
01.08837.715969WinterVacation[0-1]Standard[160-260][2-2]True
11.05736.182877WinterVacation[2-3]Standard[160-260][2-2]True
21.06935.771114WinterVacation[4-7]Standard[160-260][2-2]True
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# Recommender method\n", - "\n", - "recommendations = nn_recommender.recommend(pd.DataFrame([[1]], columns=['user_id']), items_df, 3)\n", - "\n", - "recommendations = pd.merge(recommendations, items_df, on='item_id', how='left')\n", - "display(HTML(recommendations.to_html()))" - ] - }, - { - "cell_type": "markdown", - "id": "advanced-eleven", - "metadata": {}, - "source": [ - "# Tuning method" - ] - }, - { - "cell_type": "code", - "execution_count": 194, - "id": "strange-alaska", - "metadata": {}, - "outputs": [], - "source": [ - "from evaluation_and_testing.testing import evaluate_train_test_split_implicit\n", - "\n", - "seed = 6789" - ] - }, - { - "cell_type": "code", - "execution_count": 195, - "id": "stable-theta", - "metadata": {}, - "outputs": [], - "source": [ - "from hyperopt import hp, fmin, tpe, Trials\n", - "import traceback\n", - "\n", - "def tune_recommender(recommender_class, interactions_df, items_df, \n", - " param_space, max_evals=1, show_progressbar=True, seed=6789):\n", - " # Split into train_validation and test sets\n", - "\n", - " shuffle = np.arange(len(interactions_df))\n", - " rng = np.random.RandomState(seed=seed)\n", - " rng.shuffle(shuffle)\n", - " shuffle = list(shuffle)\n", - "\n", - " train_test_split = 0.8\n", - " split_index = int(len(interactions_df) * train_test_split)\n", - "\n", - " train_validation = interactions_df.iloc[shuffle[:split_index]]\n", - " test = interactions_df.iloc[shuffle[split_index:]]\n", - "\n", - " # Tune\n", - "\n", - " def loss(tuned_params):\n", - " recommender = recommender_class(seed=seed, **tuned_params)\n", - " hr1, hr3, hr5, hr10, ndcg1, ndcg3, ndcg5, ndcg10 = evaluate_train_test_split_implicit(\n", - " recommender, train_validation, items_df, seed=seed)\n", - " return -hr10\n", - "\n", - " n_tries = 1\n", - " succeded = False\n", - " try_id = 0\n", - " while not succeded and try_id < n_tries:\n", - " try:\n", - " trials = Trials()\n", - " best_param_set = fmin(loss, space=param_space, algo=tpe.suggest, \n", - " max_evals=max_evals, show_progressbar=show_progressbar, trials=trials, verbose=True)\n", - " succeded = True\n", - " except:\n", - " traceback.print_exc()\n", - " try_id += 1\n", - " \n", - " if not succeded:\n", - " return None\n", - " \n", - " # Validate\n", - " \n", - " recommender = recommender_class(seed=seed, **best_param_set)\n", - "\n", - " results = [[recommender_class.__name__] + list(evaluate_train_test_split_implicit(\n", - " recommender, {'train': train_validation, 'test': test}, items_df, seed=seed))]\n", - "\n", - " results = pd.DataFrame(results, \n", - " columns=['Recommender', 'HR@1', 'HR@3', 'HR@5', 'HR@10', 'NDCG@1', 'NDCG@3', 'NDCG@5', 'NDCG@10'])\n", - "\n", - " display(HTML(results.to_html()))\n", - " \n", - " return best_param_set" - ] - }, - { - "cell_type": "markdown", - "id": "reliable-switzerland", - "metadata": {}, - "source": [ - "## Tuning of the recommender\n", - "\n", - "**Task:**
\n", - "Tune your model using the code below. You only need to put the class name of your recommender and choose an appropriate parameter space." - ] - }, - { - "cell_type": "code", - "execution_count": 196, - "id": "obvious-astrology", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "IN: \n", - "21 \n", - "OUT: \n", - "691 \n", - "IN: \n", - "21 \n", - "OUT: \n", - "691 \n", - "IN: \n", - "21 \n", - "OUT: \n", - "691 \n", - "IN: \n", - "21 \n", - "OUT: \n", - "691 \n", - "IN: \n", - "21 \n", - "OUT: \n", - "691 \n", - "IN: \n", - "21 \n", - "OUT: \n", - "691 \n", - "IN: \n", - "21 \n", - "OUT: \n", - "691 \n", - "IN: \n", - "21 \n", - "OUT: \n", - "691 \n", - "IN: \n", - "21 \n", - "OUT: \n", - "691 \n", - "IN: \n", - "21 \n", - "OUT: \n", - "691 \n", - "100%|██████████| 10/10 [18:34<00:00, 111.50s/trial, best loss: -0.04424416222859484]\n", - "IN: 21 OUT: 736\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
idx | Recommender | HR@1 | HR@3 | HR@5 | HR@10 | NDCG@1 | NDCG@3 | NDCG@5 | NDCG@10
0 | NNRecommender | 0.010201 | 0.020072 | 0.026324 | 0.035538 | 0.010201 | 0.01574 | 0.018216 | 0.021141
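Only `n_neg_per_pos` is tuned in this notebook, although the task statement also encourages experimenting with the optimizer, the number of epochs, the architecture and dropout. The sketch below shows one possible wider hyperopt search space; it assumes the recommender's `__init__` would accept these extra parameters (the current `NNRecommender` only takes `seed` and `n_neg_per_pos`), and since `hp.quniform` returns floats, integer-valued parameters should be cast explicitly inside the recommender.

```python
import numpy as np
from hyperopt import hp

# Hypothetical wider search space - usable only if NNRecommender.__init__ accepted
# lr, n_epochs, hidden_dim and p_drop in addition to n_neg_per_pos.
param_space = {
    'n_neg_per_pos': hp.quniform('n_neg_per_pos', 1, 10, 1),   # returned as a float, e.g. 9.0
    'lr': hp.loguniform('lr', np.log(1e-4), np.log(1e-1)),
    'n_epochs': hp.quniform('n_epochs', 10, 60, 10),
    'hidden_dim': hp.choice('hidden_dim', [50, 100, 150]),
    'p_drop': hp.uniform('p_drop', 0.0, 0.5),
}
```

(The tuning run in this notebook returns `{'n_neg_per_pos': 9.0}` as a float, which is then passed by hand as `n_neg_per_pos=9` in the final evaluation.)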
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Best parameters:\n", - "{'n_neg_per_pos': 9.0}\n" - ] - } - ], - "source": [ - "param_space = {\n", - " 'n_neg_per_pos': hp.quniform('n_neg_per_pos', 1, 10, 1)\n", - "}\n", - "items_df['item_id'].unique().size\n", - "\n", - "best_param_set = tune_recommender(NNRecommender, interactions_df, items_df,\n", - " param_space, max_evals=10, show_progressbar=True, seed=seed)\n", - "\n", - "print(\"Best parameters:\")\n", - "print(best_param_set)" - ] - }, - { - "cell_type": "markdown", - "id": "accredited-strap", - "metadata": {}, - "source": [ - "# Final evaluation\n", - "\n", - "**Task:**
\n", - "Run the final evaluation of your recommender and present its results against the Amazon and Netflix recommenders' results. You just need to give the class name of your recommender and its tuned parameters below." - ] - }, - { - "cell_type": "code", - "execution_count": 198, - "id": "given-homework", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "IN: 21 OUT: 736\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
idx | Recommender | HR@1 | HR@3 | HR@5 | HR@10 | NDCG@1 | NDCG@3 | NDCG@5 | NDCG@10
0 | NNRecommender | 0.003949 | 0.015137 | 0.019743 | 0.026654 | 0.003949 | 0.010361 | 0.01223 | 0.014409
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "nn_recommender = NNRecommender(n_neg_per_pos=9) # Initialize your recommender here\n", - "\n", - "# Give the name of your recommender in the line below\n", - "nn_tts_results = [['NNRecommender'] + list(evaluate_train_test_split_implicit(\n", - " nn_recommender, interactions_df, items_df))]\n", - "\n", - "nn_tts_results = pd.DataFrame(\n", - " nn_tts_results, columns=['Recommender', 'HR@1', 'HR@3', 'HR@5', 'HR@10', 'NDCG@1', 'NDCG@3', 'NDCG@5', 'NDCG@10'])\n", - "\n", - "display(HTML(nn_tts_results.to_html()))" - ] - }, - { - "cell_type": "code", - "execution_count": 199, - "id": "suited-nomination", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
idx | Recommender | HR@1 | HR@3 | HR@5 | HR@10 | NDCG@1 | NDCG@3 | NDCG@5 | NDCG@10
0 | AmazonRecommender | 0.042119 | 0.10464 | 0.140507 | 0.199408 | 0.042119 | 0.076826 | 0.091797 | 0.110705
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "from recommenders.amazon_recommender import AmazonRecommender\n", - "\n", - "amazon_recommender = AmazonRecommender()\n", - "\n", - "amazon_tts_results = [['AmazonRecommender'] + list(evaluate_train_test_split_implicit(\n", - " amazon_recommender, interactions_df, items_df))]\n", - "\n", - "amazon_tts_results = pd.DataFrame(\n", - " amazon_tts_results, columns=['Recommender', 'HR@1', 'HR@3', 'HR@5', 'HR@10', 'NDCG@1', 'NDCG@3', 'NDCG@5', 'NDCG@10'])\n", - "\n", - "display(HTML(amazon_tts_results.to_html()))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "conservative-remedy", - "metadata": {}, - "outputs": [], - "source": [ - "from recommenders.netflix_recommender import NetflixRecommender\n", - "\n", - "netflix_recommender = NetflixRecommender(n_epochs=30, print_type='live')\n", - "\n", - "netflix_tts_results = [['NetflixRecommender'] + list(evaluate_train_test_split_implicit(\n", - " netflix_recommender, interactions_df, items_df))]\n", - "\n", - "netflix_tts_results = pd.DataFrame(\n", - " netflix_tts_results, columns=['Recommender', 'HR@1', 'HR@3', 'HR@5', 'HR@10', 'NDCG@1', 'NDCG@3', 'NDCG@5', 'NDCG@10'])\n", - "\n", - "display(HTML(netflix_tts_results.to_html()))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "moderate-printing", - "metadata": {}, - "outputs": [], - "source": [ - "tts_results = pd.concat([nn_tts_results, amazon_tts_results, netflix_tts_results]).reset_index(drop=True)\n", - "display(HTML(tts_results.to_html()))" - ] - }, - { - "cell_type": "markdown", - "id": "uniform-vegetable", - "metadata": {}, - "source": [ - "# Summary\n", - "\n", - "**Task:**
\n", - "Write a summary of your experiments. What worked well and what did not? What are your thoughts how could you possibly further improve the model?" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "declared-howard", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "rek_uno", - "language": "python", - "name": "rek_uno" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.8" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/project_2_recommender_and_evaluation-Copy2.ipynb b/project_2_recommender_and_evaluation-Copy2.ipynb deleted file mode 100644 index 5dc2301..0000000 --- a/project_2_recommender_and_evaluation-Copy2.ipynb +++ /dev/null @@ -1,1979 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 302, - "id": "alike-morgan", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The autoreload extension is already loaded. To reload it, use:\n", - " %reload_ext autoreload\n" - ] - } - ], - "source": [ - "%matplotlib inline\n", - "%load_ext autoreload\n", - "%autoreload 2\n", - "\n", - "import numpy as np\n", - "import pandas as pd\n", - "import matplotlib.pyplot as plt\n", - "import seaborn as sns\n", - "from IPython.display import Markdown, display, HTML\n", - "from collections import defaultdict\n", - "\n", - "import torch\n", - "import torch.nn as nn\n", - "import torch.optim as optim\n", - "from livelossplot import PlotLosses\n", - "\n", - "# Fix the dying kernel problem (only a problem in some installations - you can remove it, if it works without it)\n", - "import os\n", - "os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'" - ] - }, - { - "cell_type": "markdown", - "id": "blessed-knitting", - "metadata": {}, - "source": [ - "# Load the dataset for recommenders" - ] - }, - { - "cell_type": "code", - "execution_count": 303, - "id": "victorian-bottom", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - 
" \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
| | user_id | item_id | term | length_of_stay_bucket | rate_plan | room_segment | n_people_bucket | weekend_stay |
|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 0 | WinterVacation | [2-3] | Standard | [260-360] | [5-inf] | True |
| 1 | 2 | 1 | WinterVacation | [2-3] | Standard | [160-260] | [3-4] | True |
| 2 | 3 | 2 | WinterVacation | [2-3] | Standard | [160-260] | [2-2] | False |
| 3 | 4 | 3 | WinterVacation | [4-7] | Standard | [160-260] | [3-4] | True |
| 4 | 5 | 4 | WinterVacation | [4-7] | Standard | [0-160] | [2-2] | True |
| 5 | 6 | 5 | Easter | [4-7] | Standard | [260-360] | [5-inf] | True |
| 6 | 7 | 6 | OffSeason | [2-3] | Standard | [260-360] | [5-inf] | True |
| 7 | 8 | 7 | HighSeason | [2-3] | Standard | [160-260] | [1-1] | True |
| 8 | 9 | 8 | HighSeason | [2-3] | Standard | [0-160] | [1-1] | True |
| 9 | 8 | 7 | HighSeason | [2-3] | Standard | [160-260] | [1-1] | True |
| 10 | 8 | 7 | HighSeason | [2-3] | Standard | [160-260] | [1-1] | True |
| 11 | 10 | 9 | HighSeason | [2-3] | Standard | [160-260] | [3-4] | True |
| 12 | 11 | 9 | HighSeason | [2-3] | Standard | [160-260] | [3-4] | True |
| 13 | 12 | 10 | HighSeason | [8-inf] | Standard | [160-260] | [3-4] | True |
| 14 | 14 | 11 | HighSeason | [2-3] | Standard | [0-160] | [3-4] | True |
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "data_path = os.path.join(\"data\", \"hotel_data\")\n", - "\n", - "interactions_df = pd.read_csv(os.path.join(data_path, \"hotel_data_interactions_df.csv\"), index_col=0)\n", - "\n", - "base_item_features = ['term', 'length_of_stay_bucket', 'rate_plan', 'room_segment', 'n_people_bucket', 'weekend_stay']\n", - "\n", - "column_values_dict = {\n", - " 'term': ['WinterVacation', 'Easter', 'OffSeason', 'HighSeason', 'LowSeason', 'MayLongWeekend', 'NewYear', 'Christmas'],\n", - " 'length_of_stay_bucket': ['[0-1]', '[2-3]', '[4-7]', '[8-inf]'],\n", - " 'rate_plan': ['Standard', 'Nonref'],\n", - " 'room_segment': ['[0-160]', '[160-260]', '[260-360]', '[360-500]', '[500-900]'],\n", - " 'n_people_bucket': ['[1-1]', '[2-2]', '[3-4]', '[5-inf]'],\n", - " 'weekend_stay': ['True', 'False']\n", - "}\n", - "\n", - "interactions_df.loc[:, 'term'] = pd.Categorical(\n", - " interactions_df['term'], categories=column_values_dict['term'])\n", - "interactions_df.loc[:, 'length_of_stay_bucket'] = pd.Categorical(\n", - " interactions_df['length_of_stay_bucket'], categories=column_values_dict['length_of_stay_bucket'])\n", - "interactions_df.loc[:, 'rate_plan'] = pd.Categorical(\n", - " interactions_df['rate_plan'], categories=column_values_dict['rate_plan'])\n", - "interactions_df.loc[:, 'room_segment'] = pd.Categorical(\n", - " interactions_df['room_segment'], categories=column_values_dict['room_segment'])\n", - "interactions_df.loc[:, 'n_people_bucket'] = pd.Categorical(\n", - " interactions_df['n_people_bucket'], categories=column_values_dict['n_people_bucket'])\n", - "interactions_df.loc[:, 'weekend_stay'] = interactions_df['weekend_stay'].astype('str')\n", - "interactions_df.loc[:, 'weekend_stay'] = pd.Categorical(\n", - " interactions_df['weekend_stay'], categories=column_values_dict['weekend_stay'])\n", - "\n", - "display(HTML(interactions_df.head(15).to_html()))" - ] - }, - { - "cell_type": "markdown", - "id": "realistic-third", - "metadata": {}, - "source": [ - "# (Optional) Prepare numerical user features\n", - "\n", - "The method below is left here for convenience if you want to experiment with content-based user features as an input for your neural network." 
- ] - }, - { - "cell_type": "code", - "execution_count": 304, - "id": "variable-jaguar", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['user_term_WinterVacation', 'user_term_Easter', 'user_term_OffSeason', 'user_term_HighSeason', 'user_term_LowSeason', 'user_term_MayLongWeekend', 'user_term_NewYear', 'user_term_Christmas', 'user_length_of_stay_bucket_[0-1]', 'user_length_of_stay_bucket_[2-3]', 'user_length_of_stay_bucket_[4-7]', 'user_length_of_stay_bucket_[8-inf]', 'user_rate_plan_Standard', 'user_rate_plan_Nonref', 'user_room_segment_[0-160]', 'user_room_segment_[160-260]', 'user_room_segment_[260-360]', 'user_room_segment_[360-500]', 'user_room_segment_[500-900]', 'user_n_people_bucket_[1-1]', 'user_n_people_bucket_[2-2]', 'user_n_people_bucket_[3-4]', 'user_n_people_bucket_[5-inf]', 'user_weekend_stay_True', 'user_weekend_stay_False']\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
| | user_id | user_term_WinterVacation | user_term_Easter | user_term_OffSeason | user_term_HighSeason | user_term_LowSeason | user_term_MayLongWeekend | user_term_NewYear | user_term_Christmas | user_length_of_stay_bucket_[0-1] | user_length_of_stay_bucket_[2-3] | user_length_of_stay_bucket_[4-7] | user_length_of_stay_bucket_[8-inf] | user_rate_plan_Standard | user_rate_plan_Nonref | user_room_segment_[0-160] | user_room_segment_[160-260] | user_room_segment_[260-360] | user_room_segment_[360-500] | user_room_segment_[500-900] | user_n_people_bucket_[1-1] | user_n_people_bucket_[2-2] | user_n_people_bucket_[3-4] | user_n_people_bucket_[5-inf] | user_weekend_stay_True | user_weekend_stay_False |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 0.130435 | 0.0 | 0.652174 | 0.086957 | 0.130435 | 0.000000 | 0.000000 | 0.000000 | 0.000000 | 0.608696 | 0.391304 | 0.000000 | 0.521739 | 0.478261 | 0.000000 | 0.869565 | 0.130435 | 0.000000 | 0.0 | 0.000000 | 0.739130 | 0.173913 | 0.086957 | 0.782609 | 0.217391 |
| 47 | 50 | 0.043478 | 0.0 | 0.434783 | 0.304348 | 0.217391 | 0.000000 | 0.000000 | 0.000000 | 0.000000 | 0.913043 | 0.086957 | 0.000000 | 0.260870 | 0.739130 | 0.000000 | 0.565217 | 0.434783 | 0.000000 | 0.0 | 0.000000 | 0.173913 | 0.521739 | 0.304348 | 0.782609 | 0.217391 |
| 92 | 96 | 0.083333 | 0.0 | 0.708333 | 0.125000 | 0.041667 | 0.041667 | 0.000000 | 0.000000 | 0.250000 | 0.666667 | 0.041667 | 0.041667 | 0.291667 | 0.708333 | 0.125000 | 0.791667 | 0.083333 | 0.000000 | 0.0 | 0.041667 | 0.333333 | 0.541667 | 0.083333 | 0.750000 | 0.250000 |
| 111 | 115 | 0.727273 | 0.0 | 0.272727 | 0.000000 | 0.000000 | 0.000000 | 0.000000 | 0.000000 | 0.500000 | 0.363636 | 0.136364 | 0.000000 | 1.000000 | 0.000000 | 0.000000 | 0.818182 | 0.181818 | 0.000000 | 0.0 | 0.818182 | 0.090909 | 0.045455 | 0.045455 | 0.363636 | 0.636364 |
| 675 | 706 | 0.091988 | 0.0 | 0.451039 | 0.189911 | 0.207715 | 0.038576 | 0.011869 | 0.008902 | 0.169139 | 0.459941 | 0.272997 | 0.097923 | 0.994065 | 0.005935 | 0.020772 | 0.839763 | 0.130564 | 0.008902 | 0.0 | 0.041543 | 0.094955 | 0.738872 | 0.124629 | 0.676558 | 0.323442 |
| 1699 | 1736 | 0.034483 | 0.0 | 0.482759 | 0.206897 | 0.275862 | 0.000000 | 0.000000 | 0.000000 | 0.241379 | 0.551724 | 0.206897 | 0.000000 | 0.172414 | 0.827586 | 0.000000 | 0.931034 | 0.068966 | 0.000000 | 0.0 | 0.379310 | 0.413793 | 0.206897 | 0.000000 | 0.448276 | 0.551724 |
| 7639 | 7779 | 0.037037 | 0.0 | 0.296296 | 0.259259 | 0.370370 | 0.000000 | 0.000000 | 0.037037 | 0.111111 | 0.296296 | 0.481481 | 0.111111 | 1.000000 | 0.000000 | 0.000000 | 0.814815 | 0.185185 | 0.000000 | 0.0 | 0.000000 | 0.037037 | 0.740741 | 0.222222 | 0.814815 | 0.185185 |
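Each block of probabilities in a row above is just that user's value counts normalized to sum to one; a toy sketch of the normalization follows (the notebook's own helpers `n_to_p` and `calculate_p` appear in the cell below; the example history here is hypothetical):

```python
# Toy sketch of how one block of the table above is computed:
# count how often each category occurs for a user, then normalize the counts.
def value_distribution(values, categories):
    counts = [values.count(c) for c in categories]
    total = sum(counts)
    return [c / total for c in counts] if total > 0 else counts

terms = ['OffSeason', 'OffSeason', 'WinterVacation', 'HighSeason']  # hypothetical user history
print(value_distribution(terms, ['WinterVacation', 'Easter', 'OffSeason', 'HighSeason']))
# -> [0.25, 0.0, 0.5, 0.25]
```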
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "def n_to_p(l):\n", - " n = sum(l)\n", - " return [x / n for x in l] if n > 0 else l\n", - "\n", - "def calculate_p(x, values):\n", - " counts = [0]*len(values)\n", - " for v in x:\n", - " counts[values.index(v)] += 1\n", - "\n", - " return n_to_p(counts)\n", - "\n", - "def prepare_users_df(interactions_df):\n", - "\n", - " users_df = interactions_df.loc[:, [\"user_id\"]]\n", - " users_df = users_df.groupby(\"user_id\").first().reset_index(drop=False)\n", - " \n", - " user_features = []\n", - "\n", - " for column in base_item_features:\n", - "\n", - " column_values = column_values_dict[column]\n", - " df = interactions_df.loc[:, ['user_id', column]]\n", - " df = df.groupby('user_id').aggregate(lambda x: list(x)).reset_index(drop=False)\n", - "\n", - " def calc_p(x):\n", - " return calculate_p(x, column_values)\n", - "\n", - " df.loc[:, column] = df[column].apply(lambda x: calc_p(x))\n", - "\n", - " p_columns = []\n", - " for i in range(len(column_values)):\n", - " p_columns.append(\"user_\" + column + \"_\" + column_values[i])\n", - " df.loc[:, p_columns[i]] = df[column].apply(lambda x: x[i])\n", - " user_features.append(p_columns[i])\n", - "\n", - " users_df = pd.merge(users_df, df.loc[:, ['user_id'] + p_columns], on=[\"user_id\"])\n", - " \n", - " return users_df, user_features\n", - " \n", - "\n", - "users_df, user_features = prepare_users_df(interactions_df)\n", - "\n", - "print(user_features)\n", - "\n", - "display(HTML(users_df.loc[users_df['user_id'].isin([706, 1736, 7779, 96, 1, 50, 115])].head(15).to_html()))" - ] - }, - { - "cell_type": "markdown", - "id": "amino-keyboard", - "metadata": {}, - "source": [ - "# (Optional) Prepare numerical item features\n", - "\n", - "The method below is left here for convenience if you want to experiment with content-based item features as an input for your neural network." 
- ] - }, - { - "cell_type": "code", - "execution_count": 305, - "id": "formal-munich", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['term_WinterVacation', 'term_Easter', 'term_OffSeason', 'term_HighSeason', 'term_LowSeason', 'term_MayLongWeekend', 'term_NewYear', 'term_Christmas', 'length_of_stay_bucket_[0-1]', 'length_of_stay_bucket_[2-3]', 'length_of_stay_bucket_[4-7]', 'length_of_stay_bucket_[8-inf]', 'rate_plan_Standard', 'rate_plan_Nonref', 'room_segment_[0-160]', 'room_segment_[160-260]', 'room_segment_[260-360]', 'room_segment_[360-500]', 'room_segment_[500-900]', 'n_people_bucket_[1-1]', 'n_people_bucket_[2-2]', 'n_people_bucket_[3-4]', 'n_people_bucket_[5-inf]', 'weekend_stay_True', 'weekend_stay_False']\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
| | item_id | term_WinterVacation | term_Easter | term_OffSeason | term_HighSeason | term_LowSeason | term_MayLongWeekend | term_NewYear | term_Christmas | length_of_stay_bucket_[0-1] | length_of_stay_bucket_[2-3] | length_of_stay_bucket_[4-7] | length_of_stay_bucket_[8-inf] | rate_plan_Standard | rate_plan_Nonref | room_segment_[0-160] | room_segment_[160-260] | room_segment_[260-360] | room_segment_[360-500] | room_segment_[500-900] | n_people_bucket_[1-1] | n_people_bucket_[2-2] | n_people_bucket_[3-4] | n_people_bucket_[5-inf] | weekend_stay_True | weekend_stay_False |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 |
| 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 |
| 2 | 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
| 3 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 |
| 4 | 4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 |
| 5 | 5 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 |
| 6 | 6 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 |
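A one-hot row like the ones above can be mapped back to readable feature values by picking, for each base feature, the column that is set to 1; a small sketch (not from the original notebook, `decode_one_hot_row` is an illustrative helper name):

```python
# Small sketch: recover readable values from a one-hot item row.
def decode_one_hot_row(row, base_features):
    decoded = {}
    for feature in base_features:
        for column, value in row.items():
            # Columns are named '<feature>_<value>'; the active one carries a 1
            if column.startswith(feature + "_") and value == 1:
                decoded[feature] = column[len(feature) + 1:]
    return decoded

# Example usage: decode_one_hot_row(items_df.iloc[0], base_item_features)
# -> {'term': 'WinterVacation', 'length_of_stay_bucket': '[2-3]', ...}
```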
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "def map_items_to_onehot(df):\n", - " one_hot = pd.get_dummies(df.loc[:, base_item_features])\n", - " df = df.drop(base_item_features, axis = 1)\n", - " df = df.join(one_hot)\n", - " \n", - " return df, list(one_hot.columns)\n", - "\n", - "def prepare_items_df(interactions_df):\n", - " items_df = interactions_df.loc[:, [\"item_id\"] + base_item_features].drop_duplicates()\n", - " \n", - " items_df, item_features = map_items_to_onehot(items_df)\n", - " \n", - " return items_df, item_features\n", - "\n", - "\n", - "items_df, item_features = prepare_items_df(interactions_df)\n", - "\n", - "print(item_features)\n", - "\n", - "display(HTML(items_df.loc[items_df['item_id'].isin([0, 1, 2, 3, 4, 5, 6])].head(15).to_html()))" - ] - }, - { - "cell_type": "markdown", - "id": "figured-imaging", - "metadata": {}, - "source": [ - "# Neural network recommender\n", - "\n", - "**Task:**
\n", - "Code a recommender based on a neural network model. You are free to choose any network architecture you find appropriate. The network can use the interaction vectors for users and items, embeddings of users and items, as well as user and item features (you can use the features you developed in the first project).\n", - "\n", - "Remember to keep control over randomness - in the init method add the seed as a parameter and initialize the random seed generator with that seed (both for numpy and pytorch):\n", - "\n", - "```python\n", - "self.seed = seed\n", - "self.rng = np.random.RandomState(seed=seed)\n", - "```\n", - "in the network model:\n", - "```python\n", - "self.seed = torch.manual_seed(seed)\n", - "```\n", - "\n", - "You are encouraged to experiment with:\n", - " - the number of layers in the network, the number of neurons and different activation functions,\n", - " - different optimizers and their parameters,\n", - " - batch size and the number of epochs,\n", - " - embedding layers,\n", - " - content-based features of both users and items." - ] - }, - { - "cell_type": "code", - "execution_count": 319, - "id": "unlike-recipient", - "metadata": {}, - "outputs": [], - "source": [ - "from recommenders.recommender import Recommender\n", - "\n", - "\n", - "class Net(nn.Module):\n", - " def __init__(self, features_len, output_len):\n", - " super(Net, self).__init__()\n", - " \n", - " self.fc1 = nn.Linear(features_len, 150)\n", - " self.fc2 = nn.Linear(150, 50)\n", - " self.fc3 = nn.Linear(50, 25)\n", - " self.output = nn.Linear(25, output_len+300)\n", - " \n", - " self.relu1 = nn.PReLU()\n", - " self.relu2 = nn.PReLU()\n", - " self.relu3 = nn.PReLU()\n", - " \n", - " self.dropout = nn.Dropout(p=0.2)\n", - " \n", - " def forward(self, x):\n", - " x = self.fc1(x)\n", - " x = self.relu1(x)\n", - " x = self.fc2(x)\n", - " x = self.relu2(x)\n", - " x = self.dropout(x)\n", - " x = self.fc3(x)\n", - " x = self.relu3(x)\n", - " x = self.output(x)\n", - "\n", - " return x\n", - " \n", - "# class Net(nn.Module):\n", - "# def __init__(self, features_len, output_len):\n", - "# super(Net, self).__init__()\n", - "# self.hid1 = nn.Linear(features_len, 100)\n", - "# self.hid2 = nn.Linear(100, 15)\n", - "# self.oupt = nn.Linear(15, output_len+500)\n", - "\n", - "# nn.init.xavier_uniform_(self.hid1.weight)\n", - "# nn.init.zeros_(self.hid1.bias)\n", - "# nn.init.xavier_uniform_(self.hid2.weight)\n", - "# nn.init.zeros_(self.hid2.bias)\n", - "# nn.init.xavier_uniform_(self.oupt.weight)\n", - "# nn.init.zeros_(self.oupt.bias)\n", - "\n", - "# def forward(self, x):\n", - "# z = torch.tanh(self.hid1(x))\n", - "# z = torch.tanh(self.hid2(z))\n", - "# z = torch.sigmoid(self.oupt(z))\n", - "# return z\n", - " \n", - "# class Net(nn.Module):\n", - "# def __init__(self, features_len, output_len):\n", - "# super(Net, self).__init__()\n", - " \n", - "# self.fc1 = nn.Linear(features_len, 150)\n", - "# self.fc2 = nn.Linear(150, 300)\n", - "# self.fc3 = nn.Linear(300, output_len)\n", - "# self.dropout = nn.Dropout(p=0.5)\n", - "# self.fc4 = nn.Linear(output_len, output_len+300)\n", - " \n", - "# def forward(self, x):\n", - "# x = F.relu(self.fc1(x))\n", - "# x = torch.tanh(self.fc2(x))\n", - "# x = F.relu(self.fc3(x))\n", - "# return self.fc4(x) \n", - " \n", - " \n", - "class NNRecommender(Recommender):\n", - " \"\"\"\n", - " Linear recommender class based on user and item features.\n", - " \"\"\"\n", - " \n", - " def __init__(self, seed=6789, n_neg_per_pos=5, n_epochs=2000, lr=0.05):\n", - " \"\"\"\n", - " 
Initialize base recommender params and variables.\n", - " \"\"\"\n", - " self.model = None\n", - " self.n_neg_per_pos = n_neg_per_pos\n", - " \n", - " self.recommender_df = pd.DataFrame(columns=['user_id', 'item_id', 'score'])\n", - " self.users_df = None\n", - " self.user_features = None\n", - " \n", - " self.seed = seed\n", - " self.rng = np.random.RandomState(seed=seed)\n", - " \n", - " self.n_epochs = n_epochs\n", - " self.lr = lr\n", - " \n", - " def calculate_accuracy(self, y_true, y_pred):\n", - " predictions=(y_pred.argmax(1))\n", - " return (predictions == y_true).sum().float() / len(y_true)\n", - " \n", - " def round_tensor(self, t, decimal_places=3):\n", - " return round(t.item(), decimal_places)\n", - " \n", - " def fit(self, interactions_df, users_df, items_df):\n", - " \"\"\"\n", - " Training of the recommender.\n", - " \n", - " :param pd.DataFrame interactions_df: DataFrame with recorded interactions between users and items \n", - " defined by user_id, item_id and features of the interaction.\n", - " :param pd.DataFrame users_df: DataFrame with users and their features defined by user_id and the user feature columns.\n", - " :param pd.DataFrame items_df: DataFrame with items and their features defined by item_id and the item feature columns.\n", - " \"\"\"\n", - " \n", - " interactions_df = interactions_df.copy()\n", - " # Prepare users_df and items_df \n", - " # (optional - use only if you want to train a hybrid model with content-based features)\n", - " \n", - " users_df, user_features = prepare_users_df(interactions_df)\n", - " \n", - " self.users_df = users_df\n", - " self.user_features = user_features\n", - " \n", - " items_df, item_features = prepare_items_df(interactions_df)\n", - " items_df = items_df.loc[:, ['item_id'] + item_features]\n", - " \n", - " X = items_df[['term_WinterVacation', 'term_Easter', 'term_OffSeason', 'term_HighSeason', 'term_LowSeason', 'term_MayLongWeekend', 'term_NewYear', 'term_Christmas', 'rate_plan_Standard', 'rate_plan_Nonref', 'room_segment_[0-160]', 'room_segment_[160-260]', 'room_segment_[260-360]', 'room_segment_[360-500]', 'room_segment_[500-900]', 'n_people_bucket_[1-1]', 'n_people_bucket_[2-2]', 'n_people_bucket_[3-4]', 'n_people_bucket_[5-inf]', 'weekend_stay_True', 'weekend_stay_False']]\n", - " y = items_df[['item_id']]\n", - " X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=self.seed)\n", - " \n", - " X_train = torch.from_numpy(X_train.to_numpy()).float()\n", - " y_train = torch.squeeze(torch.from_numpy(y_train.to_numpy()).long())\n", - " X_test = torch.from_numpy(X_test.to_numpy()).float()\n", - " y_test = torch.squeeze(torch.from_numpy(y_test.to_numpy()).long())\n", - " \n", - " self.net = Net(X_train.shape[1], items_df['item_id'].unique().size)\n", - " \n", - " optimizer = optim.Adam(self.net.parameters(), lr=self.lr)\n", - " criterion = nn.CrossEntropyLoss()\n", - " \n", - " for epoch in range(self.n_epochs):\n", - " y_pred = self.net(X_train)\n", - " y_pred = torch.squeeze(y_pred)\n", - " train_loss = criterion(y_pred, y_train)\n", - " \n", - " if (epoch+1) % 100 == 0:\n", - " train_acc = self.calculate_accuracy(y_train, y_pred)\n", - " y_test_pred = self.net(X_test)\n", - " y_test_pred = torch.squeeze(y_test_pred)\n", - " test_loss = criterion(y_test_pred, y_test)\n", - " test_acc = self.calculate_accuracy(y_test, y_test_pred)\n", - " print(\n", - " f'''epoch {epoch}\n", - " Train set - loss: {self.round_tensor(train_loss)}, accuracy: {self.round_tensor(train_acc)}\n", - " Test set - 
loss: {self.round_tensor(test_loss)}, accuracy: {self.round_tensor(test_acc)}\n", - " ''')\n", - " \n", - " optimizer.zero_grad()\n", - " train_loss.backward()\n", - " optimizer.step()\n", - " \n", - " def recommend(self, users_df, items_df, n_recommendations=1):\n", - " \"\"\"\n", - " Serving of recommendations. Scores items in items_df for each user in users_df and returns \n", - " top n_recommendations for each user.\n", - " \n", - " :param pd.DataFrame users_df: DataFrame with users and their features for which recommendations should be generated.\n", - " :param pd.DataFrame items_df: DataFrame with items and their features which should be scored.\n", - " :param int n_recommendations: Number of recommendations to be returned for each user.\n", - " :return: DataFrame with user_id, item_id and score as columns returning n_recommendations top recommendations \n", - " for each user.\n", - " :rtype: pd.DataFrame\n", - " \"\"\"\n", - " \n", - " # Clean previous recommendations (iloc could be used alternatively)\n", - " self.recommender_df = self.recommender_df[:0]\n", - " \n", - " # Prepare users_df and items_df\n", - " # (optional - use only if you want to train a hybrid model with content-based features)\n", - " \n", - " users_df = users_df.loc[:, 'user_id']\n", - " users_df = pd.merge(users_df, self.users_df, on=['user_id'], how='left').fillna(0)\n", - " \n", - " # items_df, item_features = prepare_items_df(items_df)\n", - " # items_df = items_df.loc[:, ['item_id'] + item_features]\n", - " \n", - " # Score the items\n", - " \n", - " recommendations = pd.DataFrame(columns=['user_id', 'item_id', 'score'])\n", - " \n", - " for ix, user in users_df.iterrows():\n", - " prep_user = torch.from_numpy(user[['user_term_WinterVacation', 'user_term_Easter', 'user_term_OffSeason', 'user_term_HighSeason', 'user_term_LowSeason', 'user_term_MayLongWeekend', 'user_term_NewYear', 'user_term_Christmas', 'user_rate_plan_Standard', 'user_rate_plan_Nonref', 'user_room_segment_[0-160]', 'user_room_segment_[160-260]', 'user_room_segment_[260-360]', 'user_room_segment_[360-500]', 'user_room_segment_[500-900]', 'user_n_people_bucket_[1-1]', 'user_n_people_bucket_[2-2]', 'user_n_people_bucket_[3-4]', 'user_n_people_bucket_[5-inf]', 'user_weekend_stay_True', 'user_weekend_stay_False']].to_numpy()).float()\n", - " \n", - " scores = self.net(prep_user).detach().numpy()\n", - " \n", - " chosen_ids = np.argsort(-scores)[:n_recommendations]\n", - " \n", - " recommendations = []\n", - " for item_id in chosen_ids:\n", - " recommendations.append(\n", - " {\n", - " 'user_id': user['user_id'],\n", - " 'item_id': item_id,\n", - " 'score': scores[item_id]\n", - " }\n", - " )\n", - " \n", - " user_recommendations = pd.DataFrame(recommendations)\n", - " \n", - " self.recommender_df = pd.concat([self.recommender_df, user_recommendations])\n", - " \n", - " return self.recommender_df\n", - "\n", - "# Fit method\n", - "# nn_recommender = NNRecommender(6789, 5, 300, 0.05)\n", - "# nn_recommender.fit(interactions_df.head(1000), None, None)\n", - "# nn_recommender.fit(interactions_df, None, None)" - ] - }, - { - "cell_type": "markdown", - "id": "copyrighted-relative", - "metadata": {}, - "source": [ - "# Quick test of the recommender" - ] - }, - { - "cell_type": "code", - "execution_count": 307, - "id": "greatest-canon", - "metadata": {}, - "outputs": [], - "source": [ - "items_df = interactions_df.loc[:, ['item_id'] + base_item_features].drop_duplicates()" - ] - }, - { - "cell_type": "code", - "execution_count": 308, - "id": 
"initial-capital", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "epoch 99\n", - " Train set - loss: 0.958, accuracy: 0.474\n", - " Test set - loss: 28.826, accuracy: 0.0\n", - " \n", - "epoch 199\n", - " Train set - loss: 0.922, accuracy: 0.484\n", - " Test set - loss: 26.949, accuracy: 0.0\n", - " \n", - "epoch 299\n", - " Train set - loss: 0.921, accuracy: 0.476\n", - " Test set - loss: 25.042, accuracy: 0.0\n", - " \n", - "epoch 399\n", - " Train set - loss: 0.907, accuracy: 0.481\n", - " Test set - loss: 23.741, accuracy: 0.0\n", - " \n", - "epoch 499\n", - " Train set - loss: 0.897, accuracy: 0.474\n", - " Test set - loss: 23.28, accuracy: 0.0\n", - " \n", - "epoch 599\n", - " Train set - loss: 0.894, accuracy: 0.472\n", - " Test set - loss: 23.993, accuracy: 0.0\n", - " \n", - "epoch 699\n", - " Train set - loss: 0.889, accuracy: 0.5\n", - " Test set - loss: 24.347, accuracy: 0.0\n", - " \n", - "epoch 799\n", - " Train set - loss: 0.907, accuracy: 0.472\n", - " Test set - loss: 25.641, accuracy: 0.0\n", - " \n", - "epoch 899\n", - " Train set - loss: 0.9, accuracy: 0.456\n", - " Test set - loss: 25.375, accuracy: 0.0\n", - " \n", - "epoch 999\n", - " Train set - loss: 0.885, accuracy: 0.479\n", - " Test set - loss: 25.575, accuracy: 0.0\n", - " \n", - "epoch 1099\n", - " Train set - loss: 0.877, accuracy: 0.494\n", - " Test set - loss: 25.631, accuracy: 0.0\n", - " \n", - "epoch 1199\n", - " Train set - loss: 0.881, accuracy: 0.482\n", - " Test set - loss: 26.272, accuracy: 0.0\n", - " \n" - ] - } - ], - "source": [ - "# Fit method\n", - "nn_recommender = NNRecommender(10000, 0.1)\n", - "# nn_recommender.fit(interactions_df.head(1000), None, None)\n", - "nn_recommender.fit(interactions_df, None, None)" - ] - }, - { - "cell_type": "code", - "execution_count": 309, - "id": "digital-consolidation", - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
| | user_id | item_id | score | term | length_of_stay_bucket | rate_plan | room_segment | n_people_bucket | weekend_stay |
|---|---|---|---|---|---|---|---|---|---|
| 0 | 1.0 | 103 | 4.634204 | OffSeason | [2-3] | Nonref | [160-260] | [2-2] | True |
| 1 | 1.0 | 466 | 4.432645 | OffSeason | [0-1] | Nonref | [160-260] | [2-2] | True |
| 2 | 1.0 | 109 | 4.307235 | OffSeason | [4-7] | Nonref | [160-260] | [2-2] | True |
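The scores above come from pushing the user's aggregated feature vector through the trained network and keeping the highest-scoring item indices; a minimal sketch mirroring the logic of `NNRecommender.recommend` (the function name here is illustrative):

```python
# Minimal sketch of how the scores above are produced: a single forward pass over
# the user's feature vector, then the top-n item indices by score.
import numpy as np
import torch

def top_n_for_user(net, user_feature_vector, n_recommendations=3):
    x = torch.from_numpy(np.asarray(user_feature_vector, dtype=np.float32))
    scores = net(x).detach().numpy()
    chosen = np.argsort(-scores)[:n_recommendations]  # indices of the highest scores
    return [(int(i), float(scores[i])) for i in chosen]
```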
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# Recommender method\n", - "\n", - "recommendations = nn_recommender.recommend(pd.DataFrame([[1]], columns=['user_id']), items_df, 3)\n", - "\n", - "recommendations = pd.merge(recommendations, items_df, on='item_id', how='left')\n", - "display(HTML(recommendations.to_html()))" - ] - }, - { - "cell_type": "markdown", - "id": "advanced-eleven", - "metadata": {}, - "source": [ - "# Tuning method" - ] - }, - { - "cell_type": "code", - "execution_count": 310, - "id": "strange-alaska", - "metadata": {}, - "outputs": [], - "source": [ - "from evaluation_and_testing.testing import evaluate_train_test_split_implicit\n", - "\n", - "seed = 6789" - ] - }, - { - "cell_type": "code", - "execution_count": 311, - "id": "stable-theta", - "metadata": {}, - "outputs": [], - "source": [ - "from hyperopt import hp, fmin, tpe, Trials\n", - "import traceback\n", - "\n", - "def tune_recommender(recommender_class, interactions_df, items_df, \n", - " param_space, max_evals=1, show_progressbar=True, seed=6789):\n", - " # Split into train_validation and test sets\n", - "\n", - " shuffle = np.arange(len(interactions_df))\n", - " rng = np.random.RandomState(seed=seed)\n", - " rng.shuffle(shuffle)\n", - " shuffle = list(shuffle)\n", - "\n", - " train_test_split = 0.8\n", - " split_index = int(len(interactions_df) * train_test_split)\n", - "\n", - " train_validation = interactions_df.iloc[shuffle[:split_index]]\n", - " test = interactions_df.iloc[shuffle[split_index:]]\n", - "\n", - " # Tune\n", - "\n", - " def loss(tuned_params):\n", - " recommender = recommender_class(seed=seed, **tuned_params)\n", - " hr1, hr3, hr5, hr10, ndcg1, ndcg3, ndcg5, ndcg10 = evaluate_train_test_split_implicit(\n", - " recommender, train_validation, items_df, seed=seed)\n", - " return -hr10\n", - "\n", - " n_tries = 1\n", - " succeded = False\n", - " try_id = 0\n", - " while not succeded and try_id < n_tries:\n", - " try:\n", - " trials = Trials()\n", - " best_param_set = fmin(loss, space=param_space, algo=tpe.suggest, \n", - " max_evals=max_evals, show_progressbar=show_progressbar, trials=trials, verbose=True)\n", - " succeded = True\n", - " except:\n", - " traceback.print_exc()\n", - " try_id += 1\n", - " \n", - " if not succeded:\n", - " return None\n", - " \n", - " # Validate\n", - " \n", - " recommender = recommender_class(seed=seed, **best_param_set)\n", - "\n", - " results = [[recommender_class.__name__] + list(evaluate_train_test_split_implicit(\n", - " recommender, {'train': train_validation, 'test': test}, items_df, seed=seed))]\n", - "\n", - " results = pd.DataFrame(results, \n", - " columns=['Recommender', 'HR@1', 'HR@3', 'HR@5', 'HR@10', 'NDCG@1', 'NDCG@3', 'NDCG@5', 'NDCG@10'])\n", - "\n", - " display(HTML(results.to_html()))\n", - " \n", - " return best_param_set" - ] - }, - { - "cell_type": "markdown", - "id": "reliable-switzerland", - "metadata": {}, - "source": [ - "## Tuning of the recommender\n", - "\n", - "**Task:**
\n", - "Tune your model using the code below. You only need to put the class name of your recommender and choose an appropriate parameter space." - ] - }, - { - "cell_type": "code", - "execution_count": 312, - "id": "obvious-astrology", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "epoch 99 \n", - " Train set - loss: 0.96, accuracy: 0.475\n", - " Test set - loss: 28.477, accuracy: 0.0\n", - " \n", - "epoch 199 \n", - " Train set - loss: 0.895, accuracy: 0.475\n", - " Test set - loss: 27.29, accuracy: 0.0\n", - " \n", - "epoch 299 \n", - " Train set - loss: 0.9, accuracy: 0.48\n", - " Test set - loss: 25.707, accuracy: 0.0\n", - " \n", - "epoch 399 \n", - " Train set - loss: 0.87, accuracy: 0.498\n", - " Test set - loss: 25.687, accuracy: 0.0\n", - " \n", - "epoch 499 \n", - " Train set - loss: 0.886, accuracy: 0.476\n", - " Test set - loss: 24.167, accuracy: 0.0\n", - " \n", - "epoch 599 \n", - " Train set - loss: 0.876, accuracy: 0.482\n", - " Test set - loss: 23.449, accuracy: 0.0\n", - " \n", - "epoch 699 \n", - " Train set - loss: 0.876, accuracy: 0.487\n", - " Test set - loss: 23.576, accuracy: 0.0\n", - " \n", - "epoch 799 \n", - " Train set - loss: 0.867, accuracy: 0.473\n", - " Test set - loss: 22.554, accuracy: 0.0\n", - " \n", - "epoch 899 \n", - " Train set - loss: 0.865, accuracy: 0.496\n", - " Test set - loss: 23.201, accuracy: 0.0\n", - " \n", - "epoch 999 \n", - " Train set - loss: 0.845, accuracy: 0.509\n", - " Test set - loss: 25.268, accuracy: 0.0\n", - " \n", - "epoch 1099 \n", - " Train set - loss: 0.855, accuracy: 0.493\n", - " Test set - loss: 25.903, accuracy: 0.0\n", - " \n", - "epoch 1199 \n", - " Train set - loss: 0.855, accuracy: 0.48\n", - " Test set - loss: 24.97, accuracy: 0.0\n", - " \n", - "100%|██████████| 1/1 [02:23<00:00, 143.24s/trial, best loss: -0.031544448996312986]\n", - "epoch 99\n", - " Train set - loss: 0.999, accuracy: 0.471\n", - " Test set - loss: 28.026, accuracy: 0.0\n", - " \n", - "epoch 199\n", - " Train set - loss: 0.937, accuracy: 0.457\n", - " Test set - loss: 26.713, accuracy: 0.0\n", - " \n", - "epoch 299\n", - " Train set - loss: 0.937, accuracy: 0.481\n", - " Test set - loss: 25.02, accuracy: 0.0\n", - " \n", - "epoch 399\n", - " Train set - loss: 0.91, accuracy: 0.481\n", - " Test set - loss: 23.575, accuracy: 0.0\n", - " \n", - "epoch 499\n", - " Train set - loss: 0.912, accuracy: 0.491\n", - " Test set - loss: 24.782, accuracy: 0.0\n", - " \n", - "epoch 599\n", - " Train set - loss: 0.918, accuracy: 0.49\n", - " Test set - loss: 23.602, accuracy: 0.0\n", - " \n", - "epoch 699\n", - " Train set - loss: 0.916, accuracy: 0.478\n", - " Test set - loss: 23.995, accuracy: 0.0\n", - " \n", - "epoch 799\n", - " Train set - loss: 0.9, accuracy: 0.463\n", - " Test set - loss: 24.721, accuracy: 0.0\n", - " \n", - "epoch 899\n", - " Train set - loss: 0.905, accuracy: 0.48\n", - " Test set - loss: 26.169, accuracy: 0.0\n", - " \n", - "epoch 999\n", - " Train set - loss: 0.896, accuracy: 0.48\n", - " Test set - loss: 25.179, accuracy: 0.0\n", - " \n", - "epoch 1099\n", - " Train set - loss: 0.884, accuracy: 0.469\n", - " Test set - loss: 27.071, accuracy: 0.0\n", - " \n", - "epoch 1199\n", - " Train set - loss: 0.91, accuracy: 0.468\n", - " Test set - loss: 27.978, accuracy: 0.0\n", - " \n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - 
" \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
| | Recommender | HR@1 | HR@3 | HR@5 | HR@10 | NDCG@1 | NDCG@3 | NDCG@5 | NDCG@10 |
|---|---|---|---|---|---|---|---|---|---|
| 0 | NNRecommender | 0.008226 | 0.01744 | 0.023692 | 0.033235 | 0.008226 | 0.013652 | 0.016258 | 0.019356 |
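For reference, HR@k counts how often the held-out item lands in the top-k recommendations, while NDCG@k additionally rewards placing it near the top of the list. A hedged single-item sketch follows; the notebook's actual implementation lives in `evaluate_train_test_split_implicit` from `evaluation_and_testing.testing` and may differ in details:

```python
# Hedged sketch of the metrics reported above, for a single held-out item.
import numpy as np

def hr_at_k(ranked_item_ids, true_item_id, k):
    # 1 if the held-out item appears in the top-k recommendations, else 0
    return int(true_item_id in ranked_item_ids[:k])

def ndcg_at_k(ranked_item_ids, true_item_id, k):
    # With a single relevant item, DCG reduces to 1 / log2(rank + 1) and IDCG is 1
    if true_item_id in ranked_item_ids[:k]:
        rank = ranked_item_ids[:k].index(true_item_id) + 1
        return 1.0 / np.log2(rank + 1)
    return 0.0

print(hr_at_k([5, 2, 9], 2, 3), ndcg_at_k([5, 2, 9], 2, 3))  # -> 1 0.6309...
```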
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Best parameters:\n", - "{'n_neg_per_pos': 7.0}\n" - ] - } - ], - "source": [ - "param_space = {\n", - " 'n_neg_per_pos': hp.quniform('n_neg_per_pos', 1, 10, 1)\n", - "}\n", - "items_df['item_id'].unique().size\n", - "\n", - "best_param_set = tune_recommender(NNRecommender, interactions_df, items_df,\n", - " param_space, max_evals=1, show_progressbar=True, seed=seed)\n", - "\n", - "print(\"Best parameters:\")\n", - "print(best_param_set)" - ] - }, - { - "cell_type": "markdown", - "id": "accredited-strap", - "metadata": {}, - "source": [ - "# Final evaluation\n", - "\n", - "**Task:**
\n", - "Run the final evaluation of your recommender and present its results against the Amazon and Netflix recommenders' results. You just need to give the class name of your recommender and its tuned parameters below." - ] - }, - { - "cell_type": "code", - "execution_count": 318, - "id": "given-homework", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "epoch 99\n", - " Train set - loss: 0.956, accuracy: 0.463\n", - " Test set - loss: 47.605, accuracy: 0.0\n", - " \n", - "epoch 199\n", - " Train set - loss: 0.949, accuracy: 0.468\n", - " Test set - loss: 41.11, accuracy: 0.0\n", - " \n", - "epoch 299\n", - " Train set - loss: 0.911, accuracy: 0.485\n", - " Test set - loss: 37.505, accuracy: 0.0\n", - " \n", - "epoch 399\n", - " Train set - loss: 0.918, accuracy: 0.461\n", - " Test set - loss: 36.35, accuracy: 0.0\n", - " \n", - "epoch 499\n", - " Train set - loss: 0.925, accuracy: 0.497\n", - " Test set - loss: 36.651, accuracy: 0.0\n", - " \n", - "epoch 599\n", - " Train set - loss: 0.901, accuracy: 0.495\n", - " Test set - loss: 35.965, accuracy: 0.0\n", - " \n", - "epoch 699\n", - " Train set - loss: 0.908, accuracy: 0.474\n", - " Test set - loss: 34.862, accuracy: 0.0\n", - " \n", - "epoch 799\n", - " Train set - loss: 0.885, accuracy: 0.485\n", - " Test set - loss: 33.993, accuracy: 0.0\n", - " \n", - "epoch 899\n", - " Train set - loss: 0.894, accuracy: 0.51\n", - " Test set - loss: 35.172, accuracy: 0.0\n", - " \n", - "epoch 999\n", - " Train set - loss: 0.897, accuracy: 0.474\n", - " Test set - loss: 34.52, accuracy: 0.0\n", - " \n", - "epoch 1099\n", - " Train set - loss: 0.888, accuracy: 0.483\n", - " Test set - loss: 34.963, accuracy: 0.0\n", - " \n", - "epoch 1199\n", - " Train set - loss: 0.89, accuracy: 0.457\n", - " Test set - loss: 34.955, accuracy: 0.0\n", - " \n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
| | Recommender | HR@1 | HR@3 | HR@5 | HR@10 | NDCG@1 | NDCG@3 | NDCG@5 | NDCG@10 |
|---|---|---|---|---|---|---|---|---|---|
| 0 | NNRecommender | 0.006252 | 0.014478 | 0.024021 | 0.031918 | 0.006252 | 0.010925 | 0.014862 | 0.017392 |
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "nn_recommender = NNRecommender(n_neg_per_pos=5) # Initialize your recommender here\n", - "\n", - "# Give the name of your recommender in the line below\n", - "nn_tts_results = [['NNRecommender'] + list(evaluate_train_test_split_implicit(\n", - " nn_recommender, interactions_df, items_df))]\n", - "\n", - "nn_tts_results = pd.DataFrame(\n", - " nn_tts_results, columns=['Recommender', 'HR@1', 'HR@3', 'HR@5', 'HR@10', 'NDCG@1', 'NDCG@3', 'NDCG@5', 'NDCG@10'])\n", - "\n", - "display(HTML(nn_tts_results.to_html()))" - ] - }, - { - "cell_type": "code", - "execution_count": 314, - "id": "suited-nomination", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
| | Recommender | HR@1 | HR@3 | HR@5 | HR@10 | NDCG@1 | NDCG@3 | NDCG@5 | NDCG@10 |
|---|---|---|---|---|---|---|---|---|---|
| 0 | AmazonRecommender | 0.042119 | 0.10464 | 0.140507 | 0.199408 | 0.042119 | 0.076826 | 0.091797 | 0.110711 |
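To present the final comparison, the two result frames can simply be concatenated into one table; a small sketch, assuming `nn_tts_results` and `amazon_tts_results` from the cells above:

```python
# Small sketch (not in the original cell): show the neural and Amazon results side by side.
import pandas as pd
from IPython.display import display, HTML

tts_results = pd.concat([nn_tts_results, amazon_tts_results]).reset_index(drop=True)
display(HTML(tts_results.to_html()))
```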
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "from recommenders.amazon_recommender import AmazonRecommender\n", - "\n", - "amazon_recommender = AmazonRecommender()\n", - "\n", - "amazon_tts_results = [['AmazonRecommender'] + list(evaluate_train_test_split_implicit(\n", - " amazon_recommender, interactions_df, items_df))]\n", - "\n", - "amazon_tts_results = pd.DataFrame(\n", - " amazon_tts_results, columns=['Recommender', 'HR@1', 'HR@3', 'HR@5', 'HR@10', 'NDCG@1', 'NDCG@3', 'NDCG@5', 'NDCG@10'])\n", - "\n", - "display(HTML(amazon_tts_results.to_html()))" - ] - }, - { - "cell_type": "code", - "execution_count": 315, - "id": "conservative-remedy", - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAbwAAAI4CAYAAAAReVyMAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAsTAAALEwEAmpwYAABE1UlEQVR4nO3de3ycdZ33/9cnM5NMzkmTtCk90II9AKVALYcV5SDggq6iLmoR+YmKiKu3h929f6K/XVH3dm93f6y3694gIuJhZUFEVG4F8bCgshxsOQgU2lJKoaG0TZO2ac6nz/3HdU06SSfJpJlkkrnez8djnJnrumbmmzHNm+/3+l6fr7k7IiIiha4o3w0QERGZDgo8ERGJBAWeiIhEggJPREQiQYEnIiKRoMATEZFIUOCJiEgkKPBE8szMtpvZ+fluh0ihU+CJiEgkKPBEZiAzKzGzr5nZzvD2NTMrCffVm9nPzWy/mbWa2R/MrCjc9xkze8XMDprZZjM7L78/icjMEc93A0Qko/8POAM4GXDgZ8DfAX8P/A3QBDSEx54BuJmtAD4OnOruO81sCRCb3maLzFzq4YnMTJcBX3L3Pe7eDHwRuDzc1wfMB4529z53/4MHRXEHgBLgeDNLuPt2d38hL60XmYEUeCIz01HAS2nPXwq3Afz/wFbgV2a2zcyuAXD3rcCngC8Ae8zsdjM7ChEBFHgiM9VO4Oi054vDbbj7QXf/G3c/Bngr8Nepc3Xu/h/u/vrwtQ780/Q2W2TmUuCJzAwJM0umbsBtwN+ZWYOZ1QOfB34AYGZ/YWavMTMD2giGMgfMbIWZvTGc3NINdIX7RAQFnshMcQ9BQKVuSWAD8BTwNPA48D/CY5cBvwHagYeBG9z9AYLzd18B9gK7gLnA56btJxCZ4UwLwIqISBSohyciIpGgwBMRkUhQ4ImISCQo8EREJBJmZGmx+vp6X7JkSb6bISIis9Bjjz22190bRm6fkYG3ZMkSNmzYkO9miIjILGRmL2XariFNERGJBAWeiIhEggJPREQiQYEnIiKRoMATEZFIUOCJiEgkKPBERCQSFHgiIhIJCjwREYkEBZ6IiESCAk9ERCJBgSciIpGgwBMRkUhQ4ImISCQo8EREJBIUeCIiEgkKPBERiQQFnoiIRIICT0REIkGBJyIikVC4gffCf8Ke5/LdChERmSEKN/B+eDk89r18t0JERGaIwg28ZA107893K0REZIYo3MArrYGu/fluhYiIzBCFG3jq4YmISJrCDTz18EREJE3hBp56eCIikqZwA089PBERSVO4gZesgf4u6O/Jd0tERGQGKNzAK60J7rsP5LUZIiIyMxRu4CVrgnsNa4qICFkGnpldaGabzWyrmV2TYf9lZvZUeHvIzE4asT9mZk+Y2c9z1fBxDfXw9k/bR4qIyMw1buCZWQy4HrgIOB641MyOH3HYi8DZ7r4a+AfgphH7PwlMb2FL9fBERCRNNj2804Ct7r7N3XuB24GL0w9w94fcfV/49BFgYWqfmS0E3gLcnJsmZylZHdyrhyciImQXeAuAHWnPm8Jto/kQcG/a868B/y8wONaHmNlVZrbBzDY0Nzdn0axxpIY01cMTERGyCzzLsM0zHmh2LkHgfSZ8/hfAHnd/bLwPcfeb3H2tu69taGjIolnjSA1pqocnIiJAPItjmoBFac8XAjtHHmRmqwmGLS9y95Zw85nA28zszUASqDKzH7j7+ybX7CzEiyFRph6eiIgA2fXw1gPLzGypmRUD64C70w8ws8XAXcDl7r4ltd3dP+vuC919Sfi6/5yWsEtReTEREQmN28Nz934z+zhwHxADbnH3jWZ2dbj/RuDzQB1wg5kB9Lv72qlrdpZUXkxERELZDGni7vcA94zYdmPa4yuBK8d5jweABybcwslQD09EREKFW2kFgh6eSouJiAiFHnjJGg1piogIUOiBV1qjIU0REQEKPfCSNdDbDgN9+W6JiIjkWWEHnpYIEhGRUGEHXqqeps7jiYhEXoEHXk1wr/N4IiKRV9iBpwLSIiISKuzAUw9PRERChR14Qz28fWMeJiIiha+wA089PBERCRV24CWSEE/qHJ6IiBR44EFYQFrX4YmIRF3hB57Ki4mICFEIPBWQFhERohB46uGJiAhRCLxkDXTpHJ6ISNRFIPCq1cMTEZEIBF5pDfS0weBAvlsiIiJ5VPiBN3TxuYY1RUSirPADT+XFRESEKASeyouJiAhRCDwtESQiIkQh8NTDExERohB4qR6eJq2IiERa4QdeqoenIU0RkUgr/MBLlEKsWEOaIiIRV/iBZ6YC0iIiEoHAAxWQFhGRiAReslo9PBGRiItI4NWohyciEnHRCLzSGvXwREQiLhqBpx6eiEjkRSPwSmuguw0GB/PdEhERyZNoBF6yBnDoUbUVEZGoikbgqYC0iEjkRSPwtAisiEjkRSPwhgpI789nK0REJI+iEXgqIC0iEnnRCDz18EREIi8agacenohI5EUj8IrLoSiuHp6ISIRFI/DMVEBaRCTiohF4oPJiIiIRF53AUwFpEZFIi07gqYcnIhJp0Qk89fBERCItq8AzswvNbLOZbTWzazLsv8zMngpvD5nZSeH2pJn90cz+ZGYbzeyLuf4BspasUWkxEZEIi493gJnFgOuBC4AmYL2Z3e3uz6Yd9iJwtrvvM7OLgJuA04Ee4I3u3m5mCeBBM7vX3R/J+U8yntKaIPDcg1mbIiISKdn08E4Dtrr7NnfvBW4HLk4/wN0fcvd94dNH
gIXhdnf39nB7Irx5Tlo+Ucka8AHoOZiXjxcRkfzKJvAWADvSnjeF20bzIeDe1BMzi5nZk8Ae4Nfu/ugRtHPyVF5MRCTSsgm8TON/GXtpZnYuQeB9ZuhA9wF3P5mg13eama0a5bVXmdkGM9vQ3NycRbMmSOXFREQiLZvAawIWpT1fCOwceZCZrQZuBi5295aR+919P/AAcGGmD3H3m9x9rbuvbWhoyKJZE6QenohIpGUTeOuBZWa21MyKgXXA3ekHmNli4C7gcnffkra9wcxqwselwPnAphy1fWLUwxMRibRxZ2m6e7+ZfRy4D4gBt7j7RjO7Otx/I/B5oA64wYIZkP3uvhaYD3wvnOlZBNzh7j+fmh9lHMnq4F49PBGRSBo38ADc/R7gnhHbbkx7fCVwZYbXPQWcMsk25kZqSFM9PBGRSIpOpZXiSrAi9fBERCIqOoFXVKQlgkREIiw6gQcqIC0iEmHRCrxUeTEREYmcgg28x17ax4t7O4ZvTNZoSFNEJKIKNvDef8sf+feHXxq+sbRGQ5oiIhFVsIFXlYzT1t03fKN6eCIikVW4gVea4EDXiMBL9fA8Pws2iIhI/hR04LWNDLxkDQz2Q29HxteIiEjhKtjAqx6thwc6jyciEkEFG3hVyQQHu/uHb0zV09R5PBGRyCnYwMvYw0utmKAenohI5BRs4FWVxmnv6ad/YPDQRhWQFhGJrIINvOrSBMDwYU318EREIqtgA68qGQTesGFN9fBERCKrYAMv1cMbdvF5STVgqqcpIhJBhRt4ZRl6eEVFkKzSkKaISAQVbOClhjTbukZemlCjIU0RkQgq2MBLDWmOWl5MREQipWADr6o0DqAC0iIiAhRw4JUmYiRiph6eiIgABRx4ZkZVcpQC0urhiYhETsEGHoxWXqxaSwSJiERQQQdeZWmCtpEFpEtrYKAX+rry0iYREcmPgg48FZAWEZGUgg68qmT88HN4Ki8mIhJJBR141aOteg7q4YmIRExBB15VOKTp6RNUhlY9Vz1NEZEoKejAqy5N0D/odPUNHNqY6uFpSFNEJFIKOvAyLxFUG9xrSFNEJFIKOvCGlghKLyCdrA7u1cMTEYmUSATe8CWCYlCiJYJERKKmoANvqIC0youJiEReQQfe6EsEVauHJyISMQUdeEOLwGqJIBGRyCvowKtMBkOaoxaQFhGRyCjowIvHiqgoiQ+fpQnBxefq4YmIREpBBx6MUUBaPTwRkUgp+MCrTMYzr3re3w193Xlpk4iITL+CD7zq0kTmSSugepoiIhFS8IFXlWnFBJUXExGJnIIPvDGXCNLEFRGRyCj4wKtKZpi0MrRE0P7pbo6IiORJwQdedWmCjt4B+gcGD21UD09EJHIKPvCG6ml2p12Lpx6eiEjkFHzgHVoiKG1YU0sEiYhETmQCb9h5vFgCEuXq4YmIREjBB15V6SgFpFVeTEQkUrIKPDO70Mw2m9lWM7smw/7LzOyp8PaQmZ0Ubl9kZveb2XNmttHMPpnrH2A8oy4RpPJiIiKREh/vADOLAdcDFwBNwHozu9vdn0077EXgbHffZ2YXATcBpwP9wN+4++NmVgk8Zma/HvHaKTW0RJAKSIuIRFo2PbzTgK3uvs3de4HbgYvTD3D3h9x9X/j0EWBhuP1Vd388fHwQeA5YkKvGZ2PsHp5Ki4mIREU2gbcA2JH2vImxQ+tDwL0jN5rZEuAU4NEJtG/SkokiEjHLfA5PQ5oiIpEx7pAmYBm2ecYDzc4lCLzXj9heAfwY+JS7t43y2quAqwAWL16cRbOyY2ajLxGkIU0RkcjIpofXBCxKe74Q2DnyIDNbDdwMXOzuLWnbEwRhd6u73zXah7j7Te6+1t3XNjQ0ZNv+rIxaXqyvAwb6Mr5GREQKSzaBtx5YZmZLzawYWAfcnX6AmS0G7gIud/ctadsN+DbwnLt/NXfNnpiMKyaovJiISKSMG3ju3g98HLiPYNLJHe6+0cyuNrOrw8M+D9QBN5jZk2a2Idx+JnA58MZw+5Nm9ubc/xhjy7xEUE1wr/N4IiKRkM05PNz9HuCeEdtuTHt8JXBlhtc9SOZzgNOqujTBjtbO4RvVwxMRiZSCr7QCUJWMa4kgEZGIi0TgpRaBdU+bXKoC0iIikRKJwKsqTdA/6HT2DhzamBrSVA9PRCQSIhF41ZkKSKeGNNXDExGJhEgEXqqe5rDzePESiJeqhyciEhGRCLxDi8BmKCCtwBMRiYRIBZ7Ki4mIRFckAq+qNLjcMOPF51oxQUQkEiIReOrhiYhIJAKvMtOkFdA5PBGRCIlE4MWKjMqS+OFr4qmHJyISGZEIPAguPs/Yw+s9CAP9GV8jIiKFI1KBd9hlCUPVVjRxRUSk0EUn8JLxDGvihfU0dR5PRKTgRSbwqksTh5/DU3kxEZHIiEzgZTyHNzSkuW/a2yMiItMrMoFXPdaq5+rhiYgUvMgEXlUyQUfvAH0Dg4c2atKKiEhkRCbwqsPyYge702ZqatVzEZHIiEzgVWUqL5YohViJhjRFRCIgMoF3aIkglRcTEYmiyAWeCkiLiERTZAIvNaSZ8Vo89fBERApeZAJPPTwRkWiLTOBVaYkgEZFIi0zgJRNFFMeKMhSQroYuXYcnIlLoIhN4ZkZVaTzzkGbPARgcyEu7RERkekQm8CBcImi0AtKqtiIiUtCiFXjJDPU0h8qL7Z/u5oiIyDSKVOCNWUBaPTwRkYIWqcAbc4kgXZogIlLQIhV41aVx2rpHzNJUAWkRkUiIVOBVJYMenrsf2qgenohIJEQq8KpLEwwMOp29aZcgqIcnIhIJkQq8zEsElUFRQj08EZECF6nAq85UQNpM5cVERCIgkoF3oFMFpEVEoiZSgTdqAelktXp4IiIFLlKBd2hIM8OlCerhiYgUtEgFXlVpHBhlTTz18EREClqkAq8yHNLMWF5MPTwRkYIWqcCLFRmVJaMtEdQGg4N5aZeIiEy9SAUejLFEkA9C78G8tElERKZeNANPBaRFRCIncoFXXRqnrUsFpEVEoiZygZcqID2MengiIgUvcoFXPdo5PFAPT0SkgEUu8LQIrIhINGUVeGZ2oZltNrOtZnZNhv2XmdlT4e0hMzspbd8tZrbHzJ7JZcOPVHVpgs7eAfoG0i5BUA9PRKTgjRt4ZhYDrgcuAo4HLjWz40cc9iJwtruvBv4BuClt33eBC3PS2hyoSgbVVobN1CyuAIuphyciUsCy6eGdBmx1923u3gvcDlycfoC7P+Tu+8KnjwAL0/b9HmjNUXsnrbosQz1NMxWQFhEpcNkE3gJgR9rzpnDbaD4E3DvRhpjZVWa2wcw2NDc3T/TlWavOtAgsqLyYiEiByybwLMM2z3ig2bkEgfeZiTbE3W9y97XuvrahoWGiL8/a6EsE1UD3gSn7XBERya94Fsc0AYvSni8Edo48yMxWAzcDF7l7S26al3tDSwRl6uFpSFNEpGBl08NbDywzs6VmVgysA+5OP8DMFgN3AZe7+5bcNzN3qkYb0tSq5yIiBW3cwHP3fuDjwH3Ac8Ad7r7RzK42s6vDwz4P1AE
3mNmTZrYh9Xozuw14GFhhZk1m9qGc/xQTcGgRWPXwRESiJJshTdz9HuCeEdtuTHt8JXDlKK+9dDINzLWSeBHFsaLRe3juwaxNEREpKJGrtGJm4YoJGQpI+wD0tuelXSIiMrUiF3gAVaVxLREkIhIxkQw8FZAWEYmeSAaelggSEYmeSAZedaZVz9XDExEpaJEMvKrSeIYeXnVwrx6eiEhBimTgBefw+nFPq5CWGtJUD09EpCBFMvCqkgkGBp2O3oFDG0uqAFM9TRGRAhXJwMu4YkJRUTCsqSFNEZGCFOnAUwFpEZHoiGTgqYC0iEj0RDLw1MMTEYmeSAbemIvAqocnIlKQIhl4h5YIylBAWj08EZGCFMnAq0gGqyKNuUSQiIgUlEgGXqzIqExmWDGhtAYG+6CvMy/tEhGRqRPJwIPgPJ6WCBIRiY7IBl7GJYJS9TR1Hk9EpOBENvAyFpBOrZigHp6ISMGJbOAFSwSNmKU5VEBa9TRFRApNZAMv4yKwWhNPRKRgRTbwMp/DqwnuNaQpIlJwIht4VaUJOnsH6BsYPLRRk1ZERApWZAMv8xJBsSD0Olvz1CoREZkqkQ+8w67FqzkaWrfloUUiIjKVIht4VaWjlBdrWAF7n89Di0REZCrF892AfBm1gHT9cnj6R9DbCcVleWiZiBSivr4+mpqa6O7uzndTCkYymWThwoUkEomsjo9s4I26RFD9suC+5XmYf9I0t0pEClVTUxOVlZUsWbIEM8t3c2Y9d6elpYWmpiaWLl2a1WsiO6Q56jm8+hXBvYY1RSSHuru7qaurU9jliJlRV1c3oR5zZAOvKtMsTYA5x4AVwd4teWiViBQyhV1uTfT7jGzgJRMxiuNFh198nkgGMzWbN+enYSIiU2T//v3ccMMNE37dm9/8Zvbv3z/mMZ///Of5zW9+c4Qtmx6RDTwYZYkg0ExNESlIowXewMDAmK+75557qKmpGfOYL33pS5x//vmTad6Ui3TgVZfGDy8gDcHElZatMDj2L4GIyGxyzTXX8MILL3DyySdz6qmncu655/Le976XE088EYC3v/3tvPa1r+WEE07gpptuGnrdkiVL2Lt3L9u3b+e4447jwx/+MCeccAJvetOb6OrqAuCKK67gzjvvHDr+2muvZc2aNZx44ols2rQJgObmZi644ALWrFnDRz7yEY4++mj27t07bT9/ZGdpQnAe77BzeBBcmjDQA/tfCs7piYjk0Bf/z0ae3dmW0/c8/qgqrn3rCWMe85WvfIVnnnmGJ598kgceeIC3vOUtPPPMM0OzHG+55RbmzJlDV1cXp556Kn/5l39JXV3dsPd4/vnnue222/jWt77Fu9/9bn784x/zvve977DPqq+v5/HHH+eGG27guuuu4+abb+aLX/wib3zjG/nsZz/LL3/5y2GhOh0i3sPLUEAaNFNTRCLhtNNOGzal/+tf/zonnXQSZ5xxBjt27OD55w//G7h06VJOPvlkAF772teyffv2jO/9zne+87BjHnzwQdatWwfAhRdeSG1tbe5+mCxEu4eXTPDi3o7Dd6Suxdu7BZb/+fQ2SkQK3ng9selSXl4+9PiBBx7gN7/5DQ8//DBlZWWcc845Gaf8l5SUDD2OxWJDQ5qjHReLxejvD04duXsumz9hke/hZRzSLJsDZfWaqSkiBaWyspKDBw9m3HfgwAFqa2spKytj06ZNPPLIIzn//Ne//vXccccdAPzqV79i3759Of+MsUS7h1cap62rD3c//HoOzdQUkQJTV1fHmWeeyapVqygtLWXevHlD+y688EJuvPFGVq9ezYoVKzjjjDNy/vnXXnstl156KT/84Q85++yzmT9/PpWVlTn/nNFYvruYmaxdu9Y3bNgw5Z9z0+9f4B/v2cTTX3gTlckRtdj+zyfh2bvhMy9OeTtEpPA999xzHHfccfluRl719PQQi8WIx+M8/PDDfPSjH+XJJ5+c1Htm+l7N7DF3Xzvy2Ej38NILSB8WePXLoasVOvZCeX0eWiciUlhefvll3v3udzM4OEhxcTHf+ta3pvXzIx14QwWkO/tYUFM6fOfQTM0tCjwRkRxYtmwZTzzxRN4+P/KTVoBRLk1Im6kpIiKzXqQDb9QC0gDViyBeCs0KPBGRQhDpwBt1iSCAoiKof416eCIiBSLSgTfqIrAp9csVeCIiBSLSgVeZjGMWzNLMqH4F7H8Z+jJXEhARKWQVFRUA7Ny5k0suuSTjMeeccw7jXUb2ta99jc7OzqHn2Sw3NBUiHXhFRUZFSTzzkCaEE1c8WDlBRCSijjrqqKGVEI7EyMDLZrmhqRDpwIOwgPRYQ5qgYU0RKQif+cxnhq2H94UvfIEvfvGLnHfeeUNL+fzsZz877HXbt29n1apVAHR1dbFu3TpWr17Ne97znmG1ND/60Y+ydu1aTjjhBK699logKEi9c+dOzj33XM4991zg0HJDAF/96ldZtWoVq1at4mtf+9rQ5422DNFkZHUdnpldCPwrEANudvevjNh/GfCZ8Gk78FF3/1M2r823quQo9TQB6l4DmGZqikhu3XsN7Ho6t+/ZeCJcNPaf13Xr1vGpT32Kv/qrvwLgjjvu4Je//CWf/vSnqaqqYu/evZxxxhm87W1vO7zcYugb3/gGZWVlPPXUUzz11FOsWbNmaN+Xv/xl5syZw8DAAOeddx5PPfUUn/jEJ/jqV7/K/fffT3398GuaH3vsMb7zne/w6KOP4u6cfvrpnH322dTW1ma9DNFEjNvDM7MYcD1wEXA8cKmZHT/isBeBs919NfAPwE0TeG1ejbpEEEAiCbVHq4cnIgXhlFNOYc+ePezcuZM//elP1NbWMn/+fD73uc+xevVqzj//fF555RV279496nv8/ve/Hwqe1atXs3r16qF9d9xxB2vWrOGUU05h48aNPPvss2O258EHH+Qd73gH5eXlVFRU8M53vpM//OEPQPbLEE1ENj2804Ct7r4NwMxuBy4Ghn4Sd38o7fhHgIXZvjbfqkrjmZcISqlfriLSIpJb4/TEptIll1zCnXfeya5du1i3bh233norzc3NPPbYYyQSCZYsWZJxWaB0mXp/L774Itdddx3r16+ntraWK664Ytz3GauWc7bLEE1ENufwFgA70p43hdtG8yHg3om+1syuMrMNZrahubk5i2blxqhLBKXUL4eW52FwYNraJCIyVdatW8ftt9/OnXfeySWXXMKBAweYO3cuiUSC+++/n5deemnM15911lnceuutADzzzDM89dRTALS1tVFeXk51dTW7d+/m3nvvHXrNaMsSnXXWWfz0pz+ls7OTjo4OfvKTn/CGN7whhz/tcNn08DIN5GaMZTM7lyDwXj/R17r7TYRDoWvXrp22JRyqkgnauka5LAGCwOvvhgM7oHbJdDVLRGRKnHDCCRw8eJAFCxYwf/58LrvsMt761reydu1aTj75ZFauXDnm6z/60Y/ygQ98gNWrV3PyySdz2mmnAXDSSSdxyimncMIJJ3DMMcdw5plnDr3mqquu4qKLLmL+/Pncf//9Q9vXrFnDFVdcMfQeV1
55JaecckpOhi8zGXd5IDP7M+AL7v7n4fPPArj7/xxx3GrgJ8BF7r5lIq8dabqWBwL4t98+z7/8egtb/sdFFMczdHhfehi+cyFcdicsu2Ba2iQihUfLA02NiSwPlM2Q5npgmZktNbNiYB1w94g3XwzcBVyeCrtsX5tvVWMVkIZgIVjQ6uciIrPcuEOa7t5vZh8H7iO4tOAWd99oZleH+28EPg/UATeEJzP73X3taK+dop/liFSnFZCuryg5/ICyOVBWp5maIiKzXFbX4bn7PcA9I7bdmPb4SuDKbF87k4xZQDpFMzVFRGa9yFdaqSoNMn/cmZp7NaQpIpMz3pwJmZiJfp+RD7xDi8COM1OzswU6WqapVSJSaJLJJC0tLQq9HHF3WlpaSCaTWb8mqyHNQjbuEkFwqKZmy/NQXjcNrRKRQrNw4UKampqYzuuMC10ymWThwoXjHxhS4GVzDq8hDLzmzbD4jGlolYgUmkQiwdKlS/PdjEiL/JBmMhGjOF40duBVL4J4UjM1RURmscgHHoxTQBqgKBasnKCZmiIis5YCD6hKxsc+hweaqSkiMssp8MiigDQEgbfvJegbu/q3iIjMTAo8gokrYxaQBqhfBji0vjAtbRIRkdxS4JFlD081NUVEZjUFHuESQWNNWgGYcyxgmrgiIjJLKfAIZ2l29TE4OEYFhOIyqFmkSxNERGYpBR5BPc1Bh47e8c7jrdBMTRGRWUqBx/AlgsZUvxz2boXBwWlolYiI5JICj/Qlgsbp4TUsh/4uaGuahlaJiEguKfDIsoA0HCoi3azzeCIis40Cj7QC0uPN1EwFniauiIjMOgo8JnAOr7weSuco8EREZiEFHlkuEZRSv1yBJyIyCynwgMqSOGbZBt4yBZ6IyCykwAOKiozKkixWTICgxFhHM3S2Tn3DREQkZxR4oarSBG3d41yWAGkTV1RiTERkNlHghbIqIA3hqgloWFNEZJZR4IWqkonszuHVHA2xEgWeiMgso8ALZd3DK4pB3WsUeCIis4wCL1RVGh//wvMUzdQUEZl1FHihrHt4EMzU3Lcd+numtE0iIpI7CrxQVTJBd98gPf0D4x9cvxx8EFpemPqGiYhITijwQtVlWa6YAJqpKSIyCynwQtXZFpAGqEsFnq7FExGZLRR4oayXCAIoLoPqxVr9XERkFlHghSZUQBo0U1NEZJZR4IWqS+NAlj08CGZq7n0eBgensFUiIpIrCrzQoUVgs5i0AkEPr68T2l6ZwlaJiEiuKPBCqXN42Q9pavVzEZHZRIEXSiZilMSLsh/SrF8R3GumpojIrKDAS1NVmmUBaYDyekjWaKamiMgsocBLM6HyYmbBsKZ6eCIis4ICL01VcgIFpAEaluscnojILKHASzOhHh4EPbz23dC1f8raJCIiuaHASxOcw8vysgTQxBURkVlEgZdm4j08FZEWEZktFHhpqpIJDnb3MTjo2b2g5miIFWumpojILKDAS1NdmmDQob03y2HNWBzqXqMhTRGRWUCBl6Z6ogWkQUWkRURmCQVemqqJFpCGYKZm64vQ3ztFrRIRkVxQ4KU5tETQBGdq+gC0bpuiVomISC4o8NJMaBHYlKGZmpq4IiIyk2UVeGZ2oZltNrOtZnZNhv0rzexhM+sxs78dse+TZvaMmW00s0/lqN1T4ojP4YHO44mIzHDjBp6ZxYDrgYuA44FLzez4EYe1Ap8Arhvx2lXAh4HTgJOAvzCzZTlo95Q4tCbeBAKvuByqF2mmpojIDJdND+80YKu7b3P3XuB24OL0A9x9j7uvB0YmxXHAI+7e6e79wO+Ad+Sg3VOisiSO2QSHNCHo5TVrSFNEZCbLJvAWADvSnjeF27LxDHCWmdWZWRnwZmBRpgPN7Coz22BmG5qbm7N8+9wqKjKqkglaOyY44zK1aoJnecG6iIhMu2wCzzJsy+ovu7s/B/wT8Gvgl8CfgIxTIN39Jndf6+5rGxoasnn7KbG0vpwXmtsn9qL65dDXAW07p6ZRIiIyadkEXhPDe2ULgaz/srv7t919jbufRXCub0af7FrZWMnmXQfxifTW6pcH95qpKSIyY2UTeOuBZWa21MyKgXXA3dl+gJnNDe8XA+8EbjuShk6XFY2V7Ovso/lgT/YvGgq8GZ3lIiKRFh/vAHfvN7OPA/cBMeAWd99oZleH+280s0ZgA1AFDIaXHxzv7m3Aj82sjmBCy8fcfd8U/Sw5saKxEoBNuw4ytyqZ3Ysq5kKyGnY9PYUtExGRyRg38ADc/R7gnhHbbkx7vItgqDPTa98wmQZOt5WNVQBs3nWQs5ZneS7RDI59I2y+Bwb6IJaYwhaKiMiRUKWVEeaUF9NQWcKmXQcn9sIT3w2dLfDC/VPTMBERmRQFXgYrGyvZvLttYi96zfmQrIGnfzQlbRIRkclR4GWwYl4lz+9uZyDbhWAB4sVwwtth0y+gt2PK2iYiIkdGgZfBisZKevoH2d4yweA68V3B9Xib752ahomIyBFT4GWQPnFlQha/DqoWaFhTRGQGUuBlsGxeBUXGxCeuFBXBqr+Erb+BjpapaZyIiBwRBV4GyUSMJXXlbN41wYkrEAxrDvbDsz/NebtEROTIKfBGsSIsMTZhjSdCw0oNa4qIzDAKvFGsaKzkpdZOOnsz1roenRmceAm8/DDsf3lqGiciIhOmwBvFysZK3OH53RNcOQGCYU2Ap+/MbaNEROSIKfBGseJIZ2oC1C6Bhacp8EREZhAF3igWzykjmSia+EzNlNXvhj0bYffG3DZMRESOiAJvFLEiY/m8IygxlnL828FimrwiIjJDKPDGsGLeEc7UBKhoCFZQePpOGBzMbcNERGTCFHhjWNFYyd72Xva2T2Ax2HQnvgsO7IAdj+a2YSIiMmEKvDGkSoxtOdJe3sq3QLwUnr4jh60SEZEjocAbw/LGCuAISoyllFTAyjfDxp9Af28OWyYiIhOlwBtDQ0UJc8qLj/w8HgQLw3btg21aGFZEJJ8UeGMwM1bMq2TT7kkE3rFvhNJaeErDmiIi+aTAG8eKxkqe332QwYksBpsuXgwnvAM23wM9R1C1RUREckKBN46VjZV09g6wY1/nkb/Jie+Cvs4g9EREJC8UeONY0VgJTGLiCsCiM6B6kS5CFxHJIwXeOJbPCwJvUhNXhhaG/S107M1Ry0REZCIUeOMoL4mzeE7Z5AIPgmFNHwguURARkWmnwMvCisZKNh3J6ufpGlfB3OM1rCkikicKvCysbKxke0sn3X0Dk3ujEy8Jyozt256TdomISPYUeFlY0VjJwKCzdc8kLytYdUlwr3XyRESmnQIvCysbczBxBaD26GDG5tM/Aj/C6/pEROSIKPCysKSunOJ4EZsnU3ElZfW7oHkT7H5m8u8lIiJZU+BlIR4r4jUNFZO7Fi/l+HdAUVyTV0REppkCL0srGyvZPNmZmgDldXDsefD0j7UwrIjINFLgZWlFYyW723rY35mDZX5OfBe0NcHLD0/+vUREJCsKvCzlpMRYyso3Q6JMw5oiItNIgZel1Ornk56pCVBcHqyG/uxPtTCsi
[... remainder of base64-encoded PNG output omitted: livelossplot training/validation loss curves for the NetflixRecommender run ...]\n", - "text/plain": [ - "
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loss\n", - "\ttraining \t (min: 0.161, max: 0.228, cur: 0.161)\n", - "\tvalidation \t (min: 0.176, max: 0.242, cur: 0.177)\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
Recommender | HR@1 | HR@3 | HR@5 | HR@10 | NDCG@1 | NDCG@3 | NDCG@5 | NDCG@10
0 | NetflixRecommender | 0.042777 | 0.106614 | 0.143139 | 0.200395 | 0.042777 | 0.078228 | 0.093483 | 0.111724
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "from recommenders.netflix_recommender import NetflixRecommender\n", - "\n", - "netflix_recommender = NetflixRecommender(n_epochs=30, print_type='live')\n", - "\n", - "netflix_tts_results = [['NetflixRecommender'] + list(evaluate_train_test_split_implicit(\n", - " netflix_recommender, interactions_df, items_df))]\n", - "\n", - "netflix_tts_results = pd.DataFrame(\n", - " netflix_tts_results, columns=['Recommender', 'HR@1', 'HR@3', 'HR@5', 'HR@10', 'NDCG@1', 'NDCG@3', 'NDCG@5', 'NDCG@10'])\n", - "\n", - "display(HTML(netflix_tts_results.to_html()))" - ] - }, - { - "cell_type": "code", - "execution_count": 316, - "id": "moderate-printing", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
Recommender | HR@1 | HR@3 | HR@5 | HR@10 | NDCG@1 | NDCG@3 | NDCG@5 | NDCG@10
0 | NNRecommender | 0.006581 | 0.015795 | 0.024021 | 0.036196 | 0.006581 | 0.011877 | 0.015262 | 0.019205
1 | AmazonRecommender | 0.042119 | 0.104640 | 0.140507 | 0.199408 | 0.042119 | 0.076826 | 0.091797 | 0.110711
2 | NetflixRecommender | 0.042777 | 0.106614 | 0.143139 | 0.200395 | 0.042777 | 0.078228 | 0.093483 | 0.111724
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "tts_results = pd.concat([nn_tts_results, amazon_tts_results, netflix_tts_results]).reset_index(drop=True)\n", - "display(HTML(tts_results.to_html()))" - ] - }, - { - "cell_type": "markdown", - "id": "uniform-vegetable", - "metadata": {}, - "source": [ - "# Summary\n", - "\n", - "**Task:**
\n", - "Write a summary of your experiments. What worked well and what did not? What are your thoughts how could you possibly further improve the model?" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "04e565c4", - "metadata": {}, - "outputs": [], - "source": [ - "Na początku bezmyślnie użyłem BCELoss, \n", - "to był duży błąd, który kosztował mnie godzinę szukania w internecie, dlaczego ciągle zwraca mi tylko item-id=0\n", - "\n", - "\n", - "Dodanie dropout zwiększyło HR10 z 0.03 do 0.11\n", - "\n", - "Podsumowanie:\n", - "\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "rek_uno", - "language": "python", - "name": "rek_uno" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.8" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/project_2_recommender_and_evaluation.html b/project_2_recommender_and_evaluation.html new file mode 100644 index 0000000..2b2fa80 --- /dev/null +++ b/project_2_recommender_and_evaluation.html @@ -0,0 +1,17156 @@ + + + + + +project_2_recommender_and_evaluation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/project_2_recommender_and_evaluation.ipynb b/project_2_recommender_and_evaluation.ipynb index 1ab01bb..260b9c4 100644 --- a/project_2_recommender_and_evaluation.ipynb +++ b/project_2_recommender_and_evaluation.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 302, + "execution_count": 449, "id": "alike-morgan", "metadata": {}, "outputs": [ @@ -47,7 +47,7 @@ }, { "cell_type": "code", - "execution_count": 303, + "execution_count": 450, "id": "victorian-bottom", "metadata": {}, "outputs": [ @@ -290,7 +290,7 @@ }, { "cell_type": "code", - "execution_count": 304, + "execution_count": 451, "id": "variable-jaguar", "metadata": {}, "outputs": [ @@ -611,7 +611,7 @@ }, { "cell_type": "code", - "execution_count": 305, + "execution_count": 452, "id": "formal-munich", "metadata": {}, "outputs": [ @@ -926,7 +926,7 @@ }, { "cell_type": "code", - "execution_count": 446, + "execution_count": 457, "id": "unlike-recipient", "metadata": {}, "outputs": [], @@ -999,25 +999,6 @@ "# return x\n", " \n", "# HR10 = 0.116 EPOCH 20000\n", - "# class Net(nn.Module):\n", - "# def __init__(self, features_len, output_len):\n", - "# super(Net, self).__init__()\n", - " \n", - "# self.fc1 = nn.Linear(features_len, 150)\n", - "# self.fc2 = nn.Linear(150, 100)\n", - "# self.fc3 = nn.Linear(100, output_len)\n", - "# self.fc4 = nn.Linear(output_len, output_len+200)\n", - " \n", - "# self.dropout = nn.Dropout(p=0.5)\n", - " \n", - "# def forward(self, x):\n", - "# x = F.relu(self.fc1(x))\n", - "# x = self.dropout(x)\n", - "# x = F.relu(self.fc2(x))\n", - "# x = self.dropout(x)\n", - "# x = F.relu(self.fc3(x))\n", - "# return self.fc4(x)\n", - " \n", "class Net(nn.Module):\n", " def __init__(self, features_len, output_len):\n", " super(Net, self).__init__()\n", @@ -1028,18 +1009,38 @@ " self.fc4 = nn.Linear(output_len, output_len+200)\n", " \n", " self.dropout = nn.Dropout(p=0.5)\n", - " self.prelu = nn.PReLU()\n", " \n", " def forward(self, x):\n", - " x = self.fc1(x)\n", - " x = self.prelu(x)\n", + " x = F.relu(self.fc1(x))\n", " x = self.dropout(x)\n", - " x = self.fc2(x)\n", - " x = self.prelu(x)\n", + " x = F.relu(self.fc2(x))\n", " x = self.dropout(x)\n", - " x = 
self.fc3(x)\n", - " x = self.prelu(x)\n", + " x = F.relu(self.fc3(x))\n", " return self.fc4(x)\n", + "\n", + "# A lot slower than ReLU\n", + "# class Net(nn.Module):\n", + "# def __init__(self, features_len, output_len):\n", + "# super(Net, self).__init__()\n", + " \n", + "# self.fc1 = nn.Linear(features_len, 150)\n", + "# self.fc2 = nn.Linear(150, 100)\n", + "# self.fc3 = nn.Linear(100, output_len)\n", + "# self.fc4 = nn.Linear(output_len, output_len+200)\n", + " \n", + "# self.dropout = nn.Dropout(p=0.5)\n", + "# self.prelu = nn.PReLU()\n", + " \n", + "# def forward(self, x):\n", + "# x = self.fc1(x)\n", + "# x = self.prelu(x)\n", + "# x = self.dropout(x)\n", + "# x = self.fc2(x)\n", + "# x = self.prelu(x)\n", + "# x = self.dropout(x)\n", + "# x = self.fc3(x)\n", + "# x = self.prelu(x)\n", + "# return self.fc4(x)\n", " \n", "class NNRecommender(Recommender):\n", " \"\"\"\n", @@ -1193,7 +1194,7 @@ }, { "cell_type": "code", - "execution_count": 412, + "execution_count": 13, "id": "greatest-canon", "metadata": {}, "outputs": [], @@ -1358,7 +1359,7 @@ }, { "cell_type": "code", - "execution_count": 310, + "execution_count": 454, "id": "strange-alaska", "metadata": {}, "outputs": [], @@ -1370,7 +1371,7 @@ }, { "cell_type": "code", - "execution_count": 311, + "execution_count": 455, "id": "stable-theta", "metadata": {}, "outputs": [], @@ -1445,10 +1446,10 @@ }, { "cell_type": "code", - "execution_count": 447, + "execution_count": 458, "id": "obvious-astrology", "metadata": { - "scrolled": false + "scrolled": true }, "outputs": [ { @@ -1456,102 +1457,959 @@ "output_type": "stream", "text": [ "epoch 0 \n", - " Train set - loss: 6.797\n", - " Test set - loss: 6.793\n", + " Train set - loss: 6.791\n", + " Test set - loss: 6.798\n", " \n", "epoch 1000 \n", - " Train set - loss: 1.009\n", - " Test set - loss: 29.285\n", + " Train set - loss: 1.044\n", + " Test set - loss: 25.104\n", " \n", "epoch 2000 \n", - " Train set - loss: 1.055\n", - " Test set - loss: 30.205\n", + " Train set - loss: 1.031\n", + " Test set - loss: 28.583\n", " \n", "epoch 3000 \n", - " Train set - loss: 0.971\n", - " Test set - loss: 35.335\n", + " Train set - loss: 0.995\n", + " Test set - loss: 32.894\n", " \n", "epoch 4000 \n", - " Train set - loss: 0.948\n", - " Test set - loss: 35.459\n", + " Train set - loss: 0.958\n", + " Test set - loss: 32.049\n", " \n", "epoch 5000 \n", - " Train set - loss: 0.927\n", - " Test set - loss: 35.575\n", + " Train set - loss: 0.95\n", + " Test set - loss: 33.561\n", " \n", "epoch 6000 \n", - " Train set - loss: 0.968\n", - " Test set - loss: 37.951\n", + " Train set - loss: 0.919\n", + " Test set - loss: 37.039\n", " \n", "epoch 7000 \n", - " Train set - loss: 0.963\n", - " Test set - loss: 50.067\n", + " Train set - loss: 0.951\n", + " Test set - loss: 41.181\n", " \n", "epoch 8000 \n", - " Train set - loss: 0.919\n", - " Test set - loss: 48.694\n", + " Train set - loss: 0.914\n", + " Test set - loss: 39.916\n", " \n", "epoch 9000 \n", - " Train set - loss: 0.888\n", - " Test set - loss: 51.907\n", + " Train set - loss: 0.996\n", + " Test set - loss: 40.807\n", " \n", "epoch 10000 \n", - " Train set - loss: 4.246\n", - " Test set - loss: 115.464\n", + " Train set - loss: 0.917\n", + " Test set - loss: 43.963\n", " \n", "epoch 11000 \n", - " Train set - loss: 0.911\n", - " Test set - loss: 57.464\n", + " Train set - loss: 0.974\n", + " Test set - loss: 42.84\n", " \n", "epoch 12000 \n", - " Train set - loss: 0.872\n", - " Test set - loss: 64.896\n", + " Train set - loss: 0.961\n", + " 
Test set - loss: 48.198\n", " \n", "epoch 13000 \n", - " Train set - loss: 0.931\n", - " Test set - loss: 52.029\n", + " Train set - loss: 0.923\n", + " Test set - loss: 50.819\n", " \n", "epoch 14000 \n", - " Train set - loss: 1.024\n", - " Test set - loss: 56.175\n", + " Train set - loss: 0.989\n", + " Test set - loss: 50.511\n", " \n", - " 0%| | 0/10 [18:33\", line 33, in tune_recommender\n", - " best_param_set = fmin(loss, space=param_space, algo=tpe.suggest,\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/fmin.py\", line 507, in fmin\n", - " return trials.fmin(\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/base.py\", line 682, in fmin\n", - " return fmin(\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/fmin.py\", line 553, in fmin\n", - " rval.exhaust()\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/fmin.py\", line 356, in exhaust\n", - " self.run(self.max_evals - n_done, block_until_done=self.asynchronous)\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/fmin.py\", line 292, in run\n", - " self.serial_evaluate()\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/fmin.py\", line 170, in serial_evaluate\n", - " result = self.domain.evaluate(spec, ctrl)\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/hyperopt/base.py\", line 907, in evaluate\n", - " rval = self.fn(pyll_rval)\n", - " File \"\", line 23, in loss\n", - " hr1, hr3, hr5, hr10, ndcg1, ndcg3, ndcg5, ndcg10 = evaluate_train_test_split_implicit(\n", - " File \"/home/jovyan/REK/evaluation_and_testing/testing.py\", line 93, in evaluate_train_test_split_implicit\n", - " recommender.fit(interactions_df_train, None, items_df)\n", - " File \"\", line 192, in fit\n", - " train_loss.backward()\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/torch/tensor.py\", line 245, in backward\n", - " torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)\n", - " File \"/opt/conda/envs/rek_uno/lib/python3.8/site-packages/torch/autograd/__init__.py\", line 145, in backward\n", - " Variable._execution_engine.run_backward(\n", - "KeyboardInterrupt\n" + "epoch 15000 \n", + " Train set - loss: 0.974\n", + " Test set - loss: 57.224\n", + " \n", + "epoch 16000 \n", + " Train set - loss: 0.933\n", + " Test set - loss: 62.57\n", + " \n", + "epoch 17000 \n", + " Train set - loss: 0.96\n", + " Test set - loss: 63.399\n", + " \n", + "epoch 18000 \n", + " Train set - loss: 0.937\n", + " Test set - loss: 65.288\n", + " \n", + "epoch 19000 \n", + " Train set - loss: 1.02\n", + " Test set - loss: 62.537\n", + " \n", + "epoch 0 \n", + " Train set - loss: 6.797\n", + " Test set - loss: 6.792\n", + " \n", + "epoch 1000 \n", + " Train set - loss: 1.106\n", + " Test set - loss: 23.897\n", + " \n", + "epoch 2000 \n", + " Train set - loss: 1.028\n", + " Test set - loss: 25.238\n", + " \n", + "epoch 3000 \n", + " Train set - loss: 0.981\n", + " Test set - loss: 29.186\n", + " \n", + "epoch 4000 \n", + " Train set - loss: 0.981\n", + " Test set - loss: 30.399\n", + " \n", + "epoch 5000 \n", + " Train set - loss: 0.967\n", + " Test set - loss: 33.602\n", + " \n", + "epoch 6000 \n", + " Train set - loss: 0.992\n", + " Test set - loss: 35.063\n", + " \n", + "epoch 7000 \n", + " Train set - loss: 0.955\n", + " Test set - loss: 35.093\n", + " \n", + "epoch 8000 \n", + " Train set - loss: 0.984\n", + " Test set - loss: 35.48\n", + " \n", + "epoch 9000 
\n", + " Train set - loss: 1.044\n", + " Test set - loss: 37.907\n", + " \n", + "epoch 10000 \n", + " Train set - loss: 0.914\n", + " Test set - loss: 40.246\n", + " \n", + "epoch 11000 \n", + " Train set - loss: 0.941\n", + " Test set - loss: 41.36\n", + " \n", + "epoch 12000 \n", + " Train set - loss: 0.995\n", + " Test set - loss: 41.922\n", + " \n", + "epoch 13000 \n", + " Train set - loss: 0.991\n", + " Test set - loss: 45.061\n", + " \n", + "epoch 14000 \n", + " Train set - loss: 0.907\n", + " Test set - loss: 47.871\n", + " \n", + "epoch 15000 \n", + " Train set - loss: 0.964\n", + " Test set - loss: 49.0\n", + " \n", + "epoch 16000 \n", + " Train set - loss: 0.918\n", + " Test set - loss: 49.898\n", + " \n", + "epoch 17000 \n", + " Train set - loss: 0.925\n", + " Test set - loss: 52.609\n", + " \n", + "epoch 18000 \n", + " Train set - loss: 0.943\n", + " Test set - loss: 55.524\n", + " \n", + "epoch 19000 \n", + " Train set - loss: 0.988\n", + " Test set - loss: 53.781\n", + " \n", + "epoch 0 \n", + " Train set - loss: 6.797\n", + " Test set - loss: 6.794\n", + " \n", + "epoch 1000 \n", + " Train set - loss: 1.083\n", + " Test set - loss: 24.762\n", + " \n", + "epoch 2000 \n", + " Train set - loss: 1.002\n", + " Test set - loss: 26.87\n", + " \n", + "epoch 3000 \n", + " Train set - loss: 1.002\n", + " Test set - loss: 29.752\n", + " \n", + "epoch 4000 \n", + " Train set - loss: 0.902\n", + " Test set - loss: 30.802\n", + " \n", + "epoch 5000 \n", + " Train set - loss: 0.966\n", + " Test set - loss: 33.726\n", + " \n", + "epoch 6000 \n", + " Train set - loss: 0.929\n", + " Test set - loss: 38.221\n", + " \n", + "epoch 7000 \n", + " Train set - loss: 0.923\n", + " Test set - loss: 40.249\n", + " \n", + "epoch 8000 \n", + " Train set - loss: 0.941\n", + " Test set - loss: 43.72\n", + " \n", + "epoch 9000 \n", + " Train set - loss: 0.988\n", + " Test set - loss: 45.261\n", + " \n", + "epoch 10000 \n", + " Train set - loss: 0.958\n", + " Test set - loss: 49.028\n", + " \n", + "epoch 11000 \n", + " Train set - loss: 0.914\n", + " Test set - loss: 51.199\n", + " \n", + "epoch 12000 \n", + " Train set - loss: 0.984\n", + " Test set - loss: 52.24\n", + " \n", + "epoch 13000 \n", + " Train set - loss: 0.935\n", + " Test set - loss: 58.326\n", + " \n", + "epoch 14000 \n", + " Train set - loss: 0.932\n", + " Test set - loss: 55.572\n", + " \n", + "epoch 15000 \n", + " Train set - loss: 0.932\n", + " Test set - loss: 57.253\n", + " \n", + "epoch 16000 \n", + " Train set - loss: 0.901\n", + " Test set - loss: 59.313\n", + " \n", + "epoch 17000 \n", + " Train set - loss: 0.934\n", + " Test set - loss: 59.817\n", + " \n", + "epoch 18000 \n", + " Train set - loss: 0.994\n", + " Test set - loss: 57.325\n", + " \n", + "epoch 19000 \n", + " Train set - loss: 0.913\n", + " Test set - loss: 59.364\n", + " \n", + "epoch 0 \n", + " Train set - loss: 6.795\n", + " Test set - loss: 6.796\n", + " \n", + "epoch 1000 \n", + " Train set - loss: 1.067\n", + " Test set - loss: 25.381\n", + " \n", + "epoch 2000 \n", + " Train set - loss: 1.039\n", + " Test set - loss: 27.164\n", + " \n", + "epoch 3000 \n", + " Train set - loss: 0.958\n", + " Test set - loss: 30.859\n", + " \n", + "epoch 4000 \n", + " Train set - loss: 0.961\n", + " Test set - loss: 32.549\n", + " \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "epoch 5000 \n", + " Train set - loss: 0.922\n", + " Test set - loss: 38.252\n", + " \n", + "epoch 6000 \n", + " Train set - loss: 0.971\n", + " Test set - loss: 37.736\n", + " 
\n", + "epoch 7000 \n", + " Train set - loss: 0.986\n", + " Test set - loss: 43.201\n", + " \n", + "epoch 8000 \n", + " Train set - loss: 0.949\n", + " Test set - loss: 43.737\n", + " \n", + "epoch 9000 \n", + " Train set - loss: 0.895\n", + " Test set - loss: 44.754\n", + " \n", + "epoch 10000 \n", + " Train set - loss: 0.976\n", + " Test set - loss: 49.17\n", + " \n", + "epoch 11000 \n", + " Train set - loss: 0.941\n", + " Test set - loss: 51.909\n", + " \n", + "epoch 12000 \n", + " Train set - loss: 0.917\n", + " Test set - loss: 53.406\n", + " \n", + "epoch 13000 \n", + " Train set - loss: 0.97\n", + " Test set - loss: 57.24\n", + " \n", + "epoch 14000 \n", + " Train set - loss: 0.944\n", + " Test set - loss: 54.791\n", + " \n", + "epoch 15000 \n", + " Train set - loss: 0.969\n", + " Test set - loss: 56.372\n", + " \n", + "epoch 16000 \n", + " Train set - loss: 0.981\n", + " Test set - loss: 58.586\n", + " \n", + "epoch 17000 \n", + " Train set - loss: 0.965\n", + " Test set - loss: 57.376\n", + " \n", + "epoch 18000 \n", + " Train set - loss: 0.988\n", + " Test set - loss: 60.655\n", + " \n", + "epoch 19000 \n", + " Train set - loss: 0.883\n", + " Test set - loss: 58.51\n", + " \n", + "epoch 0 \n", + " Train set - loss: 6.794\n", + " Test set - loss: 6.786\n", + " \n", + "epoch 1000 \n", + " Train set - loss: 1.074\n", + " Test set - loss: 24.294\n", + " \n", + "epoch 2000 \n", + " Train set - loss: 1.002\n", + " Test set - loss: 25.177\n", + " \n", + "epoch 3000 \n", + " Train set - loss: 0.979\n", + " Test set - loss: 28.115\n", + " \n", + "epoch 4000 \n", + " Train set - loss: 0.974\n", + " Test set - loss: 31.27\n", + " \n", + "epoch 5000 \n", + " Train set - loss: 0.929\n", + " Test set - loss: 35.596\n", + " \n", + "epoch 6000 \n", + " Train set - loss: 0.956\n", + " Test set - loss: 39.096\n", + " \n", + "epoch 7000 \n", + " Train set - loss: 0.944\n", + " Test set - loss: 39.886\n", + " \n", + "epoch 8000 \n", + " Train set - loss: 0.951\n", + " Test set - loss: 44.383\n", + " \n", + "epoch 9000 \n", + " Train set - loss: 0.976\n", + " Test set - loss: 46.715\n", + " \n", + "epoch 10000 \n", + " Train set - loss: 0.907\n", + " Test set - loss: 48.878\n", + " \n", + "epoch 11000 \n", + " Train set - loss: 0.957\n", + " Test set - loss: 49.986\n", + " \n", + "epoch 12000 \n", + " Train set - loss: 0.998\n", + " Test set - loss: 52.608\n", + " \n", + "epoch 13000 \n", + " Train set - loss: 0.986\n", + " Test set - loss: 51.419\n", + " \n", + "epoch 14000 \n", + " Train set - loss: 0.984\n", + " Test set - loss: 55.804\n", + " \n", + "epoch 15000 \n", + " Train set - loss: 0.965\n", + " Test set - loss: 57.902\n", + " \n", + "epoch 16000 \n", + " Train set - loss: 0.905\n", + " Test set - loss: 57.022\n", + " \n", + "epoch 17000 \n", + " Train set - loss: 0.96\n", + " Test set - loss: 53.676\n", + " \n", + "epoch 18000 \n", + " Train set - loss: 0.939\n", + " Test set - loss: 62.478\n", + " \n", + "epoch 19000 \n", + " Train set - loss: 0.93\n", + " Test set - loss: 61.828\n", + " \n", + "epoch 0 \n", + " Train set - loss: 6.793\n", + " Test set - loss: 6.794\n", + " \n", + "epoch 1000 \n", + " Train set - loss: 1.063\n", + " Test set - loss: 23.191\n", + " \n", + "epoch 2000 \n", + " Train set - loss: 1.032\n", + " Test set - loss: 26.461\n", + " \n", + "epoch 3000 \n", + " Train set - loss: 1.02\n", + " Test set - loss: 29.392\n", + " \n", + "epoch 4000 \n", + " Train set - loss: 0.932\n", + " Test set - loss: 33.168\n", + " \n", + "epoch 5000 \n", + " Train set - loss: 
1.017\n", + " Test set - loss: 34.574\n", + " \n", + "epoch 6000 \n", + " Train set - loss: 0.975\n", + " Test set - loss: 38.711\n", + " \n", + "epoch 7000 \n", + " Train set - loss: 0.953\n", + " Test set - loss: 39.829\n", + " \n", + "epoch 8000 \n", + " Train set - loss: 0.91\n", + " Test set - loss: 41.895\n", + " \n", + "epoch 9000 \n", + " Train set - loss: 0.989\n", + " Test set - loss: 45.25\n", + " \n", + "epoch 10000 \n", + " Train set - loss: 1.0\n", + " Test set - loss: 46.407\n", + " \n", + "epoch 11000 \n", + " Train set - loss: 0.98\n", + " Test set - loss: 50.797\n", + " \n", + "epoch 12000 \n", + " Train set - loss: 0.983\n", + " Test set - loss: 53.173\n", + " \n", + "epoch 13000 \n", + " Train set - loss: 0.925\n", + " Test set - loss: 54.291\n", + " \n", + "epoch 14000 \n", + " Train set - loss: 0.926\n", + " Test set - loss: 54.929\n", + " \n", + "epoch 15000 \n", + " Train set - loss: 0.986\n", + " Test set - loss: 58.36\n", + " \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "epoch 16000 \n", + " Train set - loss: 0.944\n", + " Test set - loss: 57.972\n", + " \n", + "epoch 17000 \n", + " Train set - loss: 0.963\n", + " Test set - loss: 58.177\n", + " \n", + "epoch 18000 \n", + " Train set - loss: 0.967\n", + " Test set - loss: 57.693\n", + " \n", + "epoch 19000 \n", + " Train set - loss: 0.97\n", + " Test set - loss: 62.002\n", + " \n", + "epoch 0 \n", + " Train set - loss: 6.793\n", + " Test set - loss: 6.798\n", + " \n", + "epoch 1000 \n", + " Train set - loss: 1.046\n", + " Test set - loss: 24.413\n", + " \n", + "epoch 2000 \n", + " Train set - loss: 0.981\n", + " Test set - loss: 28.192\n", + " \n", + "epoch 3000 \n", + " Train set - loss: 0.966\n", + " Test set - loss: 29.734\n", + " \n", + "epoch 4000 \n", + " Train set - loss: 0.989\n", + " Test set - loss: 34.306\n", + " \n", + "epoch 5000 \n", + " Train set - loss: 0.967\n", + " Test set - loss: 34.852\n", + " \n", + "epoch 6000 \n", + " Train set - loss: 0.902\n", + " Test set - loss: 37.421\n", + " \n", + "epoch 7000 \n", + " Train set - loss: 0.94\n", + " Test set - loss: 37.481\n", + " \n", + "epoch 8000 \n", + " Train set - loss: 0.951\n", + " Test set - loss: 40.332\n", + " \n", + "epoch 9000 \n", + " Train set - loss: 0.945\n", + " Test set - loss: 48.709\n", + " \n", + "epoch 10000 \n", + " Train set - loss: 0.967\n", + " Test set - loss: 50.611\n", + " \n", + "epoch 11000 \n", + " Train set - loss: 0.99\n", + " Test set - loss: 49.536\n", + " \n", + "epoch 12000 \n", + " Train set - loss: 0.991\n", + " Test set - loss: 53.281\n", + " \n", + "epoch 13000 \n", + " Train set - loss: 0.911\n", + " Test set - loss: 53.05\n", + " \n", + "epoch 14000 \n", + " Train set - loss: 0.952\n", + " Test set - loss: 56.761\n", + " \n", + "epoch 15000 \n", + " Train set - loss: 0.97\n", + " Test set - loss: 57.142\n", + " \n", + "epoch 16000 \n", + " Train set - loss: 0.921\n", + " Test set - loss: 57.22\n", + " \n", + "epoch 17000 \n", + " Train set - loss: 0.937\n", + " Test set - loss: 59.433\n", + " \n", + "epoch 18000 \n", + " Train set - loss: 0.964\n", + " Test set - loss: 58.954\n", + " \n", + "epoch 19000 \n", + " Train set - loss: 0.91\n", + " Test set - loss: 57.752\n", + " \n", + "epoch 0 \n", + " Train set - loss: 6.797\n", + " Test set - loss: 6.793\n", + " \n", + "epoch 1000 \n", + " Train set - loss: 1.052\n", + " Test set - loss: 25.378\n", + " \n", + "epoch 2000 \n", + " Train set - loss: 0.967\n", + " Test set - loss: 30.641\n", + " \n", + "epoch 3000 \n", + " Train 
set - loss: 0.97\n", + " Test set - loss: 32.983\n", + " \n", + "epoch 4000 \n", + " Train set - loss: 0.931\n", + " Test set - loss: 35.008\n", + " \n", + "epoch 5000 \n", + " Train set - loss: 0.95\n", + " Test set - loss: 38.592\n", + " \n", + "epoch 6000 \n", + " Train set - loss: 0.961\n", + " Test set - loss: 41.785\n", + " \n", + "epoch 7000 \n", + " Train set - loss: 0.93\n", + " Test set - loss: 46.456\n", + " \n", + "epoch 8000 \n", + " Train set - loss: 0.977\n", + " Test set - loss: 46.483\n", + " \n", + "epoch 9000 \n", + " Train set - loss: 0.955\n", + " Test set - loss: 48.554\n", + " \n", + "epoch 10000 \n", + " Train set - loss: 0.941\n", + " Test set - loss: 53.479\n", + " \n", + "epoch 11000 \n", + " Train set - loss: 1.003\n", + " Test set - loss: 51.243\n", + " \n", + "epoch 12000 \n", + " Train set - loss: 0.987\n", + " Test set - loss: 55.073\n", + " \n", + "epoch 13000 \n", + " Train set - loss: 0.995\n", + " Test set - loss: 56.564\n", + " \n", + "epoch 14000 \n", + " Train set - loss: 0.953\n", + " Test set - loss: 55.438\n", + " \n", + "epoch 15000 \n", + " Train set - loss: 0.911\n", + " Test set - loss: 58.512\n", + " \n", + "epoch 16000 \n", + " Train set - loss: 0.922\n", + " Test set - loss: 57.445\n", + " \n", + "epoch 17000 \n", + " Train set - loss: 0.949\n", + " Test set - loss: 60.568\n", + " \n", + "epoch 18000 \n", + " Train set - loss: 0.984\n", + " Test set - loss: 60.303\n", + " \n", + "epoch 19000 \n", + " Train set - loss: 0.962\n", + " Test set - loss: 63.902\n", + " \n", + "100%|██████████| 10/10 [3:22:15<00:00, 1213.59s/trial, best loss: -0.0823433019254404]\n", + "epoch 0\n", + " Train set - loss: 6.842\n", + " Test set - loss: 6.834\n", + " \n", + "epoch 1000\n", + " Train set - loss: 1.101\n", + " Test set - loss: 25.026\n", + " \n", + "epoch 2000\n", + " Train set - loss: 0.971\n", + " Test set - loss: 28.552\n", + " \n", + "epoch 3000\n", + " Train set - loss: 0.989\n", + " Test set - loss: 32.089\n", + " \n", + "epoch 4000\n", + " Train set - loss: 0.99\n", + " Test set - loss: 33.257\n", + " \n", + "epoch 5000\n", + " Train set - loss: 0.985\n", + " Test set - loss: 36.744\n", + " \n", + "epoch 6000\n", + " Train set - loss: 0.971\n", + " Test set - loss: 38.915\n", + " \n", + "epoch 7000\n", + " Train set - loss: 0.977\n", + " Test set - loss: 40.527\n", + " \n", + "epoch 8000\n", + " Train set - loss: 1.013\n", + " Test set - loss: 42.967\n", + " \n", + "epoch 9000\n", + " Train set - loss: 0.981\n", + " Test set - loss: 44.936\n", + " \n", + "epoch 10000\n", + " Train set - loss: 0.975\n", + " Test set - loss: 52.466\n", + " \n", + "epoch 11000\n", + " Train set - loss: 0.949\n", + " Test set - loss: 50.95\n", + " \n", + "epoch 12000\n", + " Train set - loss: 0.933\n", + " Test set - loss: 51.5\n", + " \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "epoch 13000\n", + " Train set - loss: 1.023\n", + " Test set - loss: 54.636\n", + " \n", + "epoch 14000\n", + " Train set - loss: 0.987\n", + " Test set - loss: 59.892\n", + " \n", + "epoch 15000\n", + " Train set - loss: 0.996\n", + " Test set - loss: 57.323\n", + " \n", + "epoch 16000\n", + " Train set - loss: 0.989\n", + " Test set - loss: 61.067\n", + " \n", + "epoch 17000\n", + " Train set - loss: 0.969\n", + " Test set - loss: 64.222\n", + " \n", + "epoch 18000\n", + " Train set - loss: 0.925\n", + " Test set - loss: 62.306\n", + " \n", + "epoch 19000\n", + " Train set - loss: 1.006\n", + " Test set - loss: 63.963\n", + " \n" + ] + }, + { + "data": { + 
"text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
Recommender | HR@1 | HR@3 | HR@5 | HR@10 | NDCG@1 | NDCG@3 | NDCG@5 | NDCG@10
0 | NNRecommender | 0.005265 | 0.015137 | 0.020401 | 0.032247 | 0.005265 | 0.010976 | 0.013143 | 0.01686
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Best parameters:\n", + "{'n_neg_per_pos': 5.0}\n" ] } ], @@ -1581,19 +2439,50 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 434, "id": "given-homework", "metadata": {}, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "epoch 0\n", - " Train set - loss: 6.842\n", - " Test set - loss: 6.843\n", - " \n" - ] + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
Recommender | HR@1 | HR@3 | HR@5 | HR@10 | NDCG@1 | NDCG@3 | NDCG@5 | NDCG@10
0 | NNRecommender | 0.025008 | 0.035209 | 0.066469 | 0.116815 | 0.025008 | 0.0311 | 0.043697 | 0.059459
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -1844,14 +2733,16 @@ }, { "cell_type": "markdown", - "id": "1b89411a", + "id": "8caf15c1", "metadata": {}, "source": [ "What did not work:\n", - "- I tried to use softmax, it wasn't good idea\n", - "- Firstly, I copy and paste without thinking some code from tutorial for binary linear regresion. BCELoss is not a good idea for mutli-classification.\n", + "- I tried to use softmax, it wasn't a good idea\n", + "- Firstly, I copied and pasted some code without thinking from tutorial for binary linear regresion. BCELoss is not a good idea for mutli-classification.\n", "- More layers don't mean better results.\n", "- More epochs don't always mean better results.\n", + "- PReLU was a lot slower than ReLU and it did not give me better results.\n", + "- For some reason, n_neg_per_pos I got from fitting wasn't the best fit. With one point bigger n_neg_per_pos I got better results. \n", "\n", "What did work well:\n", "- Dropout layer increased results significantly (from HR@10 0.03 to 0.116).\n", @@ -1861,8 +2752,7 @@ "How to further improve model:\n", "- Add more data or more features\n", "- Work on network layout\n", - " \n", - "\n" + "- Try using \"One vs All\" layout. " ] } ],