{
"cells": [
{
"cell_type": "markdown",
"source": [
"# LSTM"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 1,
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"\n",
"train = pd.read_csv(\"train.csv\")\n",
"test = pd.read_csv(\"test.csv\")\n",
"valid = pd.read_csv(\"valid.csv\")\n",
"\n",
"train.loc[train[\"review_score\"]==-1, \"review_score\"]=0\n",
"test.loc[test[\"review_score\"]==-1, \"review_score\"]=0\n",
"valid.loc[valid[\"review_score\"]==-1, \"review_score\"]=0"
],
"metadata": {
"collapsed": false
}
},
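{
"cell_type": "markdown",
"source": [
"A quick look at the label distribution after remapping the negative class from -1 to 0 (a minimal sanity-check sketch, not part of the original run)."
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [
"# Sanity-check sketch: class balance of the remapped labels in each split\n",
"print(train[\"review_score\"].value_counts())\n",
"print(valid[\"review_score\"].value_counts())\n",
"print(test[\"review_score\"].value_counts())"
],
"metadata": {
"collapsed": false
}
},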
{
"cell_type": "markdown",
"source": [
"### Checking the length of the longest review (in theory Steam allows at most 8,000 characters)"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 2,
"outputs": [],
"source": [
"train[\"seq_length\"] = train[\"review_text\"].apply(lambda x : len(x.split()))"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 3,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"1570\n"
]
}
],
"source": [
"print(train[\"seq_length\"].max())"
],
"metadata": {
"collapsed": false
}
},
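{
"cell_type": "markdown",
"source": [
"`seq_length` above counts whitespace-separated tokens, while the 8,000 limit in the heading refers to characters; below is a minimal sketch comparing the character-level maximum against that cap (assumes the same `train` frame, not part of the original run)."
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [
"# Sketch: character-level lengths, to compare with Steam's theoretical 8,000-character cap\n",
"char_lengths = train[\"review_text\"].str.len()\n",
"print(char_lengths.max())\n",
"print((char_lengths > 8000).sum())"
],
"metadata": {
"collapsed": false
}
},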
{
"cell_type": "code",
"execution_count": 4,
"outputs": [],
"source": [
"import tensorflow as tf\n",
"\n",
"SEQ_PADDED_LENGTH = 1600\n",
"VOCABULARY_SIZE = 4000\n",
"vectorizer = tf.keras.layers.TextVectorization(output_sequence_length=SEQ_PADDED_LENGTH, max_tokens=VOCABULARY_SIZE)\n",
"vectorizer.adapt(train[\"review_text\"])"
],
"metadata": {
"collapsed": false
}
},
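{
"cell_type": "markdown",
"source": [
"A quick check of the adapted vectorizer (a minimal sketch; the example sentence is made up): a single review maps to a padded sequence of `SEQ_PADDED_LENGTH` token ids, where index 0 is the padding token and index 1 the out-of-vocabulary token."
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [
"# Sketch: inspect how one (made-up) review is vectorized\n",
"sample = vectorizer(\"this game is great\")\n",
"print(sample.shape)                      # (1600,) == SEQ_PADDED_LENGTH\n",
"print(sample[:10])                       # first token ids; 0 = padding, 1 = OOV\n",
"print(vectorizer.get_vocabulary()[:10])  # '', '[UNK]', then the most frequent tokens"
],
"metadata": {
"collapsed": false
}
},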
{
"cell_type": "code",
"execution_count": 5,
"outputs": [
{
"data": {
"text/plain": "4000"
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len(vectorizer.get_vocabulary())"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 6,
"outputs": [],
"source": [
"train[\"vectorized\"] = train[\"review_text\"].apply(vectorizer)"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 7,
"outputs": [],
"source": [
"test[\"vectorized\"] = test[\"review_text\"].apply(vectorizer)\n",
"valid[\"vectorized\"] = valid[\"review_text\"].apply(vectorizer)"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 42,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"model_9\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" input_15 (InputLayer) [(None, 1600)] 0 \n",
" \n",
" embedding_14 (Embedding) (None, 1600, 16) 64016 \n",
" \n",
" lstm_15 (LSTM) (None, 64) 20736 \n",
" \n",
" dense_9 (Dense) (None, 1) 65 \n",
" \n",
"=================================================================\n",
"Total params: 84,817\n",
"Trainable params: 84,817\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
]
}
],
"source": [
"from keras.optimizers import Adam\n",
"import keras.layers as layers\n",
"import keras\n",
"\n",
"\n",
"def create_model():\n",
"    input_layer = layers.Input(shape=(SEQ_PADDED_LENGTH,))\n",
"    embedding_layer = layers.Embedding(input_dim=VOCABULARY_SIZE+1, output_dim=16, input_length=SEQ_PADDED_LENGTH)(input_layer)\n",
"    lstm_layer = layers.LSTM(64)(embedding_layer)\n",
"    output_layer = layers.Dense(1,activation=\"sigmoid\")(lstm_layer)\n",
"    model = keras.Model(inputs=input_layer, outputs=output_layer)\n",
"    model.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=1e-3), metrics=['accuracy'])\n",
"    return model\n",
"model = create_model()\n",
"model.summary()"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 11,
"outputs": [
{
"data": {
"text/plain": "TensorShape([1600])"
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train.iloc[120][\"vectorized\"].shape"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 12,
"outputs": [
{
"data": {
"text/plain": "[1600]"
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train.iloc[120][\"vectorized\"].get_shape().as_list()"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 13,
"outputs": [
{
"data": {
"text/plain": "<tf.Tensor: shape=(1600,), dtype=int64, numpy=array([423, 635, 423, ..., 0, 0, 0], dtype=int64)>"
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train.iloc[120][\"vectorized\"]"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "markdown",
"source": [
"### Some reviews contained no text, so after stripping punctuation and special characters they ended up empty; these have to be removed from the training data"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 14,
"outputs": [
{
"data": {
"text/plain": "shapes\n1600 43111\n0 119\nName: count, dtype: int64"
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train[\"shapes\"] = train[\"vectorized\"].apply(lambda x : x.get_shape().as_list()[0])\n",
"train[\"shapes\"].value_counts()"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 15,
"outputs": [
{
"data": {
"text/plain": "shapes\n1600 43111\nName: count, dtype: int64"
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train.drop(train[train[\"vectorized\"].map(lambda x : x.get_shape().as_list()[0])!=SEQ_PADDED_LENGTH].index, inplace=True)\n",
"train[\"shapes\"].value_counts()"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 16,
"outputs": [
{
"data": {
"text/plain": " Unnamed: 0 review_text review_score vectorized\n42 4552590 !!! 1 ()\n124 5286261 . 1 ()\n259 4934066 ........ 1 ()\n468 5584357 . 1 ()\n717 2172088 =] 1 ()",
"text/html": "<div>\n<style scoped>\n .dataframe tbody tr th:only-of-type {\n vertical-align: middle;\n }\n\n .dataframe tbody tr th {\n vertical-align: top;\n }\n\n .dataframe thead th {\n text-align: right;\n }\n</style>\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>Unnamed: 0</th>\n <th>review_text</th>\n <th>review_score</th>\n <th>vectorized</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>42</th>\n <td>4552590</td>\n <td>!!!</td>\n <td>1</td>\n <td>()</td>\n </tr>\n <tr>\n <th>124</th>\n <td>5286261</td>\n <td>.</td>\n <td>1</td>\n <td>()</td>\n </tr>\n <tr>\n <th>259</th>\n <td>4934066</td>\n <td>........</td>\n <td>1</td>\n <td>()</td>\n </tr>\n <tr>\n <th>468</th>\n <td>5584357</td>\n <td>.</td>\n <td>1</td>\n <td>()</td>\n </tr>\n <tr>\n <th>717</th>\n <td>2172088</td>\n <td>=]</td>\n <td>1</td>\n <td>()</td>\n </tr>\n </tbody>\n</table>\n</div>"
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#valid.drop(valid[valid[\"vectorized\"].map(lambda x : x.get_shape().as_list()[0])!=1600].index, inplace=True)\n",
"\n",
"empty_valid = valid[valid[\"vectorized\"].map(lambda x : x.get_shape().as_list()[0])==0]\n",
"empty_valid.head()"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "markdown",
"source": [
"\"0\" marks the masked (padding) positions, so the empty entries in the test set can be filled with them"
],
"metadata": {
"collapsed": false
}
},
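{
"cell_type": "markdown",
"source": [
"Note that for the LSTM to actually skip these 0 positions, the `Embedding` layer would need `mask_zero=True`; the model defined above does not set it, so the cell below is only a hedged variant sketch, not the model that was trained here."
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [
"# Variant sketch (not the model trained in this notebook): propagate a padding mask so the\n",
"# LSTM ignores the 0-padded timesteps. Reuses layers/keras/Adam imported in the model cell above.\n",
"def create_masked_model():\n",
"    input_layer = layers.Input(shape=(SEQ_PADDED_LENGTH,))\n",
"    embedding_layer = layers.Embedding(input_dim=VOCABULARY_SIZE+1, output_dim=16, mask_zero=True)(input_layer)\n",
"    lstm_layer = layers.LSTM(64)(embedding_layer)\n",
"    output_layer = layers.Dense(1, activation=\"sigmoid\")(lstm_layer)\n",
"    model = keras.Model(inputs=input_layer, outputs=output_layer)\n",
"    model.compile(loss=\"binary_crossentropy\", optimizer=Adam(learning_rate=1e-3), metrics=[\"accuracy\"])\n",
"    return model"
],
"metadata": {
"collapsed": false
}
},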
{
"cell_type": "code",
"execution_count": 17,
"outputs": [],
"source": [
"#test.loc[test[\"vectorized\"].map(lambda x : x.get_shape().as_list()[0])!=SEQ_PADDED_LENGTH,\"vectorized\"] = tf.zeros((SEQ_PADDED_LENGTH,), dtype=tf.dtypes.int64)\n",
"#valid.loc[valid[\"vectorized\"].map(lambda x : x.get_shape().as_list()[0])!=SEQ_PADDED_LENGTH,\"vectorized\"] = tf.zeros((SEQ_PADDED_LENGTH,), dtype=tf.dtypes.int64)\n",
"#empty_valid[\"vectorized\"] = tf.zeros((len(empty_valid.index),1600), dtype=tf.dtypes.int64)\n",
"#empty_test[\"vectorized\"] = tf.zeros((len(empty_test.index),1600), dtype=tf.dtypes.int64)\n",
"\n",
"#empty_valid[\"vectorized\"].iloc[0]\n",
"\n",
"# Replace empty vectors with an all-zero (padding-only) sequence of the expected length\n",
"def vector_fix(x):\n",
"    if x.get_shape().as_list()[0]==SEQ_PADDED_LENGTH:\n",
"        return x\n",
"    return tf.zeros((SEQ_PADDED_LENGTH,), dtype=tf.dtypes.int64)\n",
"\n",
"test[\"vectorized\"] = test[\"vectorized\"].apply(vector_fix)\n",
"valid[\"vectorized\"] = valid[\"vectorized\"].apply(vector_fix)"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 18,
"outputs": [],
"source": [
"#train[\"vectorized\"] = train[\"vectorized\"].apply(lambda x : x.numpy())\n",
"#valid[\"vectorized\"] = valid[\"vectorized\"].apply(lambda x : x.numpy())\n",
"#test[\"vectorized\"] = test[\"vectorized\"].apply(lambda x : x.numpy())"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 19,
"outputs": [
{
"data": {
"text/plain": "<tf.Tensor: shape=(1600,), dtype=int64, numpy=array([ 96, 2, 824, ..., 0, 0, 0], dtype=int64)>"
},
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train.iloc[0][\"vectorized\"]"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "markdown",
"source": [
"### Training even a smaller model on the full dataset took a very long time, so I shortened the training set"
],
"metadata": {
"collapsed": false
}
},
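{
"cell_type": "markdown",
"source": [
"The exact cut-off is not shown in the cell below (which stacks the full `train` frame); here is a minimal sketch of one way to subsample, where the 20,000-row figure is a placeholder and not the value actually used."
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [
"# Illustrative only: shorten the training set with a random subsample.\n",
"# The sample size below is a placeholder, not the value used in the original run.\n",
"train_small = train.sample(n=20000, random_state=42)\n",
"train_x_small = np.stack(train_small[\"vectorized\"].values)\n",
"train_y_small = np.stack(train_small[\"review_score\"].values)"
],
"metadata": {
"collapsed": false
}
},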
{
"cell_type": "code",
"execution_count": 44,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/3\n",
"1348/1348 [==============================] - 627s 465ms/step - loss: 0.6933 - accuracy: 0.4947 - val_loss: 0.6950 - val_accuracy: 0.1744\n",
"Epoch 2/3\n",
" 918/1348 [===================>..........] - ETA: 3:00 - loss: 0.6932 - accuracy: 0.4982"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
"\u001B[1;31mKeyboardInterrupt\u001B[0m Traceback (most recent call last)",
"Cell \u001B[1;32mIn [44], line 16\u001B[0m\n\u001B[0;32m 12\u001B[0m valid_x \u001B[38;5;241m=\u001B[39m np\u001B[38;5;241m.\u001B[39mstack(valid[\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mvectorized\u001B[39m\u001B[38;5;124m\"\u001B[39m]\u001B[38;5;241m.\u001B[39mvalues)\n\u001B[0;32m 15\u001B[0m \u001B[38;5;66;03m#callback = keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', patience=3, restore_best_weights=True)\u001B[39;00m\n\u001B[1;32m---> 16\u001B[0m history \u001B[38;5;241m=\u001B[39m \u001B[43mmodel\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mfit\u001B[49m\u001B[43m(\u001B[49m\u001B[43mtrain_x\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mtrain_y\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mvalidation_data\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43m(\u001B[49m\u001B[43mvalid_x\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mvalid_y\u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mepochs\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;241;43m3\u001B[39;49m\u001B[43m)\u001B[49m\n",
"File \u001B[1;32m~\\miniconda3\\lib\\site-packages\\keras\\utils\\traceback_utils.py:65\u001B[0m, in \u001B[0;36mfilter_traceback.<locals>.error_handler\u001B[1;34m(*args, **kwargs)\u001B[0m\n\u001B[0;32m 63\u001B[0m filtered_tb \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m\n\u001B[0;32m 64\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m---> 65\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m fn(\u001B[38;5;241m*\u001B[39margs, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs)\n\u001B[0;32m 66\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mException\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[0;32m 67\u001B[0m filtered_tb \u001B[38;5;241m=\u001B[39m _process_traceback_frames(e\u001B[38;5;241m.\u001B[39m__traceback__)\n",
"File \u001B[1;32m~\\miniconda3\\lib\\site-packages\\keras\\engine\\training.py:1564\u001B[0m, in \u001B[0;36mModel.fit\u001B[1;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)\u001B[0m\n\u001B[0;32m 1556\u001B[0m \u001B[38;5;28;01mwith\u001B[39;00m tf\u001B[38;5;241m.\u001B[39mprofiler\u001B[38;5;241m.\u001B[39mexperimental\u001B[38;5;241m.\u001B[39mTrace(\n\u001B[0;32m 1557\u001B[0m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mtrain\u001B[39m\u001B[38;5;124m\"\u001B[39m,\n\u001B[0;32m 1558\u001B[0m epoch_num\u001B[38;5;241m=\u001B[39mepoch,\n\u001B[1;32m (...)\u001B[0m\n\u001B[0;32m 1561\u001B[0m _r\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m1\u001B[39m,\n\u001B[0;32m 1562\u001B[0m ):\n\u001B[0;32m 1563\u001B[0m callbacks\u001B[38;5;241m.\u001B[39mon_train_batch_begin(step)\n\u001B[1;32m-> 1564\u001B[0m tmp_logs \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mtrain_function\u001B[49m\u001B[43m(\u001B[49m\u001B[43miterator\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m 1565\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m data_handler\u001B[38;5;241m.\u001B[39mshould_sync:\n\u001B[0;32m 1566\u001B[0m context\u001B[38;5;241m.\u001B[39masync_wait()\n",
"File \u001B[1;32m~\\miniconda3\\lib\\site-packages\\tensorflow\\python\\util\\traceback_utils.py:150\u001B[0m, in \u001B[0;36mfilter_traceback.<locals>.error_handler\u001B[1;34m(*args, **kwargs)\u001B[0m\n\u001B[0;32m 148\u001B[0m filtered_tb \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m\n\u001B[0;32m 149\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m--> 150\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m fn(\u001B[38;5;241m*\u001B[39margs, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs)\n\u001B[0;32m 151\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mException\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[0;32m 152\u001B[0m filtered_tb \u001B[38;5;241m=\u001B[39m _process_traceback_frames(e\u001B[38;5;241m.\u001B[39m__traceback__)\n",
"File \u001B[1;32m~\\miniconda3\\lib\\site-packages\\tensorflow\\python\\eager\\def_function.py:915\u001B[0m, in \u001B[0;36mFunction.__call__\u001B[1;34m(self, *args, **kwds)\u001B[0m\n\u001B[0;32m 912\u001B[0m compiler \u001B[38;5;241m=\u001B[39m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mxla\u001B[39m\u001B[38;5;124m\"\u001B[39m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_jit_compile \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mnonXla\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m 914\u001B[0m \u001B[38;5;28;01mwith\u001B[39;00m OptionalXlaContext(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_jit_compile):\n\u001B[1;32m--> 915\u001B[0m result \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_call(\u001B[38;5;241m*\u001B[39margs, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwds)\n\u001B[0;32m 917\u001B[0m new_tracing_count \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mexperimental_get_tracing_count()\n\u001B[0;32m 918\u001B[0m without_tracing \u001B[38;5;241m=\u001B[39m (tracing_count \u001B[38;5;241m==\u001B[39m new_tracing_count)\n",
"File \u001B[1;32m~\\miniconda3\\lib\\site-packages\\tensorflow\\python\\eager\\def_function.py:947\u001B[0m, in \u001B[0;36mFunction._call\u001B[1;34m(self, *args, **kwds)\u001B[0m\n\u001B[0;32m 944\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_lock\u001B[38;5;241m.\u001B[39mrelease()\n\u001B[0;32m 945\u001B[0m \u001B[38;5;66;03m# In this case we have created variables on the first call, so we run the\u001B[39;00m\n\u001B[0;32m 946\u001B[0m \u001B[38;5;66;03m# defunned version which is guaranteed to never create variables.\u001B[39;00m\n\u001B[1;32m--> 947\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_stateless_fn(\u001B[38;5;241m*\u001B[39margs, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwds) \u001B[38;5;66;03m# pylint: disable=not-callable\u001B[39;00m\n\u001B[0;32m 948\u001B[0m \u001B[38;5;28;01melif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_stateful_fn \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n\u001B[0;32m 949\u001B[0m \u001B[38;5;66;03m# Release the lock early so that multiple threads can perform the call\u001B[39;00m\n\u001B[0;32m 950\u001B[0m \u001B[38;5;66;03m# in parallel.\u001B[39;00m\n\u001B[0;32m 951\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_lock\u001B[38;5;241m.\u001B[39mrelease()\n",
"File \u001B[1;32m~\\miniconda3\\lib\\site-packages\\tensorflow\\python\\eager\\function.py:2496\u001B[0m, in \u001B[0;36mFunction.__call__\u001B[1;34m(self, *args, **kwargs)\u001B[0m\n\u001B[0;32m 2493\u001B[0m \u001B[38;5;28;01mwith\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_lock:\n\u001B[0;32m 2494\u001B[0m (graph_function,\n\u001B[0;32m 2495\u001B[0m filtered_flat_args) \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_maybe_define_function(args, kwargs)\n\u001B[1;32m-> 2496\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mgraph_function\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_call_flat\u001B[49m\u001B[43m(\u001B[49m\n\u001B[0;32m 2497\u001B[0m \u001B[43m \u001B[49m\u001B[43mfiltered_flat_args\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mcaptured_inputs\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mgraph_function\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mcaptured_inputs\u001B[49m\u001B[43m)\u001B[49m\n",
"File \u001B[1;32m~\\miniconda3\\lib\\site-packages\\tensorflow\\python\\eager\\function.py:1862\u001B[0m, in \u001B[0;36mConcreteFunction._call_flat\u001B[1;34m(self, args, captured_inputs, cancellation_manager)\u001B[0m\n\u001B[0;32m 1858\u001B[0m possible_gradient_type \u001B[38;5;241m=\u001B[39m gradients_util\u001B[38;5;241m.\u001B[39mPossibleTapeGradientTypes(args)\n\u001B[0;32m 1859\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m (possible_gradient_type \u001B[38;5;241m==\u001B[39m gradients_util\u001B[38;5;241m.\u001B[39mPOSSIBLE_GRADIENT_TYPES_NONE\n\u001B[0;32m 1860\u001B[0m \u001B[38;5;129;01mand\u001B[39;00m executing_eagerly):\n\u001B[0;32m 1861\u001B[0m \u001B[38;5;66;03m# No tape is watching; skip to running the function.\u001B[39;00m\n\u001B[1;32m-> 1862\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_build_call_outputs(\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_inference_function\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mcall\u001B[49m\u001B[43m(\u001B[49m\n\u001B[0;32m 1863\u001B[0m \u001B[43m \u001B[49m\u001B[43mctx\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mcancellation_manager\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mcancellation_manager\u001B[49m\u001B[43m)\u001B[49m)\n\u001B[0;32m 1864\u001B[0m forward_backward \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_select_forward_and_backward_functions(\n\u001B[0;32m 1865\u001B[0m args,\n\u001B[0;32m 1866\u001B[0m possible_gradient_type,\n\u001B[0;32m 1867\u001B[0m executing_eagerly)\n\u001B[0;32m 1868\u001B[0m forward_function, args_with_tangents \u001B[38;5;241m=\u001B[39m forward_backward\u001B[38;5;241m.\u001B[39mforward()\n",
"File \u001B[1;32m~\\miniconda3\\lib\\site-packages\\tensorflow\\python\\eager\\function.py:499\u001B[0m, in \u001B[0;36m_EagerDefinedFunction.call\u001B[1;34m(self, ctx, args, cancellation_manager)\u001B[0m\n\u001B[0;32m 497\u001B[0m \u001B[38;5;28;01mwith\u001B[39;00m _InterpolateFunctionError(\u001B[38;5;28mself\u001B[39m):\n\u001B[0;32m 498\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m cancellation_manager \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n\u001B[1;32m--> 499\u001B[0m outputs \u001B[38;5;241m=\u001B[39m \u001B[43mexecute\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mexecute\u001B[49m\u001B[43m(\u001B[49m\n\u001B[0;32m 500\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;28;43mstr\u001B[39;49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43msignature\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mname\u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m 501\u001B[0m \u001B[43m \u001B[49m\u001B[43mnum_outputs\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_num_outputs\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m 502\u001B[0m \u001B[43m \u001B[49m\u001B[43minputs\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m 503\u001B[0m \u001B[43m \u001B[49m\u001B[43mattrs\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mattrs\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m 504\u001B[0m \u001B[43m \u001B[49m\u001B[43mctx\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mctx\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m 505\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[0;32m 506\u001B[0m outputs \u001B[38;5;241m=\u001B[39m execute\u001B[38;5;241m.\u001B[39mexecute_with_cancellation(\n\u001B[0;32m 507\u001B[0m \u001B[38;5;28mstr\u001B[39m(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39msignature\u001B[38;5;241m.\u001B[39mname),\n\u001B[0;32m 508\u001B[0m num_outputs\u001B[38;5;241m=\u001B[39m\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_num_outputs,\n\u001B[1;32m (...)\u001B[0m\n\u001B[0;32m 511\u001B[0m ctx\u001B[38;5;241m=\u001B[39mctx,\n\u001B[0;32m 512\u001B[0m cancellation_manager\u001B[38;5;241m=\u001B[39mcancellation_manager)\n",
"File \u001B[1;32m~\\miniconda3\\lib\\site-packages\\tensorflow\\python\\eager\\execute.py:54\u001B[0m, in \u001B[0;36mquick_execute\u001B[1;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001B[0m\n\u001B[0;32m 52\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m 53\u001B[0m ctx\u001B[38;5;241m.\u001B[39mensure_initialized()\n\u001B[1;32m---> 54\u001B[0m tensors \u001B[38;5;241m=\u001B[39m \u001B[43mpywrap_tfe\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mTFE_Py_Execute\u001B[49m\u001B[43m(\u001B[49m\u001B[43mctx\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_handle\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mdevice_name\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mop_name\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m 55\u001B[0m \u001B[43m \u001B[49m\u001B[43minputs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mattrs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mnum_outputs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m 56\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m core\u001B[38;5;241m.\u001B[39m_NotOkStatusException \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[0;32m 57\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m name \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n",
"\u001B[1;31mKeyboardInterrupt\u001B[0m: "
]
}
],
"source": [
"#train_y = np.stack(train[\"review_score\"].values)\n",
"train_y = np.stack(train[\"review_score\"].values)\n",
"valid_y = np.stack(valid[\"review_score\"].values)\n",
"\n",
"test_y = np.stack(test[\"review_score\"].values)\n",
"\n",
"###\n",
"#train_x = np.stack(train[\"vectorized\"].values)\n",
"train_x = np.stack(train[\"vectorized\"].values)\n",
"\n",
"test_x = np.stack(test[\"vectorized\"].values)\n",
"valid_x = np.stack(valid[\"vectorized\"].values)\n",
"\n",
"\n",
"#callback = keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', patience=3, restore_best_weights=True)\n",
"history = model.fit(train_x, train_y, validation_data=(valid_x, valid_y), epochs=3)"
],
"metadata": {
"collapsed": false
}
},
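{
"cell_type": "markdown",
"source": [
"Once a full training run completes, the held-out test split prepared above could be scored as well; a minimal sketch:"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [
"# Sketch: evaluate on the held-out test split after training finishes.\n",
"# test_x / test_y were stacked in the training cell above.\n",
"test_loss, test_acc = model.evaluate(test_x, test_y)\n",
"print(f\"test loss: {test_loss:.4f}, test accuracy: {test_acc:.4f}\")"
],
"metadata": {
"collapsed": false
}
},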
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [],
"metadata": {
"collapsed": false
}
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3"
}
},
"nbformat": 4,
"nbformat_minor": 0
}