diff --git a/ocr.py b/ocr.py index ae55e6d..54fbc7a 100644 --- a/ocr.py +++ b/ocr.py @@ -1,30 +1,5 @@ import easyocr import cv2 as cv -import keras_ocr -import pytesseract - -def keras_ocr_func(): - pipeline = keras_ocr.pipeline.Pipeline() - images = [ - keras_ocr.tools.read(img) for img in ['img0.png', ] - ] - prediction_groups = pipeline.recognize(images) - car_numbers = '' - - try: - for i in prediction_groups[0]: - car_numbers += i[0] - except: - print('no detection') - - return car_numbers - -def tesseract_ocr(): - img = cv.imread('img0.png') - res = pytesseract.image_to_string(img, - lang='eng', - config='--oem 3 --psm 6 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789') - return res def get_text_from_image(img_path, cut=7): text = '' diff --git a/yolo.ipynb b/yolo.ipynb index 4f5bbd8..6121dad 100644 --- a/yolo.ipynb +++ b/yolo.ipynb @@ -1254,7 +1254,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "id": "4038756b", "metadata": {}, "outputs": [ @@ -1264,15 +1264,16 @@ "text": [ "-------------------CLASS NAMES-------------------\n", "['licence']\n", - "-------------------CLASS NAMES-------------------\n" + "-------------------CLASS NAMES-------------------\n", + "Metal device set to: Apple M1 Max\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "2023-01-22 02:42:51.965089: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: SSE4.1 SSE4.2 AVX AVX2 FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n" + "2023-01-25 23:51:36.616831: I tensorflow/core/common_runtime/pluggable_device/pluggable_device_factory.cc:306] Could not identify NUMA node of platform GPU ID 0, defaulting to 0. Your kernel may not have been built with NUMA support.\n", + "2023-01-25 23:51:36.616851: I tensorflow/core/common_runtime/pluggable_device/pluggable_device_factory.cc:272] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 0 MB memory) -> physical PluggableDevice (device: 0, name: METAL, pci bus id: )\n" ] }, { @@ -1296,9 +1297,9 @@ "name": "stderr", "output_type": "stream", "text": [ - "/Users/aczajka/miniconda3/envs/yolov3/lib/python3.9/site-packages/keras/optimizers/optimizer_v2/adam.py:117: UserWarning: The `lr` argument is deprecated, use `learning_rate` instead.\n", + "/Users/maciej/miniconda3/envs/tensorflow-metal/lib/python3.9/site-packages/keras/optimizers/optimizer_v2/adam.py:117: UserWarning: The `lr` argument is deprecated, use `learning_rate` instead.\n", " super().__init__(name, **kwargs)\n", - "/var/folders/j_/grk4ythd0392dcw5z3gkgw5w0000gn/T/ipykernel_39692/4035785499.py:62: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n", + "/var/folders/cz/4x4d9vv505z_64vvjkx8jcp40000gn/T/ipykernel_15636/4035785499.py:62: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. 
Please use `Model.fit`, which supports generators.\n", " model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),\n" ] }, @@ -1306,8 +1307,21 @@ "name": "stdout", "output_type": "stream", "text": [ - "Epoch 1/500\n", - "WARNING:tensorflow:From /Users/aczajka/miniconda3/envs/yolov3/lib/python3.9/site-packages/tensorflow/python/autograph/pyct/static_analysis/liveness.py:83: Analyzer.lamba_check (from tensorflow.python.autograph.pyct.static_analysis.liveness) is deprecated and will be removed after 2023-09-23.\n", + "Epoch 1/500\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-01-25 23:51:38.517307: W tensorflow/tsl/platform/profile_utils/cpu_utils.cc:128] Failed to get CPU frequency: 0 Hz\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /Users/maciej/miniconda3/envs/tensorflow-metal/lib/python3.9/site-packages/tensorflow/python/autograph/pyct/static_analysis/liveness.py:83: Analyzer.lamba_check (from tensorflow.python.autograph.pyct.static_analysis.liveness) is deprecated and will be removed after 2023-09-23.\n", "Instructions for updating:\n", "Lambda fuctions will be no more assumed to be used in the statement where they are used, or at least in the same block. https://github.com/tensorflow/tensorflow/issues/56089\n" ] @@ -1316,1176 +1330,14 @@ "name": "stderr", "output_type": "stream", "text": [ - "2023-01-22 02:43:01.274999: E tensorflow/core/grappler/optimizers/meta_optimizer.cc:954] layout failed: INVALID_ARGUMENT: Subshape must have computed start >= end since stride is negative, but is 0 and 2 (computed from start 0 and end 9223372036854775807 over shape with rank 2 and stride-1)\n" + "2023-01-25 23:51:41.830237: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "30/30 [==============================] - ETA: 0s - loss: 1092.7228" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-01-22 02:43:54.290859: E tensorflow/core/grappler/optimizers/meta_optimizer.cc:954] layout failed: INVALID_ARGUMENT: Subshape must have computed start >= end since stride is negative, but is 0 and 2 (computed from start 0 and end 9223372036854775807 over shape with rank 2 and stride-1)\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "30/30 [==============================] - 70s 2s/step - loss: 1092.7228 - val_loss: 216.0935\n", - "Epoch 2/500\n", - "30/30 [==============================] - 57s 2s/step - loss: 135.6953 - val_loss: 94.2604\n", - "Epoch 3/500\n", - "30/30 [==============================] - 52s 2s/step - loss: 79.2672 - val_loss: 68.8617\n", - "Epoch 4/500\n", - "30/30 [==============================] - 52s 2s/step - loss: 60.4469 - val_loss: 54.7572\n", - "Epoch 5/500\n", - "30/30 [==============================] - 50s 2s/step - loss: 50.0802 - val_loss: 47.2904\n", - "Epoch 6/500\n", - "30/30 [==============================] - 51s 2s/step - loss: 43.6335 - val_loss: 41.2742\n", - "Epoch 7/500\n", - "30/30 [==============================] - 51s 2s/step - loss: 39.3473 - val_loss: 38.5374\n", - "Epoch 8/500\n", - "30/30 [==============================] - 52s 2s/step - loss: 36.2422 - val_loss: 35.2012\n", - "Epoch 9/500\n", - "30/30 [==============================] - 51s 2s/step - loss: 33.6743 - val_loss: 33.0579\n", - "Epoch 10/500\n", - "30/30 
[... remaining removed ("-") notebook training output elided: per-epoch Keras progress lines for epochs 10-500 of the frozen-layer run (loss falling from ~32 to ~16.1, val_loss from ~31 to ~16.0), repeated grappler "layout failed: INVALID_ARGUMENT" stderr messages, the "Unfreeze all of the layers. Train on 488 samples, val on 121 samples, with batch size 16." notice with a second `Model.fit_generator` deprecation warning, and the start of the fine-tuning phase (epochs 51-58/100 at lr 1.0000e-04, loss 16.22 -> ~13.5, val_loss ~13.1), truncated here ...]
13.5824 - lr: 1.0000e-04\n", - "Epoch 59/100\n", - "30/30 [==============================] - 259s 9s/step - loss: 13.4531 - val_loss: 13.1278 - lr: 1.0000e-04\n", - "Epoch 60/100\n", - "30/30 [==============================] - 219s 7s/step - loss: 13.3503 - val_loss: 13.0499 - lr: 1.0000e-04\n", - "Epoch 61/100\n", - "30/30 [==============================] - 254s 8s/step - loss: 13.2267 - val_loss: 13.0210 - lr: 1.0000e-04\n", - "Epoch 62/100\n", - "30/30 [==============================] - 414s 14s/step - loss: 13.2120 - val_loss: 14.0383 - lr: 1.0000e-04\n", - "Epoch 63/100\n", - "30/30 [==============================] - 472s 16s/step - loss: 12.9336 - val_loss: 13.2708 - lr: 1.0000e-04\n", - "Epoch 64/100\n", - "30/30 [==============================] - ETA: 0s - loss: 13.1477 \n", - "Epoch 64: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-06.\n", - "30/30 [==============================] - 470s 16s/step - loss: 13.1477 - val_loss: 13.6016 - lr: 1.0000e-04\n", - "Epoch 65/100\n", - "30/30 [==============================] - 272s 9s/step - loss: 13.2001 - val_loss: 12.9789 - lr: 1.0000e-05\n", - "Epoch 66/100\n", - "30/30 [==============================] - 256s 9s/step - loss: 12.8699 - val_loss: 12.7537 - lr: 1.0000e-05\n", - "Epoch 67/100\n", - "30/30 [==============================] - 460s 15s/step - loss: 12.8529 - val_loss: 12.5797 - lr: 1.0000e-05\n", - "Epoch 68/100\n", - "30/30 [==============================] - 520s 17s/step - loss: 12.8881 - val_loss: 12.8464 - lr: 1.0000e-05\n", - "Epoch 69/100\n", - "30/30 [==============================] - 316s 10s/step - loss: 12.8289 - val_loss: 13.0487 - lr: 1.0000e-05\n", - "Epoch 70/100\n", - "30/30 [==============================] - ETA: 0s - loss: 12.7765\n", - "Epoch 70: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-07.\n", - "30/30 [==============================] - 236s 8s/step - loss: 12.7765 - val_loss: 12.7764 - lr: 1.0000e-05\n", - "Epoch 71/100\n", - "30/30 [==============================] - 357s 12s/step - loss: 12.7222 - val_loss: 12.6030 - lr: 1.0000e-06\n", - "Epoch 72/100\n", - "30/30 [==============================] - 471s 16s/step - loss: 12.9312 - val_loss: 12.7407 - lr: 1.0000e-06\n", - "Epoch 73/100\n", - "30/30 [==============================] - ETA: 0s - loss: 12.7563 \n", - "Epoch 73: ReduceLROnPlateau reducing learning rate to 9.999999974752428e-08.\n", - "30/30 [==============================] - 474s 16s/step - loss: 12.7563 - val_loss: 12.8981 - lr: 1.0000e-06\n", - "Epoch 74/100\n", - "30/30 [==============================] - 337s 11s/step - loss: 12.6372 - val_loss: 13.0085 - lr: 1.0000e-07\n", - "Epoch 75/100\n", - "30/30 [==============================] - 238s 8s/step - loss: 12.6892 - val_loss: 12.6015 - lr: 1.0000e-07\n", - "Epoch 76/100\n", - "30/30 [==============================] - ETA: 0s - loss: 12.7828\n", - "Epoch 76: ReduceLROnPlateau reducing learning rate to 1.0000000116860975e-08.\n", - "30/30 [==============================] - 308s 10s/step - loss: 12.7828 - val_loss: 13.2228 - lr: 1.0000e-07\n", - "Epoch 77/100\n", - "30/30 [==============================] - 336s 11s/step - loss: 12.7876 - val_loss: 12.4209 - lr: 1.0000e-08\n", - "Epoch 78/100\n", - "30/30 [==============================] - 337s 11s/step - loss: 12.5455 - val_loss: 12.7752 - lr: 1.0000e-08\n", - "Epoch 79/100\n", - "30/30 [==============================] - 258s 8s/step - loss: 12.7785 - val_loss: 12.7235 - lr: 1.0000e-08\n", - "Epoch 80/100\n", - "30/30 [==============================] - ETA: 0s 
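Note on the output removed above: it comes from a two-stage keras-yolo3-style training loop, a frozen warm-up stage for 500 epochs followed by "Unfreeze all of the layers." fine-tuning with ReduceLROnPlateau and EarlyStopping, driven through the now-deprecated Model.fit_generator. The cell's source is not part of this diff, so the following is only a sketch of how that second stage is typically written with Model.fit; `model`, `num_val` and the other helpers (`data_generator_wrapper`, `lines`, `num_train`, `input_shape`, `anchors`, `num_classes`) are assumed to exist from earlier cells, as suggested by the warning text.

# Sketch only -- not the notebook's exact cell. Second (unfrozen) training stage
# matching the removed log: Model.fit instead of the deprecated Model.fit_generator,
# with ReduceLROnPlateau and EarlyStopping on val_loss. All helper names below are
# assumed from earlier cells / a keras-yolo3-style training script.
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam

for layer in model.layers:
    layer.trainable = True                      # "Unfreeze all of the layers."
model.compile(optimizer=Adam(learning_rate=1e-4),
              loss={'yolo_loss': lambda y_true, y_pred: y_pred})

batch_size = 16
callbacks = [
    ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1),
    EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1),
]

model.fit(
    data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
    steps_per_epoch=max(1, num_train // batch_size),
    validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
    validation_steps=max(1, num_val // batch_size),
    epochs=100,
    initial_epoch=50,                           # the log picks up at "Epoch 51/100"
    callbacks=callbacks,
)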
@@ -3184,7 +2036,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
- "version": "3.9.15"
+ "version": "3.9.16"
 },
 "latex_envs": {
 "LaTeX_envs_menu_present": true,