diff --git a/.gitignore b/.gitignore
index d973327..7ded952 100644
--- a/.gitignore
+++ b/.gitignore
@@ -211,4 +211,6 @@ fabric.properties
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
-.idea
\ No newline at end of file
+.idea
+
+slot-model
\ No newline at end of file
diff --git a/lab/08-parsing-semantyczny-uczenie.ipynb b/lab/08-parsing-semantyczny-uczenie(zmodyfikowany).ipynb
similarity index 50%
rename from lab/08-parsing-semantyczny-uczenie.ipynb
rename to lab/08-parsing-semantyczny-uczenie(zmodyfikowany).ipynb
index 8b2d03a..d186b33 100644
--- a/lab/08-parsing-semantyczny-uczenie.ipynb
+++ b/lab/08-parsing-semantyczny-uczenie(zmodyfikowany).ipynb
@@ -73,60 +73,6 @@
"Skorzystamy ze zbioru danych przygotowanego przez Schustera (2019)."
]
},
- {
- "cell_type": "code",
- "execution_count": 32,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "c:\\Develop\\wmi\\AITECH\\sem1\\Systemy dialogowe\\lab\\l07\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "A subdirectory or file -p already exists.\n",
- "Error occurred while processing: -p.\n",
- "A subdirectory or file l07 already exists.\n",
- "Error occurred while processing: l07.\n",
- "** Resuming transfer from byte position 8923190\n",
- " % Total % Received % Xferd Average Speed Time Time Time Current\n",
- " Dload Upload Total Spent Left Speed\n",
- "\n",
- " 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\n",
- " 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\n",
- "\n",
- "100 49 100 49 0 0 118 0 --:--:-- --:--:-- --:--:-- 118\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "c:\\Develop\\wmi\\AITECH\\sem1\\Systemy dialogowe\\lab\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "'unzip' is not recognized as an internal or external command,\n",
- "operable program or batch file.\n"
- ]
- }
- ],
- "source": [
- "!mkdir -p l07\n",
- "%cd l07\n",
- "!curl -L -C - https://fb.me/multilingual_task_oriented_data -o data.zip\n",
- "!unzip data.zip\n",
- "%cd .."
- ]
- },
{
"cell_type": "markdown",
"metadata": {},
@@ -136,28 +82,28 @@
},
{
"cell_type": "code",
- "execution_count": 76,
+ "execution_count": 23,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- "# text: halo\n",
+ "# text: halo\t\t\t\n",
"\n",
- "# intent: hello\n",
+ "# intent: hello\t\t\t\n",
"\n",
- "# slots: \n",
+ "# slots: \t\t\t\n",
"\n",
"1\thalo\thello\tNoLabel\n",
"\n",
+ "\t\t\t\n",
"\n",
+ "# text: chaciałbym pójść na premierę filmu jakie premiery są w tym tygodniu\t\t\t\n",
"\n",
- "# text: chaciałbym pójść na premierę filmu jakie premiery są w tym tygodniu\n",
+ "# intent: reqmore\t\t\t\n",
"\n",
- "# intent: reqmore\n",
- "\n",
- "# slots: \n",
+ "# slots: \t\t\t\n",
"\n",
"1\tchaciałbym\treqmore\tNoLabel\n",
"\n",
@@ -169,9 +115,9 @@
"\n",
"5\tfilmu\treqmore\tNoLabel\n",
"\n",
- "6\tjakie\treqmore\tNoLabel\n",
+ "6\tjakie\treqmore\tB-goal\n",
"\n",
- "7\tpremiery\treqmore\tNoLabel\n",
+ "7\tpremiery\treqmore\tI-goal\n",
"\n"
]
}
@@ -209,7 +155,7 @@
},
{
"cell_type": "code",
- "execution_count": 34,
+ "execution_count": 24,
"metadata": {},
"outputs": [
{
@@ -226,7 +172,7 @@
"'
\\n\\n1 | wybieram | inform | O |
\\n2 | batmana | inform | B-title |
\\n\\n
'"
]
},
- "execution_count": 34,
+ "execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
@@ -238,7 +184,7 @@
},
{
"cell_type": "code",
- "execution_count": 77,
+ "execution_count": 25,
"metadata": {},
"outputs": [
{
@@ -246,17 +192,17 @@
"text/html": [
"\n",
"\n",
- "1 | chcę | inform | O |
\n",
- "2 | zarezerwować | inform | O |
\n",
- "3 | bilety | inform | O |
\n",
+ "1 | chcę | inform | O |
\n",
+ "2 | zarezerwować | inform | B-goal |
\n",
+ "3 | bilety | inform | O |
\n",
"\n",
"
"
],
"text/plain": [
- "'\\n\\n1 | chcę | inform | O |
\\n2 | zarezerwować | inform | O |
\\n3 | bilety | inform | O |
\\n\\n
'"
+ "'\\n\\n1 | chcę | inform | O |
\\n2 | zarezerwować | inform | B-goal |
\\n3 | bilety | inform | O |
\\n\\n
'"
]
},
- "execution_count": 77,
+ "execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
@@ -267,7 +213,7 @@
},
{
"cell_type": "code",
- "execution_count": 78,
+ "execution_count": 26,
"metadata": {},
"outputs": [
{
@@ -286,7 +232,7 @@
"'\\n\\n1 | chciałbym | inform | O |
\\n2 | anulować | inform | O |
\\n3 | rezerwację | inform | O |
\\n4 | biletu | inform | O |
\\n\\n
'"
]
},
- "execution_count": 78,
+ "execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
@@ -305,7 +251,7 @@
},
{
"cell_type": "code",
- "execution_count": 38,
+ "execution_count": 27,
"metadata": {},
"outputs": [],
"source": [
@@ -341,15 +287,15 @@
},
{
"cell_type": "code",
- "execution_count": 39,
+ "execution_count": 28,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- "Corpus: 297 train + 33 dev + 33 test sentences\n",
- "Dictionary with 14 tags: , O, B-date, I-date, B-time, I-time, B-area, I-area, B-title, B-quantity, I-title, I-quantity, , \n"
+ "Corpus: 345 train + 38 dev + 32 test sentences\n",
+ "Dictionary with 20 tags: , O, B-interval, I-interval, B-title, B-date, I-date, B-time, B-quantity, B-area, I-area, B-goal, I-goal, I-title, I-time, I-quantity, B-seats, I-seats, , \n"
]
}
],
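Note: the new corpus counts and the 20-tag dictionary above come from re-reading the extended CoNLL-U files. A minimal sketch of that step, assuming the notebook's conllu2flair() helper, the trainset/devset/testset parsed earlier with the conllu library, and a flair version that still exposes Corpus.make_tag_dictionary (e.g. 0.8.x):

```python
from flair.data import Corpus

# Assumes trainset/devset/testset were parsed from the .conllu files earlier in the
# notebook and that conllu2flair() converts them into flair SentenceDatasets.
corpus = Corpus(train=conllu2flair(trainset, 'slot'),
                dev=conllu2flair(devset, 'slot'),
                test=conllu2flair(testset, 'slot'))
tag_dictionary = corpus.make_tag_dictionary(tag_type='slot')

print(corpus)          # e.g. "Corpus: 345 train + 38 dev + 32 test sentences"
print(tag_dictionary)  # 20 tags, now including B/I-goal, B/I-interval and B/I-seats
```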
@@ -387,132 +333,9 @@
},
{
"cell_type": "code",
- "execution_count": 40,
+ "execution_count": 29,
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "2022-04-28 22:14:01,525 https://flair.informatik.hu-berlin.de/resources/embeddings/token/pl-wiki-fasttext-300d-1M.vectors.npy not found in cache, downloading to C:\\Users\\48516\\AppData\\Local\\Temp\\tmp8ekygs88\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 1199998928/1199998928 [01:00<00:00, 19734932.13B/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "2022-04-28 22:15:02,505 copying C:\\Users\\48516\\AppData\\Local\\Temp\\tmp8ekygs88 to cache at C:\\Users\\48516\\.flair\\embeddings\\pl-wiki-fasttext-300d-1M.vectors.npy\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "2022-04-28 22:15:03,136 removing temp file C:\\Users\\48516\\AppData\\Local\\Temp\\tmp8ekygs88\n",
- "2022-04-28 22:15:03,420 https://flair.informatik.hu-berlin.de/resources/embeddings/token/pl-wiki-fasttext-300d-1M not found in cache, downloading to C:\\Users\\48516\\AppData\\Local\\Temp\\tmp612sxdgl\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 40874795/40874795 [00:02<00:00, 18943852.55B/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "2022-04-28 22:15:05,807 copying C:\\Users\\48516\\AppData\\Local\\Temp\\tmp612sxdgl to cache at C:\\Users\\48516\\.flair\\embeddings\\pl-wiki-fasttext-300d-1M\n",
- "2022-04-28 22:15:05,830 removing temp file C:\\Users\\48516\\AppData\\Local\\Temp\\tmp612sxdgl\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "2022-04-28 22:15:13,095 https://flair.informatik.hu-berlin.de/resources/embeddings/flair/lm-polish-forward-v0.2.pt not found in cache, downloading to C:\\Users\\48516\\AppData\\Local\\Temp\\tmp05k_xff8\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 84244196/84244196 [00:04<00:00, 19653900.77B/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "2022-04-28 22:15:17,599 copying C:\\Users\\48516\\AppData\\Local\\Temp\\tmp05k_xff8 to cache at C:\\Users\\48516\\.flair\\embeddings\\lm-polish-forward-v0.2.pt\n",
- "2022-04-28 22:15:17,640 removing temp file C:\\Users\\48516\\AppData\\Local\\Temp\\tmp05k_xff8\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "2022-04-28 22:15:18,034 https://flair.informatik.hu-berlin.de/resources/embeddings/flair/lm-polish-backward-v0.2.pt not found in cache, downloading to C:\\Users\\48516\\AppData\\Local\\Temp\\tmpbjevekqx\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 84244196/84244196 [00:04<00:00, 19850177.72B/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "2022-04-28 22:15:22,467 copying C:\\Users\\48516\\AppData\\Local\\Temp\\tmpbjevekqx to cache at C:\\Users\\48516\\.flair\\embeddings\\lm-polish-backward-v0.2.pt\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "2022-04-28 22:15:22,518 removing temp file C:\\Users\\48516\\AppData\\Local\\Temp\\tmpbjevekqx\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"embedding_types = [\n",
" WordEmbeddings('pl'),\n",
@@ -537,7 +360,7 @@
},
{
"cell_type": "code",
- "execution_count": 41,
+ "execution_count": 30,
"metadata": {},
"outputs": [
{
@@ -572,7 +395,7 @@
" (locked_dropout): LockedDropout(p=0.5)\n",
" (embedding2nn): Linear(in_features=4446, out_features=4446, bias=True)\n",
" (rnn): LSTM(4446, 256, batch_first=True, bidirectional=True)\n",
- " (linear): Linear(in_features=512, out_features=14, bias=True)\n",
+ " (linear): Linear(in_features=512, out_features=20, bias=True)\n",
" (beta): 1.0\n",
" (weights): None\n",
" (weight_tensor) None\n",
@@ -593,15 +416,15 @@
},
{
"cell_type": "code",
- "execution_count": 42,
+ "execution_count": 31,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- "2022-04-28 22:15:23,085 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:15:23,086 Model: \"SequenceTagger(\n",
+ "2022-05-01 12:13:39,609 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:13:39,610 Model: \"SequenceTagger(\n",
" (embeddings): StackedEmbeddings(\n",
" (list_embedding_0): WordEmbeddings('pl')\n",
" (list_embedding_1): FlairEmbeddings(\n",
@@ -629,243 +452,254 @@
" (locked_dropout): LockedDropout(p=0.5)\n",
" (embedding2nn): Linear(in_features=4446, out_features=4446, bias=True)\n",
" (rnn): LSTM(4446, 256, batch_first=True, bidirectional=True)\n",
- " (linear): Linear(in_features=512, out_features=14, bias=True)\n",
+ " (linear): Linear(in_features=512, out_features=20, bias=True)\n",
" (beta): 1.0\n",
" (weights): None\n",
" (weight_tensor) None\n",
")\"\n",
- "2022-04-28 22:15:23,087 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:15:23,088 Corpus: \"Corpus: 297 train + 33 dev + 33 test sentences\"\n",
- "2022-04-28 22:15:23,088 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:15:23,089 Parameters:\n",
- "2022-04-28 22:15:23,089 - learning_rate: \"0.1\"\n",
- "2022-04-28 22:15:23,090 - mini_batch_size: \"32\"\n",
- "2022-04-28 22:15:23,090 - patience: \"3\"\n",
- "2022-04-28 22:15:23,091 - anneal_factor: \"0.5\"\n",
- "2022-04-28 22:15:23,092 - max_epochs: \"10\"\n",
- "2022-04-28 22:15:23,093 - shuffle: \"True\"\n",
- "2022-04-28 22:15:23,093 - train_with_dev: \"False\"\n",
- "2022-04-28 22:15:23,094 - batch_growth_annealing: \"False\"\n",
- "2022-04-28 22:15:23,094 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:15:23,095 Model training base path: \"slot-model\"\n",
- "2022-04-28 22:15:23,095 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:15:23,096 Device: cpu\n",
- "2022-04-28 22:15:23,096 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:15:23,097 Embeddings storage mode: cpu\n",
- "2022-04-28 22:15:23,100 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:15:25,051 epoch 1 - iter 1/10 - loss 15.67058754 - samples/sec: 16.40 - lr: 0.100000\n",
- "2022-04-28 22:15:27,334 epoch 1 - iter 2/10 - loss 13.01803017 - samples/sec: 14.02 - lr: 0.100000\n",
- "2022-04-28 22:15:29,132 epoch 1 - iter 3/10 - loss 11.16305335 - samples/sec: 17.81 - lr: 0.100000\n",
- "2022-04-28 22:15:30,629 epoch 1 - iter 4/10 - loss 9.23769999 - samples/sec: 21.39 - lr: 0.100000\n",
- "2022-04-28 22:15:32,614 epoch 1 - iter 5/10 - loss 7.94914236 - samples/sec: 16.13 - lr: 0.100000\n",
- "2022-04-28 22:15:34,081 epoch 1 - iter 6/10 - loss 7.05464562 - samples/sec: 21.83 - lr: 0.100000\n",
- "2022-04-28 22:15:35,257 epoch 1 - iter 7/10 - loss 6.28502292 - samples/sec: 27.26 - lr: 0.100000\n",
- "2022-04-28 22:15:37,386 epoch 1 - iter 8/10 - loss 5.74554797 - samples/sec: 15.04 - lr: 0.100000\n",
- "2022-04-28 22:15:39,009 epoch 1 - iter 9/10 - loss 5.48559354 - samples/sec: 19.73 - lr: 0.100000\n",
- "2022-04-28 22:15:39,892 epoch 1 - iter 10/10 - loss 5.10890775 - samples/sec: 36.28 - lr: 0.100000\n",
- "2022-04-28 22:15:39,893 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:15:39,894 EPOCH 1 done: loss 5.1089 - lr 0.1000000\n",
- "2022-04-28 22:15:41,651 DEV : loss 1.1116931438446045 - score 0.0\n",
- "2022-04-28 22:15:41,654 BAD EPOCHS (no improvement): 0\n",
+ "2022-05-01 12:13:39,611 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:13:39,611 Corpus: \"Corpus: 345 train + 38 dev + 32 test sentences\"\n",
+ "2022-05-01 12:13:39,612 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:13:39,613 Parameters:\n",
+ "2022-05-01 12:13:39,614 - learning_rate: \"0.1\"\n",
+ "2022-05-01 12:13:39,614 - mini_batch_size: \"32\"\n",
+ "2022-05-01 12:13:39,615 - patience: \"3\"\n",
+ "2022-05-01 12:13:39,616 - anneal_factor: \"0.5\"\n",
+ "2022-05-01 12:13:39,616 - max_epochs: \"10\"\n",
+ "2022-05-01 12:13:39,616 - shuffle: \"True\"\n",
+ "2022-05-01 12:13:39,617 - train_with_dev: \"False\"\n",
+ "2022-05-01 12:13:39,618 - batch_growth_annealing: \"False\"\n",
+ "2022-05-01 12:13:39,618 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:13:39,619 Model training base path: \"slot-model\"\n",
+ "2022-05-01 12:13:39,620 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:13:39,620 Device: cpu\n",
+ "2022-05-01 12:13:39,621 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:13:39,621 Embeddings storage mode: cpu\n",
+ "2022-05-01 12:13:39,623 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:13:42,490 epoch 1 - iter 1/11 - loss 9.59000492 - samples/sec: 11.17 - lr: 0.100000\n",
+ "2022-05-01 12:13:44,150 epoch 1 - iter 2/11 - loss 9.31767702 - samples/sec: 19.29 - lr: 0.100000\n",
+ "2022-05-01 12:13:45,968 epoch 1 - iter 3/11 - loss 8.70617644 - samples/sec: 17.61 - lr: 0.100000\n",
+ "2022-05-01 12:13:47,791 epoch 1 - iter 4/11 - loss 8.11678410 - samples/sec: 17.57 - lr: 0.100000\n",
+ "2022-05-01 12:13:49,815 epoch 1 - iter 5/11 - loss 7.65581417 - samples/sec: 15.82 - lr: 0.100000\n",
+ "2022-05-01 12:13:52,296 epoch 1 - iter 6/11 - loss 7.27475810 - samples/sec: 12.90 - lr: 0.100000\n",
+ "2022-05-01 12:13:54,454 epoch 1 - iter 7/11 - loss 6.95693064 - samples/sec: 14.84 - lr: 0.100000\n",
+ "2022-05-01 12:13:56,845 epoch 1 - iter 8/11 - loss 6.61199290 - samples/sec: 13.39 - lr: 0.100000\n",
+ "2022-05-01 12:13:59,195 epoch 1 - iter 9/11 - loss 6.58955601 - samples/sec: 13.63 - lr: 0.100000\n",
+ "2022-05-01 12:14:01,065 epoch 1 - iter 10/11 - loss 6.63135071 - samples/sec: 17.11 - lr: 0.100000\n",
+ "2022-05-01 12:14:02,415 epoch 1 - iter 11/11 - loss 6.52558366 - samples/sec: 23.72 - lr: 0.100000\n",
+ "2022-05-01 12:14:02,416 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:14:02,417 EPOCH 1 done: loss 6.5256 - lr 0.1000000\n",
+ "2022-05-01 12:14:05,139 DEV : loss 8.419286727905273 - score 0.0\n",
+ "2022-05-01 12:14:05,141 BAD EPOCHS (no improvement): 0\n",
"saving best model\n",
- "2022-04-28 22:15:54,970 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:15:55,703 epoch 2 - iter 1/10 - loss 2.39535546 - samples/sec: 48.71 - lr: 0.100000\n",
- "2022-04-28 22:15:56,276 epoch 2 - iter 2/10 - loss 3.14594960 - samples/sec: 55.94 - lr: 0.100000\n",
- "2022-04-28 22:15:56,849 epoch 2 - iter 3/10 - loss 2.96723008 - samples/sec: 55.94 - lr: 0.100000\n",
- "2022-04-28 22:15:57,326 epoch 2 - iter 4/10 - loss 2.72414619 - samples/sec: 67.23 - lr: 0.100000\n",
- "2022-04-28 22:15:57,799 epoch 2 - iter 5/10 - loss 2.52746274 - samples/sec: 67.80 - lr: 0.100000\n",
- "2022-04-28 22:15:58,255 epoch 2 - iter 6/10 - loss 2.41920217 - samples/sec: 70.33 - lr: 0.100000\n",
- "2022-04-28 22:15:58,770 epoch 2 - iter 7/10 - loss 2.48535442 - samples/sec: 62.26 - lr: 0.100000\n",
- "2022-04-28 22:15:59,324 epoch 2 - iter 8/10 - loss 2.40343314 - samples/sec: 57.87 - lr: 0.100000\n",
- "2022-04-28 22:15:59,827 epoch 2 - iter 9/10 - loss 2.41345758 - samples/sec: 63.74 - lr: 0.100000\n",
- "2022-04-28 22:16:00,052 epoch 2 - iter 10/10 - loss 2.63766205 - samples/sec: 142.86 - lr: 0.100000\n",
- "2022-04-28 22:16:00,053 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:16:00,054 EPOCH 2 done: loss 2.6377 - lr 0.1000000\n",
- "2022-04-28 22:16:00,234 DEV : loss 1.2027416229248047 - score 0.0\n",
- "2022-04-28 22:16:00,238 BAD EPOCHS (no improvement): 1\n",
- "2022-04-28 22:16:00,241 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:16:00,771 epoch 3 - iter 1/10 - loss 2.07519531 - samples/sec: 60.61 - lr: 0.100000\n",
- "2022-04-28 22:16:01,297 epoch 3 - iter 2/10 - loss 2.21946335 - samples/sec: 60.95 - lr: 0.100000\n",
- "2022-04-28 22:16:01,826 epoch 3 - iter 3/10 - loss 2.32372427 - samples/sec: 60.61 - lr: 0.100000\n",
- "2022-04-28 22:16:02,304 epoch 3 - iter 4/10 - loss 2.18133342 - samples/sec: 67.23 - lr: 0.100000\n",
- "2022-04-28 22:16:02,727 epoch 3 - iter 5/10 - loss 2.10553741 - samples/sec: 75.83 - lr: 0.100000\n",
- "2022-04-28 22:16:03,215 epoch 3 - iter 6/10 - loss 1.99518015 - samples/sec: 65.84 - lr: 0.100000\n",
- "2022-04-28 22:16:03,670 epoch 3 - iter 7/10 - loss 2.03174150 - samples/sec: 70.64 - lr: 0.100000\n",
- "2022-04-28 22:16:04,239 epoch 3 - iter 8/10 - loss 2.19520997 - samples/sec: 56.34 - lr: 0.100000\n",
- "2022-04-28 22:16:04,686 epoch 3 - iter 9/10 - loss 2.15986861 - samples/sec: 71.75 - lr: 0.100000\n",
- "2022-04-28 22:16:04,919 epoch 3 - iter 10/10 - loss 2.02860461 - samples/sec: 137.93 - lr: 0.100000\n",
- "2022-04-28 22:16:04,920 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:16:04,921 EPOCH 3 done: loss 2.0286 - lr 0.1000000\n",
- "2022-04-28 22:16:05,067 DEV : loss 0.9265440702438354 - score 0.0\n",
- "2022-04-28 22:16:05,069 BAD EPOCHS (no improvement): 0\n",
+ "2022-05-01 12:14:15,906 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:14:16,782 epoch 2 - iter 1/11 - loss 7.61237478 - samples/sec: 40.25 - lr: 0.100000\n",
+ "2022-05-01 12:14:17,253 epoch 2 - iter 2/11 - loss 7.02023911 - samples/sec: 68.09 - lr: 0.100000\n",
+ "2022-05-01 12:14:17,744 epoch 2 - iter 3/11 - loss 6.25125138 - samples/sec: 65.31 - lr: 0.100000\n",
+ "2022-05-01 12:14:18,282 epoch 2 - iter 4/11 - loss 5.91574061 - samples/sec: 59.59 - lr: 0.100000\n",
+ "2022-05-01 12:14:18,742 epoch 2 - iter 5/11 - loss 5.80905600 - samples/sec: 69.87 - lr: 0.100000\n",
+ "2022-05-01 12:14:19,262 epoch 2 - iter 6/11 - loss 5.51969266 - samples/sec: 61.66 - lr: 0.100000\n",
+ "2022-05-01 12:14:19,753 epoch 2 - iter 7/11 - loss 5.34836953 - samples/sec: 65.31 - lr: 0.100000\n",
+ "2022-05-01 12:14:20,267 epoch 2 - iter 8/11 - loss 5.33710295 - samples/sec: 62.38 - lr: 0.100000\n",
+ "2022-05-01 12:14:20,750 epoch 2 - iter 9/11 - loss 5.28061861 - samples/sec: 66.32 - lr: 0.100000\n",
+ "2022-05-01 12:14:21,379 epoch 2 - iter 10/11 - loss 5.20552692 - samples/sec: 50.95 - lr: 0.100000\n",
+ "2022-05-01 12:14:21,922 epoch 2 - iter 11/11 - loss 5.26294283 - samples/sec: 59.03 - lr: 0.100000\n",
+ "2022-05-01 12:14:21,923 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:14:21,924 EPOCH 2 done: loss 5.2629 - lr 0.1000000\n",
+ "2022-05-01 12:14:22,145 DEV : loss 7.168168544769287 - score 0.0645\n",
+ "2022-05-01 12:14:22,149 BAD EPOCHS (no improvement): 0\n",
"saving best model\n",
- "2022-04-28 22:16:10,882 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:16:11,339 epoch 4 - iter 1/10 - loss 2.63443780 - samples/sec: 70.33 - lr: 0.100000\n",
- "2022-04-28 22:16:11,858 epoch 4 - iter 2/10 - loss 2.35905457 - samples/sec: 61.78 - lr: 0.100000\n",
- "2022-04-28 22:16:12,523 epoch 4 - iter 3/10 - loss 2.23206981 - samples/sec: 48.19 - lr: 0.100000\n",
- "2022-04-28 22:16:13,026 epoch 4 - iter 4/10 - loss 2.28027773 - samples/sec: 63.75 - lr: 0.100000\n",
- "2022-04-28 22:16:13,610 epoch 4 - iter 5/10 - loss 2.22129200 - samples/sec: 54.98 - lr: 0.100000\n",
- "2022-04-28 22:16:14,074 epoch 4 - iter 6/10 - loss 2.10545621 - samples/sec: 69.11 - lr: 0.100000\n",
- "2022-04-28 22:16:14,646 epoch 4 - iter 7/10 - loss 2.10457425 - samples/sec: 56.04 - lr: 0.100000\n",
- "2022-04-28 22:16:15,144 epoch 4 - iter 8/10 - loss 2.04774940 - samples/sec: 64.38 - lr: 0.100000\n",
- "2022-04-28 22:16:15,698 epoch 4 - iter 9/10 - loss 1.99643935 - samples/sec: 57.97 - lr: 0.100000\n",
- "2022-04-28 22:16:15,935 epoch 4 - iter 10/10 - loss 1.81641705 - samples/sec: 136.14 - lr: 0.100000\n",
- "2022-04-28 22:16:15,936 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:16:15,937 EPOCH 4 done: loss 1.8164 - lr 0.1000000\n",
- "2022-04-28 22:16:16,092 DEV : loss 0.8311207890510559 - score 0.0\n",
- "2022-04-28 22:16:16,094 BAD EPOCHS (no improvement): 0\n",
+ "2022-05-01 12:14:27,939 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:14:28,495 epoch 3 - iter 1/11 - loss 3.70659065 - samples/sec: 57.56 - lr: 0.100000\n",
+ "2022-05-01 12:14:29,038 epoch 3 - iter 2/11 - loss 4.21530080 - samples/sec: 59.04 - lr: 0.100000\n",
+ "2022-05-01 12:14:29,607 epoch 3 - iter 3/11 - loss 4.40864404 - samples/sec: 56.37 - lr: 0.100000\n",
+ "2022-05-01 12:14:30,171 epoch 3 - iter 4/11 - loss 4.69527233 - samples/sec: 56.93 - lr: 0.100000\n",
+ "2022-05-01 12:14:30,587 epoch 3 - iter 5/11 - loss 4.43719640 - samples/sec: 77.11 - lr: 0.100000\n",
+ "2022-05-01 12:14:31,075 epoch 3 - iter 6/11 - loss 4.55344125 - samples/sec: 65.71 - lr: 0.100000\n",
+ "2022-05-01 12:14:31,625 epoch 3 - iter 7/11 - loss 4.77397609 - samples/sec: 58.34 - lr: 0.100000\n",
+ "2022-05-01 12:14:32,143 epoch 3 - iter 8/11 - loss 4.61572361 - samples/sec: 61.89 - lr: 0.100000\n",
+ "2022-05-01 12:14:32,703 epoch 3 - iter 9/11 - loss 4.60090372 - samples/sec: 57.24 - lr: 0.100000\n",
+ "2022-05-01 12:14:33,404 epoch 3 - iter 10/11 - loss 4.70502276 - samples/sec: 45.69 - lr: 0.100000\n",
+ "2022-05-01 12:14:33,839 epoch 3 - iter 11/11 - loss 4.76321775 - samples/sec: 73.73 - lr: 0.100000\n",
+ "2022-05-01 12:14:33,840 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:14:33,840 EPOCH 3 done: loss 4.7632 - lr 0.1000000\n",
+ "2022-05-01 12:14:33,992 DEV : loss 7.209894180297852 - score 0.0\n",
+ "2022-05-01 12:14:33,993 BAD EPOCHS (no improvement): 1\n",
+ "2022-05-01 12:14:33,994 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:14:34,556 epoch 4 - iter 1/11 - loss 5.55247641 - samples/sec: 57.04 - lr: 0.100000\n",
+ "2022-05-01 12:14:35,078 epoch 4 - iter 2/11 - loss 5.08158088 - samples/sec: 61.42 - lr: 0.100000\n",
+ "2022-05-01 12:14:35,643 epoch 4 - iter 3/11 - loss 4.69475476 - samples/sec: 56.73 - lr: 0.100000\n",
+ "2022-05-01 12:14:36,270 epoch 4 - iter 4/11 - loss 4.78649628 - samples/sec: 51.16 - lr: 0.100000\n",
+ "2022-05-01 12:14:36,806 epoch 4 - iter 5/11 - loss 4.62873497 - samples/sec: 59.93 - lr: 0.100000\n",
+ "2022-05-01 12:14:37,419 epoch 4 - iter 6/11 - loss 4.70938087 - samples/sec: 52.29 - lr: 0.100000\n",
+ "2022-05-01 12:14:38,068 epoch 4 - iter 7/11 - loss 4.50588363 - samples/sec: 49.46 - lr: 0.100000\n",
+ "2022-05-01 12:14:38,581 epoch 4 - iter 8/11 - loss 4.36334288 - samples/sec: 62.50 - lr: 0.100000\n",
+ "2022-05-01 12:14:39,140 epoch 4 - iter 9/11 - loss 4.36617618 - samples/sec: 57.45 - lr: 0.100000\n",
+ "2022-05-01 12:14:39,780 epoch 4 - iter 10/11 - loss 4.37847199 - samples/sec: 50.16 - lr: 0.100000\n",
+ "2022-05-01 12:14:40,321 epoch 4 - iter 11/11 - loss 4.26116128 - samples/sec: 59.18 - lr: 0.100000\n",
+ "2022-05-01 12:14:40,323 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:14:40,324 EPOCH 4 done: loss 4.2612 - lr 0.1000000\n",
+ "2022-05-01 12:14:40,544 DEV : loss 5.882441997528076 - score 0.1714\n",
+ "2022-05-01 12:14:40,546 BAD EPOCHS (no improvement): 0\n",
"saving best model\n",
- "2022-04-28 22:16:21,938 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:16:22,424 epoch 5 - iter 1/10 - loss 1.31467295 - samples/sec: 66.12 - lr: 0.100000\n",
- "2022-04-28 22:16:22,852 epoch 5 - iter 2/10 - loss 1.87177873 - samples/sec: 74.94 - lr: 0.100000\n",
- "2022-04-28 22:16:23,440 epoch 5 - iter 3/10 - loss 1.83717314 - samples/sec: 54.51 - lr: 0.100000\n",
- "2022-04-28 22:16:23,991 epoch 5 - iter 4/10 - loss 2.06565040 - samples/sec: 58.18 - lr: 0.100000\n",
- "2022-04-28 22:16:24,364 epoch 5 - iter 5/10 - loss 1.95749507 - samples/sec: 86.25 - lr: 0.100000\n",
- "2022-04-28 22:16:24,832 epoch 5 - iter 6/10 - loss 1.84727591 - samples/sec: 68.67 - lr: 0.100000\n",
- "2022-04-28 22:16:25,238 epoch 5 - iter 7/10 - loss 1.79978011 - samples/sec: 79.21 - lr: 0.100000\n",
- "2022-04-28 22:16:25,679 epoch 5 - iter 8/10 - loss 1.69797329 - samples/sec: 72.73 - lr: 0.100000\n",
- "2022-04-28 22:16:26,173 epoch 5 - iter 9/10 - loss 1.70765987 - samples/sec: 64.84 - lr: 0.100000\n",
- "2022-04-28 22:16:26,364 epoch 5 - iter 10/10 - loss 1.76581790 - samples/sec: 169.31 - lr: 0.100000\n",
- "2022-04-28 22:16:26,366 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:16:26,367 EPOCH 5 done: loss 1.7658 - lr 0.1000000\n",
- "2022-04-28 22:16:26,509 DEV : loss 0.7797471880912781 - score 0.2222\n",
- "2022-04-28 22:16:26,510 BAD EPOCHS (no improvement): 0\n",
+ "2022-05-01 12:14:46,159 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:14:46,709 epoch 5 - iter 1/11 - loss 3.86370564 - samples/sec: 58.29 - lr: 0.100000\n",
+ "2022-05-01 12:14:47,349 epoch 5 - iter 2/11 - loss 3.80554891 - samples/sec: 50.08 - lr: 0.100000\n",
+ "2022-05-01 12:14:47,857 epoch 5 - iter 3/11 - loss 3.34506067 - samples/sec: 63.11 - lr: 0.100000\n",
+ "2022-05-01 12:14:48,579 epoch 5 - iter 4/11 - loss 3.88535106 - samples/sec: 44.38 - lr: 0.100000\n",
+ "2022-05-01 12:14:49,170 epoch 5 - iter 5/11 - loss 3.81894360 - samples/sec: 54.28 - lr: 0.100000\n",
+ "2022-05-01 12:14:49,708 epoch 5 - iter 6/11 - loss 4.18858314 - samples/sec: 59.53 - lr: 0.100000\n",
+ "2022-05-01 12:14:50,171 epoch 5 - iter 7/11 - loss 4.13974752 - samples/sec: 69.26 - lr: 0.100000\n",
+ "2022-05-01 12:14:50,593 epoch 5 - iter 8/11 - loss 4.01002905 - samples/sec: 75.98 - lr: 0.100000\n",
+ "2022-05-01 12:14:51,062 epoch 5 - iter 9/11 - loss 3.97078644 - samples/sec: 68.52 - lr: 0.100000\n",
+ "2022-05-01 12:14:51,508 epoch 5 - iter 10/11 - loss 3.94409857 - samples/sec: 71.91 - lr: 0.100000\n",
+ "2022-05-01 12:14:51,960 epoch 5 - iter 11/11 - loss 3.80738796 - samples/sec: 70.95 - lr: 0.100000\n",
+ "2022-05-01 12:14:51,961 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:14:51,963 EPOCH 5 done: loss 3.8074 - lr 0.1000000\n",
+ "2022-05-01 12:14:52,103 DEV : loss 5.224854469299316 - score 0.1667\n",
+ "2022-05-01 12:14:52,105 BAD EPOCHS (no improvement): 1\n",
+ "2022-05-01 12:14:52,106 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:14:52,616 epoch 6 - iter 1/11 - loss 3.51282573 - samples/sec: 62.91 - lr: 0.100000\n",
+ "2022-05-01 12:14:53,100 epoch 6 - iter 2/11 - loss 3.41601551 - samples/sec: 66.25 - lr: 0.100000\n",
+ "2022-05-01 12:14:53,513 epoch 6 - iter 3/11 - loss 3.08380787 - samples/sec: 77.76 - lr: 0.100000\n",
+ "2022-05-01 12:14:55,121 epoch 6 - iter 4/11 - loss 3.21056002 - samples/sec: 64.71 - lr: 0.100000\n",
+ "2022-05-01 12:14:55,665 epoch 6 - iter 5/11 - loss 3.30184879 - samples/sec: 58.88 - lr: 0.100000\n",
+ "2022-05-01 12:14:56,160 epoch 6 - iter 6/11 - loss 3.20993070 - samples/sec: 64.91 - lr: 0.100000\n",
+ "2022-05-01 12:14:56,670 epoch 6 - iter 7/11 - loss 3.14396119 - samples/sec: 62.91 - lr: 0.100000\n",
+ "2022-05-01 12:14:57,329 epoch 6 - iter 8/11 - loss 3.24591878 - samples/sec: 48.63 - lr: 0.100000\n",
+ "2022-05-01 12:14:57,958 epoch 6 - iter 9/11 - loss 3.31877112 - samples/sec: 51.03 - lr: 0.100000\n",
+ "2022-05-01 12:14:58,527 epoch 6 - iter 10/11 - loss 3.33475649 - samples/sec: 56.34 - lr: 0.100000\n",
+ "2022-05-01 12:14:58,989 epoch 6 - iter 11/11 - loss 3.23232636 - samples/sec: 69.41 - lr: 0.100000\n",
+ "2022-05-01 12:14:58,991 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:14:58,991 EPOCH 6 done: loss 3.2323 - lr 0.1000000\n",
+ "2022-05-01 12:14:59,178 DEV : loss 4.557621002197266 - score 0.2381\n",
+ "2022-05-01 12:14:59,180 BAD EPOCHS (no improvement): 0\n",
"saving best model\n",
- "2022-04-28 22:16:32,211 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:16:32,666 epoch 6 - iter 1/10 - loss 2.04772544 - samples/sec: 70.64 - lr: 0.100000\n",
- "2022-04-28 22:16:33,172 epoch 6 - iter 2/10 - loss 1.61218661 - samples/sec: 63.37 - lr: 0.100000\n",
- "2022-04-28 22:16:33,673 epoch 6 - iter 3/10 - loss 1.55716117 - samples/sec: 64.00 - lr: 0.100000\n",
- "2022-04-28 22:16:34,183 epoch 6 - iter 4/10 - loss 1.54974008 - samples/sec: 62.87 - lr: 0.100000\n",
- "2022-04-28 22:16:34,687 epoch 6 - iter 5/10 - loss 1.50827932 - samples/sec: 63.62 - lr: 0.100000\n",
- "2022-04-28 22:16:35,155 epoch 6 - iter 6/10 - loss 1.46459270 - samples/sec: 68.52 - lr: 0.100000\n",
- "2022-04-28 22:16:35,658 epoch 6 - iter 7/10 - loss 1.50249643 - samples/sec: 63.87 - lr: 0.100000\n",
- "2022-04-28 22:16:36,094 epoch 6 - iter 8/10 - loss 1.51979375 - samples/sec: 73.56 - lr: 0.100000\n",
- "2022-04-28 22:16:36,548 epoch 6 - iter 9/10 - loss 1.56509953 - samples/sec: 70.64 - lr: 0.100000\n",
- "2022-04-28 22:16:36,744 epoch 6 - iter 10/10 - loss 1.55241492 - samples/sec: 164.10 - lr: 0.100000\n",
- "2022-04-28 22:16:36,746 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:16:36,746 EPOCH 6 done: loss 1.5524 - lr 0.1000000\n",
- "2022-04-28 22:16:36,884 DEV : loss 0.9345423579216003 - score 0.3333\n",
- "2022-04-28 22:16:36,885 BAD EPOCHS (no improvement): 0\n",
+ "2022-05-01 12:15:25,844 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:15:26,423 epoch 7 - iter 1/11 - loss 2.71161938 - samples/sec: 55.36 - lr: 0.100000\n",
+ "2022-05-01 12:15:26,886 epoch 7 - iter 2/11 - loss 2.50157821 - samples/sec: 69.26 - lr: 0.100000\n",
+ "2022-05-01 12:15:27,347 epoch 7 - iter 3/11 - loss 2.78014056 - samples/sec: 69.56 - lr: 0.100000\n",
+ "2022-05-01 12:15:27,853 epoch 7 - iter 4/11 - loss 2.82983196 - samples/sec: 63.36 - lr: 0.100000\n",
+ "2022-05-01 12:15:28,393 epoch 7 - iter 5/11 - loss 2.84246483 - samples/sec: 59.37 - lr: 0.100000\n",
+ "2022-05-01 12:15:28,847 epoch 7 - iter 6/11 - loss 2.89787177 - samples/sec: 70.64 - lr: 0.100000\n",
+ "2022-05-01 12:15:29,338 epoch 7 - iter 7/11 - loss 2.74564961 - samples/sec: 65.30 - lr: 0.100000\n",
+ "2022-05-01 12:15:29,813 epoch 7 - iter 8/11 - loss 2.79853699 - samples/sec: 67.58 - lr: 0.100000\n",
+ "2022-05-01 12:15:30,364 epoch 7 - iter 9/11 - loss 2.89167126 - samples/sec: 58.18 - lr: 0.100000\n",
+ "2022-05-01 12:15:30,834 epoch 7 - iter 10/11 - loss 2.86527851 - samples/sec: 68.22 - lr: 0.100000\n",
+ "2022-05-01 12:15:31,296 epoch 7 - iter 11/11 - loss 2.82858575 - samples/sec: 69.41 - lr: 0.100000\n",
+ "2022-05-01 12:15:31,297 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:15:31,298 EPOCH 7 done: loss 2.8286 - lr 0.1000000\n",
+ "2022-05-01 12:15:31,462 DEV : loss 4.020608901977539 - score 0.3182\n",
+ "2022-05-01 12:15:31,463 BAD EPOCHS (no improvement): 0\n",
"saving best model\n",
- "2022-04-28 22:16:42,377 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:16:42,856 epoch 7 - iter 1/10 - loss 2.15539050 - samples/sec: 67.09 - lr: 0.100000\n",
- "2022-04-28 22:16:43,336 epoch 7 - iter 2/10 - loss 1.68949413 - samples/sec: 66.95 - lr: 0.100000\n",
- "2022-04-28 22:16:43,781 epoch 7 - iter 3/10 - loss 1.81478349 - samples/sec: 72.07 - lr: 0.100000\n",
- "2022-04-28 22:16:44,241 epoch 7 - iter 4/10 - loss 1.68033907 - samples/sec: 69.87 - lr: 0.100000\n",
- "2022-04-28 22:16:44,730 epoch 7 - iter 5/10 - loss 1.64062953 - samples/sec: 65.57 - lr: 0.100000\n",
- "2022-04-28 22:16:45,227 epoch 7 - iter 6/10 - loss 1.59568199 - samples/sec: 64.78 - lr: 0.100000\n",
- "2022-04-28 22:16:45,663 epoch 7 - iter 7/10 - loss 1.46137918 - samples/sec: 73.39 - lr: 0.100000\n",
- "2022-04-28 22:16:46,169 epoch 7 - iter 8/10 - loss 1.41721664 - samples/sec: 63.36 - lr: 0.100000\n",
- "2022-04-28 22:16:46,734 epoch 7 - iter 9/10 - loss 1.39811980 - samples/sec: 56.74 - lr: 0.100000\n",
- "2022-04-28 22:16:46,937 epoch 7 - iter 10/10 - loss 1.38412433 - samples/sec: 159.20 - lr: 0.100000\n",
- "2022-04-28 22:16:46,938 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:16:46,939 EPOCH 7 done: loss 1.3841 - lr 0.1000000\n",
- "2022-04-28 22:16:47,081 DEV : loss 0.6798948049545288 - score 0.5\n",
- "2022-04-28 22:16:47,083 BAD EPOCHS (no improvement): 0\n",
+ "2022-05-01 12:15:38,431 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:15:38,979 epoch 8 - iter 1/11 - loss 3.28806710 - samples/sec: 58.61 - lr: 0.100000\n",
+ "2022-05-01 12:15:39,534 epoch 8 - iter 2/11 - loss 2.72140074 - samples/sec: 57.76 - lr: 0.100000\n",
+ "2022-05-01 12:15:40,061 epoch 8 - iter 3/11 - loss 2.77740423 - samples/sec: 60.89 - lr: 0.100000\n",
+ "2022-05-01 12:15:40,541 epoch 8 - iter 4/11 - loss 2.51573136 - samples/sec: 66.72 - lr: 0.100000\n",
+ "2022-05-01 12:15:41,109 epoch 8 - iter 5/11 - loss 2.54271443 - samples/sec: 56.53 - lr: 0.100000\n",
+ "2022-05-01 12:15:41,537 epoch 8 - iter 6/11 - loss 2.47530021 - samples/sec: 75.12 - lr: 0.100000\n",
+ "2022-05-01 12:15:42,078 epoch 8 - iter 7/11 - loss 2.62978831 - samples/sec: 59.26 - lr: 0.100000\n",
+ "2022-05-01 12:15:42,506 epoch 8 - iter 8/11 - loss 2.62844713 - samples/sec: 74.84 - lr: 0.100000\n",
+ "2022-05-01 12:15:42,988 epoch 8 - iter 9/11 - loss 2.61604464 - samples/sec: 66.59 - lr: 0.100000\n",
+ "2022-05-01 12:15:43,471 epoch 8 - iter 10/11 - loss 2.62512223 - samples/sec: 66.39 - lr: 0.100000\n",
+ "2022-05-01 12:15:43,895 epoch 8 - iter 11/11 - loss 2.64045010 - samples/sec: 75.65 - lr: 0.100000\n",
+ "2022-05-01 12:15:43,896 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:15:43,897 EPOCH 8 done: loss 2.6405 - lr 0.1000000\n",
+ "2022-05-01 12:15:44,036 DEV : loss 3.542769432067871 - score 0.3846\n",
+ "2022-05-01 12:15:44,038 BAD EPOCHS (no improvement): 0\n",
"saving best model\n",
- "2022-04-28 22:16:52,628 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:16:53,137 epoch 8 - iter 1/10 - loss 1.08732188 - samples/sec: 63.12 - lr: 0.100000\n",
- "2022-04-28 22:16:53,606 epoch 8 - iter 2/10 - loss 1.29048711 - samples/sec: 68.38 - lr: 0.100000\n",
- "2022-04-28 22:16:54,039 epoch 8 - iter 3/10 - loss 1.04415214 - samples/sec: 74.07 - lr: 0.100000\n",
- "2022-04-28 22:16:54,568 epoch 8 - iter 4/10 - loss 1.02857886 - samples/sec: 60.60 - lr: 0.100000\n",
- "2022-04-28 22:16:55,148 epoch 8 - iter 5/10 - loss 1.26690668 - samples/sec: 55.27 - lr: 0.100000\n",
- "2022-04-28 22:16:55,602 epoch 8 - iter 6/10 - loss 1.30797880 - samples/sec: 70.80 - lr: 0.100000\n",
- "2022-04-28 22:16:56,075 epoch 8 - iter 7/10 - loss 1.22035806 - samples/sec: 67.72 - lr: 0.100000\n",
- "2022-04-28 22:16:56,494 epoch 8 - iter 8/10 - loss 1.23306625 - samples/sec: 76.51 - lr: 0.100000\n",
- "2022-04-28 22:16:56,933 epoch 8 - iter 9/10 - loss 1.18903442 - samples/sec: 73.15 - lr: 0.100000\n",
- "2022-04-28 22:16:57,147 epoch 8 - iter 10/10 - loss 1.31105986 - samples/sec: 150.24 - lr: 0.100000\n",
- "2022-04-28 22:16:57,148 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:16:57,149 EPOCH 8 done: loss 1.3111 - lr 0.1000000\n",
- "2022-04-28 22:16:57,289 DEV : loss 0.5563207864761353 - score 0.5\n",
- "2022-04-28 22:16:57,290 BAD EPOCHS (no improvement): 0\n",
+ "2022-05-01 12:15:51,672 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:15:52,235 epoch 9 - iter 1/11 - loss 1.73337626 - samples/sec: 56.99 - lr: 0.100000\n",
+ "2022-05-01 12:15:52,801 epoch 9 - iter 2/11 - loss 2.09788013 - samples/sec: 56.74 - lr: 0.100000\n",
+ "2022-05-01 12:15:53,288 epoch 9 - iter 3/11 - loss 2.24861153 - samples/sec: 65.84 - lr: 0.100000\n",
+ "2022-05-01 12:15:53,735 epoch 9 - iter 4/11 - loss 2.42630130 - samples/sec: 71.75 - lr: 0.100000\n",
+ "2022-05-01 12:15:54,189 epoch 9 - iter 5/11 - loss 2.42454610 - samples/sec: 70.64 - lr: 0.100000\n",
+ "2022-05-01 12:15:54,720 epoch 9 - iter 6/11 - loss 2.39987107 - samples/sec: 60.38 - lr: 0.100000\n",
+ "2022-05-01 12:15:55,192 epoch 9 - iter 7/11 - loss 2.29154910 - samples/sec: 67.94 - lr: 0.100000\n",
+ "2022-05-01 12:15:55,632 epoch 9 - iter 8/11 - loss 2.22984707 - samples/sec: 73.06 - lr: 0.100000\n",
+ "2022-05-01 12:15:56,162 epoch 9 - iter 9/11 - loss 2.32317919 - samples/sec: 60.49 - lr: 0.100000\n",
+ "2022-05-01 12:15:56,559 epoch 9 - iter 10/11 - loss 2.24865967 - samples/sec: 80.81 - lr: 0.100000\n",
+ "2022-05-01 12:15:56,986 epoch 9 - iter 11/11 - loss 2.27327953 - samples/sec: 75.12 - lr: 0.100000\n",
+ "2022-05-01 12:15:56,988 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:15:56,988 EPOCH 9 done: loss 2.2733 - lr 0.1000000\n",
+ "2022-05-01 12:15:57,130 DEV : loss 3.4634602069854736 - score 0.5517\n",
+ "2022-05-01 12:15:57,132 BAD EPOCHS (no improvement): 0\n",
"saving best model\n",
- "2022-04-28 22:17:02,550 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:17:03,134 epoch 9 - iter 1/10 - loss 1.32691610 - samples/sec: 54.89 - lr: 0.100000\n",
- "2022-04-28 22:17:03,595 epoch 9 - iter 2/10 - loss 1.16159409 - samples/sec: 69.57 - lr: 0.100000\n",
- "2022-04-28 22:17:04,014 epoch 9 - iter 3/10 - loss 1.10929267 - samples/sec: 76.56 - lr: 0.100000\n",
- "2022-04-28 22:17:04,518 epoch 9 - iter 4/10 - loss 1.05318102 - samples/sec: 63.62 - lr: 0.100000\n",
- "2022-04-28 22:17:04,966 epoch 9 - iter 5/10 - loss 1.07275693 - samples/sec: 71.75 - lr: 0.100000\n",
- "2022-04-28 22:17:05,432 epoch 9 - iter 6/10 - loss 1.02824855 - samples/sec: 68.82 - lr: 0.100000\n",
- "2022-04-28 22:17:05,909 epoch 9 - iter 7/10 - loss 1.04051120 - samples/sec: 67.23 - lr: 0.100000\n",
- "2022-04-28 22:17:06,404 epoch 9 - iter 8/10 - loss 1.00513531 - samples/sec: 64.78 - lr: 0.100000\n",
- "2022-04-28 22:17:06,831 epoch 9 - iter 9/10 - loss 1.03960636 - samples/sec: 75.29 - lr: 0.100000\n",
- "2022-04-28 22:17:07,019 epoch 9 - iter 10/10 - loss 1.07805606 - samples/sec: 171.12 - lr: 0.100000\n",
- "2022-04-28 22:17:07,020 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:17:07,021 EPOCH 9 done: loss 1.0781 - lr 0.1000000\n",
- "2022-04-28 22:17:07,151 DEV : loss 0.909138560295105 - score 0.7143\n",
- "2022-04-28 22:17:07,153 BAD EPOCHS (no improvement): 0\n",
- "saving best model\n",
- "2022-04-28 22:17:12,454 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:17:12,906 epoch 10 - iter 1/10 - loss 1.49117911 - samples/sec: 70.96 - lr: 0.100000\n",
- "2022-04-28 22:17:13,334 epoch 10 - iter 2/10 - loss 1.23203236 - samples/sec: 74.94 - lr: 0.100000\n",
- "2022-04-28 22:17:13,789 epoch 10 - iter 3/10 - loss 1.12988973 - samples/sec: 70.48 - lr: 0.100000\n",
- "2022-04-28 22:17:14,275 epoch 10 - iter 4/10 - loss 1.07148103 - samples/sec: 65.98 - lr: 0.100000\n",
- "2022-04-28 22:17:14,795 epoch 10 - iter 5/10 - loss 1.08848752 - samples/sec: 61.66 - lr: 0.100000\n",
- "2022-04-28 22:17:15,328 epoch 10 - iter 6/10 - loss 1.05938606 - samples/sec: 60.26 - lr: 0.100000\n",
- "2022-04-28 22:17:15,730 epoch 10 - iter 7/10 - loss 1.00324091 - samples/sec: 79.80 - lr: 0.100000\n",
- "2022-04-28 22:17:16,245 epoch 10 - iter 8/10 - loss 0.93657552 - samples/sec: 62.26 - lr: 0.100000\n",
- "2022-04-28 22:17:16,681 epoch 10 - iter 9/10 - loss 0.95801387 - samples/sec: 73.56 - lr: 0.100000\n",
- "2022-04-28 22:17:16,901 epoch 10 - iter 10/10 - loss 0.87346228 - samples/sec: 146.77 - lr: 0.100000\n",
- "2022-04-28 22:17:16,902 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:17:16,903 EPOCH 10 done: loss 0.8735 - lr 0.1000000\n",
- "2022-04-28 22:17:17,047 DEV : loss 0.5443210601806641 - score 0.7143\n",
- "2022-04-28 22:17:17,050 BAD EPOCHS (no improvement): 0\n",
- "saving best model\n",
- "2022-04-28 22:17:27,557 ----------------------------------------------------------------------------------------------------\n",
- "2022-04-28 22:17:27,557 Testing using best model ...\n",
- "2022-04-28 22:17:27,566 loading file slot-model\\best-model.pt\n",
- "2022-04-28 22:17:33,102 0.6429\t0.4500\t0.5294\n",
- "2022-04-28 22:17:33,103 \n",
+ "2022-05-01 12:16:04,067 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:16:04,643 epoch 10 - iter 1/11 - loss 2.22972107 - samples/sec: 55.65 - lr: 0.100000\n",
+ "2022-05-01 12:16:05,144 epoch 10 - iter 2/11 - loss 2.20346498 - samples/sec: 64.00 - lr: 0.100000\n",
+ "2022-05-01 12:16:05,576 epoch 10 - iter 3/11 - loss 2.07501336 - samples/sec: 74.24 - lr: 0.100000\n",
+ "2022-05-01 12:16:06,036 epoch 10 - iter 4/11 - loss 2.09982607 - samples/sec: 69.72 - lr: 0.100000\n",
+ "2022-05-01 12:16:06,508 epoch 10 - iter 5/11 - loss 2.08048103 - samples/sec: 67.94 - lr: 0.100000\n",
+ "2022-05-01 12:16:07,062 epoch 10 - iter 6/11 - loss 2.08074635 - samples/sec: 57.87 - lr: 0.100000\n",
+ "2022-05-01 12:16:07,590 epoch 10 - iter 7/11 - loss 2.07187140 - samples/sec: 60.84 - lr: 0.100000\n",
+ "2022-05-01 12:16:08,116 epoch 10 - iter 8/11 - loss 2.10148455 - samples/sec: 60.95 - lr: 0.100000\n",
+ "2022-05-01 12:16:08,563 epoch 10 - iter 9/11 - loss 2.06198527 - samples/sec: 71.74 - lr: 0.100000\n",
+ "2022-05-01 12:16:09,066 epoch 10 - iter 10/11 - loss 2.00194792 - samples/sec: 63.75 - lr: 0.100000\n",
+ "2022-05-01 12:16:09,486 epoch 10 - iter 11/11 - loss 2.00801701 - samples/sec: 76.37 - lr: 0.100000\n",
+ "2022-05-01 12:16:09,487 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:16:09,488 EPOCH 10 done: loss 2.0080 - lr 0.1000000\n",
+ "2022-05-01 12:16:09,624 DEV : loss 3.1866908073425293 - score 0.4706\n",
+ "2022-05-01 12:16:09,625 BAD EPOCHS (no improvement): 1\n",
+ "2022-05-01 12:16:16,655 ----------------------------------------------------------------------------------------------------\n",
+ "2022-05-01 12:16:16,656 Testing using best model ...\n",
+ "2022-05-01 12:16:16,676 loading file slot-model\\best-model.pt\n",
+ "2022-05-01 12:16:22,739 0.4231\t0.3056\t0.3548\n",
+ "2022-05-01 12:16:22,740 \n",
"Results:\n",
- "- F1-score (micro) 0.5294\n",
- "- F1-score (macro) 0.4533\n",
+ "- F1-score (micro) 0.3548\n",
+ "- F1-score (macro) 0.2570\n",
"\n",
"By class:\n",
- "area tp: 0 - fp: 0 - fn: 1 - precision: 0.0000 - recall: 0.0000 - f1-score: 0.0000\n",
- "date tp: 1 - fp: 1 - fn: 0 - precision: 0.5000 - recall: 1.0000 - f1-score: 0.6667\n",
- "quantity tp: 3 - fp: 1 - fn: 3 - precision: 0.7500 - recall: 0.5000 - f1-score: 0.6000\n",
- "time tp: 2 - fp: 2 - fn: 4 - precision: 0.5000 - recall: 0.3333 - f1-score: 0.4000\n",
- "title tp: 3 - fp: 1 - fn: 3 - precision: 0.7500 - recall: 0.5000 - f1-score: 0.6000\n",
- "2022-04-28 22:17:33,104 ----------------------------------------------------------------------------------------------------\n"
+ "area tp: 1 - fp: 1 - fn: 2 - precision: 0.5000 - recall: 0.3333 - f1-score: 0.4000\n",
+ "date tp: 0 - fp: 3 - fn: 3 - precision: 0.0000 - recall: 0.0000 - f1-score: 0.0000\n",
+ "goal tp: 2 - fp: 2 - fn: 8 - precision: 0.5000 - recall: 0.2000 - f1-score: 0.2857\n",
+ "interval tp: 0 - fp: 0 - fn: 1 - precision: 0.0000 - recall: 0.0000 - f1-score: 0.0000\n",
+ "quantity tp: 4 - fp: 1 - fn: 2 - precision: 0.8000 - recall: 0.6667 - f1-score: 0.7273\n",
+ "seats tp: 0 - fp: 1 - fn: 0 - precision: 0.0000 - recall: 0.0000 - f1-score: 0.0000\n",
+ "time tp: 1 - fp: 4 - fn: 5 - precision: 0.2000 - recall: 0.1667 - f1-score: 0.1818\n",
+ "title tp: 3 - fp: 3 - fn: 4 - precision: 0.5000 - recall: 0.4286 - f1-score: 0.4615\n",
+ "2022-05-01 12:16:22,740 ----------------------------------------------------------------------------------------------------\n"
]
},
{
"data": {
"text/plain": [
- "{'test_score': 0.5294117647058824,\n",
+ "{'test_score': 0.3548387096774194,\n",
" 'dev_score_history': [0.0,\n",
+ " 0.06451612903225806,\n",
" 0.0,\n",
- " 0.0,\n",
- " 0.0,\n",
- " 0.2222222222222222,\n",
- " 0.3333333333333333,\n",
- " 0.5,\n",
- " 0.5,\n",
- " 0.7142857142857143,\n",
- " 0.7142857142857143],\n",
- " 'train_loss_history': [5.108907747268677,\n",
- " 2.6376620531082153,\n",
- " 2.0286046147346495,\n",
- " 1.816417047381401,\n",
- " 1.7658178985118866,\n",
- " 1.5524149179458617,\n",
- " 1.384124332666397,\n",
- " 1.3110598623752594,\n",
- " 1.0780560612678527,\n",
- " 0.8734622806310653],\n",
- " 'dev_loss_history': [1.1116931438446045,\n",
- " 1.2027416229248047,\n",
- " 0.9265440702438354,\n",
- " 0.8311207890510559,\n",
- " 0.7797471880912781,\n",
- " 0.9345423579216003,\n",
- " 0.6798948049545288,\n",
- " 0.5563207864761353,\n",
- " 0.909138560295105,\n",
- " 0.5443210601806641]}"
+ " 0.17142857142857143,\n",
+ " 0.16666666666666663,\n",
+ " 0.23809523809523808,\n",
+ " 0.3181818181818182,\n",
+ " 0.38461538461538464,\n",
+ " 0.5517241379310345,\n",
+ " 0.47058823529411764],\n",
+ " 'train_loss_history': [6.525583657351407,\n",
+ " 5.26294283433394,\n",
+ " 4.7632177526300605,\n",
+ " 4.261161284013228,\n",
+ " 3.807387958873402,\n",
+ " 3.2323263558474453,\n",
+ " 2.828585754741322,\n",
+ " 2.6404500982978125,\n",
+ " 2.2732795260169287,\n",
+ " 2.0080170089548286],\n",
+ " 'dev_loss_history': [8.419286727905273,\n",
+ " 7.168168544769287,\n",
+ " 7.209894180297852,\n",
+ " 5.882441997528076,\n",
+ " 5.224854469299316,\n",
+ " 4.557621002197266,\n",
+ " 4.020608901977539,\n",
+ " 3.542769432067871,\n",
+ " 3.4634602069854736,\n",
+ " 3.1866908073425293]}"
]
},
- "execution_count": 42,
+ "execution_count": 31,
"metadata": {},
"output_type": "execute_result"
}
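Note: the training hyperparameters in the log are unchanged against the previous run; only the corpus grew. A sketch of the corresponding trainer call, assuming flair's ModelTrainer and the tagger/corpus objects from the cells above (patience 3 and anneal_factor 0.5 are flair defaults):

```python
from flair.trainers import ModelTrainer

trainer = ModelTrainer(tagger, corpus)
trainer.train('slot-model',        # base path seen in the log above
              learning_rate=0.1,
              mini_batch_size=32,
              max_epochs=10)
```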
@@ -922,14 +756,14 @@
},
{
"cell_type": "code",
- "execution_count": 43,
+ "execution_count": 32,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- "2022-04-28 22:17:33,278 loading file slot-model/final-model.pt\n"
+ "2022-05-01 12:16:22,953 loading file slot-model/final-model.pt\n"
]
}
],
@@ -947,15 +781,28 @@
},
{
"cell_type": "code",
- "execution_count": 44,
+ "execution_count": 69,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[('kiedy', 'O'), ('gracie', 'O'), ('film', 'O'), ('zorro', 'B-title')]"
+ ]
+ },
+ "execution_count": 69,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
"source": [
"def predict(model, sentence):\n",
" csentence = [{'form': word} for word in sentence]\n",
" fsentence = conllu2flair([csentence])[0]\n",
" model.predict(fsentence)\n",
- " return [(token, ftoken.get_tag('slot').value) for token, ftoken in zip(sentence, fsentence)]\n"
+ " return [(token, ftoken.get_tag('slot').value) for token, ftoken in zip(sentence, fsentence)]\n",
+ "\n",
+ "predict(model, 'kiedy gracie film zorro'.split())"
]
},
{
@@ -968,7 +815,7 @@
},
{
"cell_type": "code",
- "execution_count": 90,
+ "execution_count": 68,
"metadata": {},
"outputs": [
{
@@ -976,23 +823,24 @@
"text/html": [
"\n",
"\n",
- "co | O |
\n",
- "gracie | O |
\n",
- "popołudniu | O |
\n",
+ "kiedy | O |
\n",
+ "gracie | O |
\n",
+ "film | O |
\n",
+ "zorro | B-title |
\n",
"\n",
"
"
],
"text/plain": [
- "'\\n\\nco | O |
\\ngracie | O |
\\npopołudniu | O |
\\n\\n
'"
+ "'\\n\\nkiedy | O |
\\ngracie | O |
\\nfilm | O |
\\nzorro | B-title |
\\n\\n
'"
]
},
- "execution_count": 90,
+ "execution_count": 68,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
- "tabulate(predict(model, 'batman'.split()), tablefmt='html')"
+ "tabulate(predict(model, 'kiedy gracie film zorro'.split()), tablefmt='html')"
]
},
{
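Note on the notebook results: the new test score (0.3548 micro F1) is lower than the previous 0.5294, which is consistent with the newly added goal/interval/seats slots still scoring poorly in the per-class breakdown. As a sanity check, the reported micro scores follow directly from the per-class tp/fp/fn counts in the test report above:

```python
# (tp, fp, fn) per class, copied from the test report in the notebook output above.
counts = {'area': (1, 1, 2), 'date': (0, 3, 3), 'goal': (2, 2, 8),
          'interval': (0, 0, 1), 'quantity': (4, 1, 2), 'seats': (0, 1, 0),
          'time': (1, 4, 5), 'title': (3, 3, 4)}

tp = sum(c[0] for c in counts.values())  # 11
fp = sum(c[1] for c in counts.values())  # 15
fn = sum(c[2] for c in counts.values())  # 25

precision = tp / (tp + fp)                          # ~0.4231
recall = tp / (tp + fn)                             # ~0.3056
f1 = 2 * precision * recall / (precision + recall)  # ~0.3548
print(round(precision, 4), round(recall, 4), round(f1, 4))
```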
diff --git a/src/components/NLU.py b/src/components/NLU.py
index b562355..fd2665b 100644
--- a/src/components/NLU.py
+++ b/src/components/NLU.py
@@ -1,28 +1,43 @@
-from jsgf import PublicRule, Grammar
import re
+from flair.data import Sentence, Token
+from flair.datasets import SentenceDataset
+from flair.models import SequenceTagger
class NLU:
- def get_str_cleaned(str_dirty):
- punctuation = '!"#$%&\'()*+,-./:;<=>?@[\\\\]^_`{|}~'
- new_str = str_dirty.lower()
- new_str = re.sub(' +', ' ', new_str)
+ def __init__(self):
+ self.nluModel = SequenceTagger.load('./lab/slot-model/final-model.pt')
+
+ def predict(self, sentence):
+ sentence = self.getStrCleaned(sentence)
+ csentence = [{'form': word} for word in sentence]
+ fsentence = self.conllu2flair([csentence])[0]
+ self.nluModel.predict(fsentence)
+ return [(token, ftoken.get_tag('slot').value) for token, ftoken in zip(sentence, fsentence)]
+
+ def conllu2flair(self, sentences, label=None):
+ fsentences = []
+
+ for sentence in sentences:
+ fsentence = Sentence()
+
+ for token in sentence:
+ ftoken = Token(token['form'])
+
+ if label:
+ ftoken.add_tag(label, token[label])
+
+ fsentence.add_token(ftoken)
+
+ fsentences.append(fsentence)
+
+ return SentenceDataset(fsentences)
+
+ def getStrCleaned(self, rawMessage):
+        # / and : are needed for date and time recognition
+ punctuation = '!"#$%&\'()*+,-.;<=>?@[\\\\]^_`{|}~'
+ messageLower = rawMessage.lower()
+        # messageLower = re.sub(' +', ' ', messageLower)
for char in punctuation:
- new_str = new_str.replace(char,'')
- return new_str
-
- def getDialogAct(rule):
- slots = []
- return {'act': rule.grammar.name, 'slots': slots}
-
- def nlu(utterance):
- hello = Grammar('hello')
- hello.add_rule(PublicRule('witaj', 'cześć jak masz na imię'))
-
- utterance = NLU.get_str_cleaned(utterance)
-
- matched = hello.find_matching_rules(utterance)
- if matched:
- return NLU.getDialogAct(matched[0])
- else:
- return {'act': 'null', 'slots': []}
+ messageLower = messageLower.replace(char,'')
+ return messageLower.split()
\ No newline at end of file
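A quick way to exercise the rewritten NLU component, assuming the notebook has been run so that ./lab/slot-model/final-model.pt exists and the script is started from the repository root (the import path below is an assumption; adjust it to how the project is launched):

```python
from src.components.NLU import NLU  # assumed import path

nlu = NLU()
# getStrCleaned() lower-cases, strips punctuation except '/' and ':', and splits,
# so times like 19:30 survive cleaning. The result is a list of (token, tag) pairs,
# e.g. [('kiedy', 'O'), ('gracie', 'O'), ('film', 'O'), ('zorro', 'B-title')]
# for a reasonably trained model.
print(nlu.predict('Kiedy gracie film Zorro?'))
```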
diff --git a/src/dialogue_system.py b/src/dialogue_system.py
index f290005..b8b6b50 100644
--- a/src/dialogue_system.py
+++ b/src/dialogue_system.py
@@ -7,24 +7,30 @@ slots = [
("date", None),
("time", None),
("quantity", None),
- ("location", None),
("seats", None),
- ("reservation_id", None),
("goal", None),
("area", None),
+ ("interval", None),
]
+def chatbot():
-def generate_response(input):
- # nlu
- nlu = NLU.nlu(input)
+ isActive = True
- # dst
- dst_obj = DST(slots)
- dst = dst_obj.getDialogueState(nlu)
+ # NLU
+ nlu = NLU()
- return dst
+ # hello message
+ print("wpisz /exit aby zakończyć")
+ print("Witaj w systemie kinowym Nachos, w czym mogę Ci pomóc?")
-
-inputText = 'Cześć, jak masz na imię?'
-print(NLG.getResponse(generate_response(inputText)))
+ # main loop
+ while isActive:
+ userMessage = input("$")
+ if userMessage == "/exit":
+ print("Do usłyszenia")
+ isActive = False
+ else:
+ nluPred = nlu.predict(sentence=userMessage)
+ print(nluPred)
+chatbot()
\ No newline at end of file
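The main loop currently just prints the raw (token, tag) pairs returned by the NLU. The slots list above, however, holds (name, value) entries, so a later DST integration will need to collapse the BIO tags into slot values. A hypothetical helper (not part of this change) could look like:

```python
def group_slots(tagged_tokens):
    """Collapse BIO-tagged (token, tag) pairs into (slot_name, value) pairs."""
    slots, name, words = [], None, []
    for token, tag in tagged_tokens:
        if tag.startswith('B-'):
            if name:
                slots.append((name, ' '.join(words)))
            name, words = tag[2:], [token]
        elif tag.startswith('I-') and name == tag[2:]:
            words.append(token)
        else:  # 'O' or an I- tag that does not continue the current span
            if name:
                slots.append((name, ' '.join(words)))
            name, words = None, []
    if name:
        slots.append((name, ' '.join(words)))
    return slots

# e.g. group_slots([('dwa', 'B-quantity'), ('bilety', 'O'), ('na', 'O'),
#                   ('batmana', 'B-title')]) -> [('quantity', 'dwa'), ('title', 'batmana')]
```

Keeping this conversion in one place would leave dialogue_system.py decoupled from the BIO convention used by the tagger.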
diff --git a/tasks/zad8/pl/test.conllu b/tasks/zad8/pl/test.conllu
index c1d97c4..4674a20 100644
--- a/tasks/zad8/pl/test.conllu
+++ b/tasks/zad8/pl/test.conllu
@@ -1,125 +1,125 @@
-# text: dzień dobry
-# intent: hello
-# slots:
+# text: dzień dobry
+# intent: hello
+# slots:
1 dzień hello NoLabel
2 dobry hello NoLabel
-
-# text: jakie filmy są w tym tygodniu w repertuarze
-# intent: reqmore
-# slots:
-1 jakie reqmore NoLabel
-2 filmy reqmore NoLabel
+
+# text: jakie filmy są w tym tygodniu w repertuarze
+# intent: reqmore
+# slots: jakiefilmy:goal,wtymtygodniu:interval
+1 jakie reqmore B-goal
+2 filmy reqmore I-goal
3 są reqmore NoLabel
-4 w reqmore NoLabel
-5 tym reqmore NoLabel
-6 tygodniu reqmore NoLabel
+4 w reqmore B-interval
+5 tym reqmore I-interval
+6 tygodniu reqmore I-interval
7 w reqmore NoLabel
8 repertuarze reqmore NoLabel
-
-# text: o której godzinie w piątej mogę zobaczyć na noże
-# intent: reqmore inform
-# slots: nanoże:title
-1 o reqmore inform NoLabel
-2 której reqmore inform NoLabel
-3 godzinie reqmore inform NoLabel
+
+# text: o której godzinie w piątek mogę zobaczyć na noże
+# intent: reqmore inform
+# slots: októrejgodzinie:goal,wpiątek:date,nanoże:title
+1 o reqmore inform B-goal
+2 której reqmore inform I-goal
+3 godzinie reqmore inform I-goal
4 w reqmore inform NoLabel
-5 piątej reqmore inform NoLabel
+5 piątek reqmore inform B-date
6 mogę reqmore inform NoLabel
7 zobaczyć reqmore inform NoLabel
8 na reqmore inform B-title
9 noże reqmore inform I-title
-
-# text: ok w takim razie chciałbym zarezerwować 2 miejsca na seans o 19:30
-# intent: inform
-# slots: 2:quantity,o19:30:time
+
+# text: ok w takim razie chciałbym zarezerwować 2 miejsca na seans o 19:30
+# intent: inform
+# slots: zarezerwować:goal,2:quantity,o19:30:time
1 ok inform NoLabel
2 w inform NoLabel
3 takim inform NoLabel
4 razie inform NoLabel
5 chciałbym inform NoLabel
-6 zarezerwować inform NoLabel
+6 zarezerwować inform B-goal
7 2 inform B-quantity
8 miejsca inform NoLabel
9 na inform NoLabel
10 seans inform NoLabel
-11 o inform B-time
-12 19:30 inform I-time
-
-# text: dwa normalne
-# intent: inform
-# slots: dwa:quantity
+11 o inform NoLabel
+12 19:30 inform B-time
+
+# text: dwa normalne
+# intent: inform
+# slots: dwa:quantity
1 dwa inform B-quantity
2 normalne inform NoLabel
-
-# text: mogą być
-# intent: ack
-# slots:
+
+# text: mogą być
+# intent: ack
+# slots:
1 mogą ack NoLabel
2 być ack NoLabel
-
-# text: dziękuje bardzo
-# intent: bye
-# slots:
+
+# text: dziękuje bardzo
+# intent: bye
+# slots:
1 dziękuje bye NoLabel
2 bardzo bye NoLabel
-
-# text: witam
-# intent: hello
-# slots:
+
+# text: witam
+# intent: hello
+# slots:
1 witam hello NoLabel
-
-# text: chciałbym zarezerować dwa bilety na batman
-# intent: inform
-# slots: dwa:quantity,batman:title
+
+# text: chciałbym zarezerować dwa bilety na batman
+# intent: inform
+# slots: zarezerować:goal,dwa:quantity,batman:title
1 chciałbym inform NoLabel
-2 zarezerować inform NoLabel
+2 zarezerować inform B-goal
3 dwa inform B-quantity
4 bilety inform NoLabel
5 na inform NoLabel
6 batman inform B-title
-
-# text: a kiedy jest najbliższy seans
-# intent: inform
-# slots:
+
+# text: a kiedy jest najbliższy seans
+# intent: inform
+# slots: kiedyjestnajbliższyseans:goal
1 a inform NoLabel
-2 kiedy inform NoLabel
-3 jest inform NoLabel
-4 najbliższy inform NoLabel
-5 seans inform NoLabel
-
-# text: to poproszę trzy bilety na batmana o 15:30
-# intent: inform
-# slots: poproszętrzy:quantity,batmana:title,o15:30:time
+2 kiedy inform B-goal
+3 jest inform I-goal
+4 najbliższy inform I-goal
+5 seans inform I-goal
+
+# text: to poproszę trzy bilety na batmana o 15:30
+# intent: inform
+# slots: poproszętrzy:quantity,batmana:title,15:30:time
1 to inform NoLabel
2 poproszę inform B-quantity
3 trzy inform I-quantity
4 bilety inform NoLabel
5 na inform NoLabel
6 batmana inform B-title
-7 o inform B-time
-8 15:30 inform I-time
-
-# text: na samym tyle sali
-# intent: inform
-# slots:
-1 na inform NoLabel
-2 samym inform NoLabel
-3 tyle inform NoLabel
-4 sali inform NoLabel
-
-# text: dziękuję
-# intent: thankyou
-# slots:
+7 o inform NoLabel
+8 15:30 inform B-time
+
+# text: na samym tyle sali
+# intent: inform
+# slots: nasamymtylesali:area
+1 na inform B-area
+2 samym inform I-area
+3 tyle inform I-area
+4 sali inform I-area
+
+# text: dziękuję
+# intent: thankyou
+# slots:
1 dziękuję thankyou NoLabel
-
-# text: hej
-# intent: hello
-# slots:
+
+# text: hej
+# intent: hello
+# slots:
1 hej hello NoLabel
-
-# text: chciałbym kupić bilety na seans zorro - jak to było na prawdę
-# intent: inform
-# slots: zorro:title
+
+# text: chciałbym kupić bilety na seans zorro - jak to było na prawdę
+# intent: inform
+# slots: zorro:title
1 chciałbym inform NoLabel
2 kupić inform NoLabel
3 bilety inform NoLabel
@@ -132,89 +132,78 @@
10 było inform NoLabel
11 na inform NoLabel
12 prawdę inform NoLabel
-
-# text: zorro2
-# intent: inform
-# slots:
-1 zorro2 inform NoLabel
-
-# text: poproszę o listę seansów
-# intent: reqmore
-# slots:
+
+# text: zorro2
+# intent: inform
+# slots: zorro2:title
+1 zorro2 inform B-title
+
+# text: poproszę o listę seansów
+# intent: reqmore
+# slots: listęseansów:goal
1 poproszę reqmore NoLabel
2 o reqmore NoLabel
-3 listę reqmore NoLabel
-4 seansów reqmore NoLabel
-
-# text: poproszę o listę filmów granych jutro wieczorem
-# intent: reqmore inform
-# slots: jutro:date,wieczorem:time
+3 listę reqmore B-goal
+4 seansów reqmore I-goal
+
+# text: poproszę o listę filmów granych jutro wieczorem
+# intent: reqmore inform
+# slots: listęfilmów:goal,jutro:date,wieczorem:time
1 poproszę reqmore inform NoLabel
2 o reqmore inform NoLabel
-3 listę reqmore inform NoLabel
-4 filmów reqmore inform NoLabel
+3 listę reqmore inform B-goal
+4	filmów	reqmore inform	I-goal
5 granych reqmore inform NoLabel
6 jutro reqmore inform B-date
7 wieczorem reqmore inform B-time
-
-# text: chciałbym kupić bilety na film to nie wypanda
-# intent: inform
-# slots: toniewypanda:title
+
+# text: chciałbym kupić bilety na film to nie wypanda
+# intent: inform
+# slots: kupić:goal,toniewypanda:title
1 chciałbym inform NoLabel
-2 kupić inform NoLabel
+2 kupić inform B-goal
3 bilety inform NoLabel
4 na inform NoLabel
5 film inform NoLabel
6 to inform B-title
7 nie inform I-title
8 wypada inform I-title
-
-# text: 20:15
-# intent: inform
-# slots: 20:15:time
+
+# text: 20:15
+# intent: inform
+# slots: 20:15:time
1 20:15 inform B-time
-
-# text: 11
-# intent: inform
-# slots: 11:quantity
+
+# text: 11
+# intent: inform
+# slots: 11:quantity
1 11 inform B-quantity
-
-# text: w środku pomiedzy górnym i środkowym rzędzie
-# intent: nan
-# slots:
-1 w NoLabel
-2 środku NoLabel
-3 pomiedzy NoLabel
-4 górnym NoLabel
-5 i NoLabel
-6 środkowym NoLabel
-7 rzędzie NoLabel
-
-# text: w środku pomiedzy górnym i środkowym rzędem
-# intent: inform
-# slots:
-1 w inform NoLabel
-2 środku inform NoLabel
-3 pomiedzy inform NoLabel
-4 górnym inform NoLabel
-5 i inform NoLabel
-6 środkowym inform NoLabel
-7 rzędem inform NoLabel
-
-# text: <3
-# intent: thankyou
-# slots:
+
+# text: w środku pomiedzy górnym i środkowym rzędem
+# intent: nan
+# slots: wśrodkupomiedzygórnymiśrodkowymrzędem:area
+1 w null B-area
+2 środku null I-area
+3 pomiedzy null I-area
+4 górnym null I-area
+5 i null I-area
+6 środkowym null I-area
+7 rzędem null I-area
+
+# text: <3
+# intent: thankyou
+# slots:
1 <3 thankyou NoLabel
-
-# text: dzień dobry
-# intent: hello
-# slots:
+
+# text: dzień dobry
+# intent: hello
+# slots:
1 dzień hello NoLabel
2 dobry hello NoLabel
-
-# text: chciał bym zamówić bilet na film minionki dzisiaj o 18:30
-# intent: inform
-# slots: dzisiaj:date
+
+# text: chciał bym zamówić bilet na film minionki dzisiaj o 18:30
+# intent: inform
+# slots: dzisiaj:date,minionki:title,18:30:time
1 chciał inform NoLabel
2 bym inform NoLabel
3 zamówić inform NoLabel
@@ -222,54 +211,53 @@
5 na inform NoLabel
6 film inform NoLabel
7 minionki inform B-title
-8 dzisiaj inform B-time
-9 o inform I-time
-10 18:30 inform I-time
-
-# text: czy jest jakis film o godzinie 18:30
-# intent: request
-# slots: ogodzinie18:30:time
+8 dzisiaj inform B-date
+9 o inform NoLabel
+10 18:30 inform B-time
+
+# text: czy jest jakis film o godzinie 18:30
+# intent: request
+# slots: jestjakisfilm:goal,18:30:time
1 czy request NoLabel
-2 jest request NoLabel
-3 jakis request NoLabel
-4 film request NoLabel
-5 o request B-time
-6 godzinie request I-time
-7 18:30 request I-time
-
-# text: niech będzie
-# intent: ack
-# slots:
+2 jest request B-goal
+3 jakis request I-goal
+4 film request I-goal
+5 o request NoLabel
+6 godzinie request NoLabel
+7 18:30 request B-time
+
+# text: niech będzie
+# intent: ack
+# slots:
1 niech ack NoLabel
2 będzie ack NoLabel
-
-# text: 1
-# intent: inform
-# slots: 1:quantity
+
+# text: 1
+# intent: inform
+# slots: 1:quantity
1 1 inform B-quantity
-
-# text: jakie sš dostępne ulgi
-# intent: nan
-# slots:
+
+# text: jakie sš dostępne ulgi
+# intent: nan
+# slots:
1 jakie NoLabel
2 sš NoLabel
3 dostępne NoLabel
4 ulgi NoLabel
-
-# text: studencka
-# intent: inform
-# slots:
+
+# text: studencka
+# intent: inform
+# slots:
1 studencka inform NoLabel
-
-# text: daleko od ekranu
-# intent: inform
-# slots: dalekoodekranu:area
+
+# text: daleko od ekranu
+# intent: inform
+# slots: dalekoodekranu:area
1 daleko inform B-area
2 od inform I-area
3 ekranu inform I-area
-
-# text: tak
-# intent: ack
-# slots:
+
+# text: tak
+# intent: ack
+# slots:
1 tak ack NoLabel
-
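
For reference, the annotated test.conllu above follows a simple block format: `# text:`, `# intent:` and `# slots:` headers followed by tab-separated token lines carrying the BIO labels, with blank lines separating utterances. A minimal reader sketch, assuming UTF-8 files and taking the last column as the BIO tag (entries with intent `nan` omit the intent column); the function and field names are illustrative only.

def read_conllu_like(path):
    entries, current = [], {"tokens": [], "labels": []}
    with open(path, encoding="utf-8") as handle:
        for raw in handle:
            line = raw.strip()
            if not line:
                # Blank line ends the current entry.
                if current["tokens"]:
                    entries.append(current)
                    current = {"tokens": [], "labels": []}
            elif line.startswith("# text:"):
                current["text"] = line[len("# text:"):].strip()
            elif line.startswith("# intent:"):
                current["intent"] = line[len("# intent:"):].strip()
            elif line.startswith("# slots:"):
                current["slots"] = line[len("# slots:"):].strip()
            elif not line.startswith("#"):
                fields = line.split("\t")
                current["tokens"].append(fields[1])
                current["labels"].append(fields[-1])  # BIO tag is the last column
    if current["tokens"]:
        entries.append(current)
    return entries

Applied to tasks/zad8/pl/test.conllu, this yields one dict per utterance with parallel tokens/labels lists that can feed slot-tagger training or evaluation.
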
diff --git a/tasks/zad8/pl/train.conllu b/tasks/zad8/pl/train.conllu
index 34a1c96..5f8e9ad 100644
--- a/tasks/zad8/pl/train.conllu
+++ b/tasks/zad8/pl/train.conllu
@@ -222,13 +222,13 @@
# text: poproszę listę filmów granych jutro wieczorem
# intent: reqmore inform
-# slots: listęfilmów:goaljutro:date,wieczorem:time
+# slots: listęfilmów:goal,jutro:date,wieczorem:interval
1 poproszę reqmore inform NoLabel
2 listę reqmore inform B-goal
3 filmów reqmore inform I-goal
4 granych reqmore inform NoLabel
5 jutro reqmore inform B-date
-6 wieczorem reqmore inform B-time
+6 wieczorem reqmore inform B-interval
# text: chciałbym kupić bilety na transformers
# intent: inform
@@ -1769,9 +1769,9 @@
# text: dziś wieczorem
# intent: inform
-# slots: dziś:date,wieczorem:time
+# slots: dziś:date,wieczorem:interval
1 dziś inform B-date
-2 wieczorem inform B-time
+2 wieczorem inform B-interval
# text: proszę o godzinie 20:15
# intent: inform
@@ -2118,7 +2118,7 @@
# text: jaki film jest grany jutro w godzinach popołudniowych
# intent: request
-# slots: jutro:date,popołudniowych:time
+# slots: jutro:date,popołudniowych:interval
1 jaki request NoLabel
2 film request NoLabel
3 jest request NoLabel
@@ -2126,7 +2126,7 @@
5 jutro request B-date
6 w request NoLabel
7 godzinach request NoLabel
-8 popołudniowych request B-time
+8 popołudniowych request B-interval
# text: czy sš wcześniejsze seanse
# intent: reqmore
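
The train.conllu retagging above moves coarse parts of the day such as "wieczorem" and "popołudniowych" from the time slot to the new interval slot, leaving time for exact clock values. A sketch of how a dialogue policy might later resolve such an interval value; the concrete hour ranges below are assumptions for illustration only.

# Hypothetical mapping from interval slot values to clock ranges (assumed values).
INTERVAL_RANGES = {
    "wieczorem": ("18:00", "23:00"),       # "in the evening"
    "popołudniowych": ("12:00", "18:00"),  # "in the afternoon (hours)"
}

def resolve_interval(value):
    # Return an (start, end) clock range for a recognized interval, else None.
    return INTERVAL_RANGES.get(value.lower())
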