From 6dbb5168eb757cb785557e78a7e7d3e2b277dc90 Mon Sep 17 00:00:00 2001
From: Dominik Strzako
Date: Sat, 22 May 2021 18:52:56 +0200
Subject: [PATCH] ".py" version of LogReg

---
 main.py => Bayes.py |   0
 LogReg.py           | 112 +++++++++++++++++++++++++
 LogReg_Test.ipynb   |  84 ++++++++++---------
 dev-0/out.tsv       | 174 +++++++++++++++++++-------------------
 test-A/out.tsv      | 198 ++++++++++++++++++++++----------------------
 5 files changed, 343 insertions(+), 225 deletions(-)
 rename main.py => Bayes.py (100%)
 create mode 100644 LogReg.py

diff --git a/main.py b/Bayes.py
similarity index 100%
rename from main.py
rename to Bayes.py
diff --git a/LogReg.py b/LogReg.py
new file mode 100644
index 0000000..3fbd71c
--- /dev/null
+++ b/LogReg.py
@@ -0,0 +1,112 @@
+import pandas as pd
+import numpy as np
+import torch
+from nltk.tokenize import word_tokenize
+import gensim.downloader as api
+
+# Load X and y for train, and X for dev and test
+X_train = pd.read_table('train/in.tsv', sep='\t', error_bad_lines=False, quoting=3, header=None, names=['content', 'id'], usecols=['content'])
+y_train = pd.read_table('train/expected.tsv', sep='\t', error_bad_lines=False, quoting=3, header=None, names=['label'])
+X_dev = pd.read_table('dev-0/in.tsv', sep='\t', error_bad_lines=False, header=None, quoting=3, names=['content', 'id'], usecols=['content'])
+X_test = pd.read_table('test-A/in.tsv', sep='\t', error_bad_lines=False, header=None, quoting=3, names=['content', 'id'], usecols=['content'])
+
+# Lowercase all three datasets
+# https://www.datacamp.com/community/tutorials/case-conversion-python
+X_train = X_train.content.str.lower()
+X_dev = X_dev.content.str.lower()
+X_test = X_test.content.str.lower()
+
+y_train = y_train['label']  # DataFrame to Series?
+
+# Tokenize all three datasets
+# https://www.nltk.org/_modules/nltk/tokenize.html
+X_train = [word_tokenize(doc) for doc in X_train]
+X_dev = [word_tokenize(doc) for doc in X_dev]
+X_test = [word_tokenize(doc) for doc in X_test]
+
+# word2vec, as suggested by Mr Jakub
+# https://radimrehurek.com/gensim/auto_examples/howtos/run_downloader_api.html
+# https://www.kaggle.com/kstathou/word-embeddings-logistic-regression
+w2v = api.load('word2vec-google-news-300')
+
+def document_vector(doc):
+    """Create a document vector by averaging word vectors, skipping out-of-vocabulary words."""
+    return np.mean([w2v[w] for w in doc if w in w2v] or [np.zeros(300)], axis=0)
+
+X_train = [document_vector(doc) for doc in X_train]
+X_dev = [document_vector(doc) for doc in X_dev]
+X_test = [document_vector(doc) for doc in X_test]
+
+
+# Neural network from lab 8
+# https://git.wmi.amu.edu.pl/filipg/aitech-eks-pub/src/branch/master/cw/08_regresja_logistyczna.ipynb
+class NeuralNetwork(torch.nn.Module):
+    def __init__(self, hidden_size):
+        super(NeuralNetwork, self).__init__()
+        self.l1 = torch.nn.Linear(300, hidden_size)  # word2vec-google-news-300 embeddings are always 300-dimensional
+        self.l2 = torch.nn.Linear(hidden_size, 1)
+
+    def forward(self, x):
+        x = self.l1(x)
+        x = torch.relu(x)
+        x = self.l2(x)
+        x = torch.sigmoid(x)
+        return x
+
+model = NeuralNetwork(600)
+criterion = torch.nn.BCELoss()
+optimizer = torch.optim.SGD(model.parameters(), lr = 0.1)
+batch_size = 15
+
+# Model training, from lab 8
+# https://git.wmi.amu.edu.pl/filipg/aitech-eks-pub/src/branch/master/cw/08_regresja_logistyczna.ipynb
+for epoch in range(5):
+    model.train()
+    for i in range(0, y_train.shape[0], batch_size):
+        X = X_train[i:i+batch_size]
+        X = torch.tensor(X)
+        y = y_train[i:i+batch_size]
+        y = torch.tensor(y.astype(np.float32).to_numpy()).reshape(-1,1)
+
+        outputs = model(X.float())
+        loss = criterion(outputs, y)
+
+        optimizer.zero_grad()
+        loss.backward()
+        optimizer.step()
+
+y_dev = []
+y_test = []
+
+# Predictions
+# model.eval() notifies all layers that we are in eval mode
+model.eval()
+
+# torch.no_grad() deactivates the autograd engine; this reduces memory usage and speeds up inference
+with torch.no_grad():
+    for i in range(0, len(X_dev), batch_size):
+        X = X_dev[i:i+batch_size]
+        X = torch.tensor(X)
+        outputs = model(X.float())
+
+        y = (outputs > 0.5)
+        y_dev.extend(y)
+
+    for i in range(0, len(X_test), batch_size):
+        X = X_test[i:i+batch_size]
+        X = torch.tensor(X)
+        outputs = model(X.float())
+
+        y = (outputs > 0.5)
+        y_test.extend(y)
+
+
+# Write the output files
+y_dev = np.asarray(y_dev, dtype=np.int32)
+y_test = np.asarray(y_test, dtype=np.int32)
+
+y_dev_df = pd.DataFrame({'label':y_dev})
+y_test_df = pd.DataFrame({'label':y_test})
+
+y_dev_df.to_csv(r'dev-0/out.tsv', sep='\t', index=False, header=False)
+y_test_df.to_csv(r'test-A/out.tsv', sep='\t', index=False, header=False)
\ No newline at end of file
diff --git a/LogReg_Test.ipynb b/LogReg_Test.ipynb
index 0e3f071..8ed074b 100644
--- a/LogReg_Test.ipynb
+++ b/LogReg_Test.ipynb
@@ -2,43 +2,20 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 38,
+   "execution_count": 61,
    "metadata": {},
    "outputs": [],
    "source": [
     "import pandas as pd\n",
     "import numpy as np\n",
     "import torch\n",
-    "import csv\n",
     "from nltk.tokenize import word_tokenize\n",
-    "#from gensim.models import Word2Vec\n",
     "import gensim.downloader as api"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 39,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Neural network from lab 8\n",
-    "class NeuralNetwork(torch.nn.Module): \n",
-    "    def __init__(self, hidden_size):\n",
-    "        super(NeuralNetwork, self).__init__()\n",
-    "        self.l1 = torch.nn.Linear(300, hidden_size)  # Google's word2vec-google-news-300 embeddings are always 300-dimensional\n",
-    "        self.l2 = torch.nn.Linear(hidden_size, 1)\n",
-    "\n",
-    "    def forward(self, x):\n",
-    "        x = self.l1(x)\n",
-    "        x = torch.relu(x)\n",
-    "        x = self.l2(x)\n",
-    "        x = torch.sigmoid(x)\n",
-    "        return x"
-   ]
-  },
-  {
-   "cell_type": "code",
"execution_count": 40, + "execution_count": 62, "metadata": {}, "outputs": [], "source": [ @@ -51,34 +28,35 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 63, "metadata": {}, "outputs": [], "source": [ - "# Preprocessing danych\n", - "# lowercase\n", + "# lowercase-ing zbiorów\n", "# https://www.datacamp.com/community/tutorials/case-conversion-python\n", "X_train = X_train.content.str.lower()\n", - "y_train = y_train['label']\n", "X_dev = X_dev.content.str.lower()\n", - "X_test = X_test.content.str.lower()" + "X_test = X_test.content.str.lower()\n", + "\n", + "y_train = y_train['label'] #Df do Series?" ] }, { "cell_type": "code", - "execution_count": 42, + "execution_count": 64, "metadata": {}, "outputs": [], "source": [ - "# tokenize\n", - "X_train = [word_tokenize(content) for content in X_train]\n", - "X_dev = [word_tokenize(content) for content in X_dev]\n", - "X_test = [word_tokenize(content) for content in X_test]" + "# tokenizacja zbiorów\n", + "#https://www.nltk.org/_modules/nltk/tokenize.html\n", + "X_train = [word_tokenize(doc) for doc in X_train]\n", + "X_dev = [word_tokenize(doc) for doc in X_dev]\n", + "X_test = [word_tokenize(doc) for doc in X_test]" ] }, { "cell_type": "code", - "execution_count": 44, + "execution_count": 67, "metadata": {}, "outputs": [], "source": [ @@ -86,9 +64,36 @@ "# https://radimrehurek.com/gensim/auto_examples/howtos/run_downloader_api.html\n", "# https://www.kaggle.com/kstathou/word-embeddings-logistic-regression\n", "w2v = api.load('word2vec-google-news-300')\n", - "X_train = [np.mean([w2v[w] for w in content if w in w2v] or [np.zeros(300)], axis=0) for content in X_train]\n", - "X_dev = [np.mean([w2v[w] for w in content if w in w2v] or [np.zeros(300)], axis=0) for content in X_dev]\n", - "X_test = [np.mean([w2v[w] for w in content if w in w2v] or [np.zeros(300)], axis=0) for content in X_test]" + "\n", + "def document_vector(doc):\n", + " \"\"\"Create document vectors by averaging word vectors. 
Remove out-of-vocabulary words.\"\"\"\n", + " return np.mean([w2v[w] for w in doc if w in w2v] or [np.zeros(300)], axis=0)\n", + "\n", + "X_train = [document_vector(doc) for doc in X_train]\n", + "X_dev = [document_vector(doc) for doc in X_dev]\n", + "X_test = [document_vector(doc) for doc in X_test]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Sieć neuronowa z ćwiczeń 8\n", + "#https://git.wmi.amu.edu.pl/filipg/aitech-eks-pub/src/branch/master/cw/08_regresja_logistyczna.ipynb\n", + "class NeuralNetwork(torch.nn.Module): \n", + " def __init__(self, hidden_size):\n", + " super(NeuralNetwork, self).__init__()\n", + " self.l1 = torch.nn.Linear(300, hidden_size) #Korzystamy z word2vec-google-news-300 który ma zawsze na wejściu wymiar 300\n", + " self.l2 = torch.nn.Linear(hidden_size, 1)\n", + "\n", + " def forward(self, x):\n", + " x = self.l1(x)\n", + " x = torch.relu(x)\n", + " x = self.l2(x)\n", + " x = torch.sigmoid(x)\n", + " return x" ] }, { @@ -112,6 +117,7 @@ "outputs": [], "source": [ "# Trening modelu z ćwiczeń 8\n", + "#https://git.wmi.amu.edu.pl/filipg/aitech-eks-pub/src/branch/master/cw/08_regresja_logistyczna.ipynb\n", "for epoch in range(5):\n", " model.train()\n", " for i in range(0, y_train.shape[0], batch_size):\n", diff --git a/dev-0/out.tsv b/dev-0/out.tsv index 575b176..5db58a9 100644 --- a/dev-0/out.tsv +++ b/dev-0/out.tsv @@ -9,7 +9,7 @@ 0 1 0 -1 +0 0 1 0 @@ -26,7 +26,7 @@ 0 1 0 -0 +1 0 0 1 @@ -65,7 +65,7 @@ 0 1 0 -1 +0 1 0 0 @@ -148,7 +148,7 @@ 1 0 1 -1 +0 0 0 0 @@ -205,7 +205,7 @@ 0 1 0 -1 +0 1 0 0 @@ -464,7 +464,7 @@ 0 0 0 -0 +1 0 1 1 @@ -490,7 +490,7 @@ 0 0 0 -0 +1 0 0 0 @@ -537,7 +537,7 @@ 1 1 0 -0 +1 0 1 0 @@ -552,7 +552,7 @@ 1 0 0 -0 +1 0 0 0 @@ -608,7 +608,7 @@ 0 0 0 -1 +0 1 0 0 @@ -654,7 +654,7 @@ 1 0 1 -0 +1 0 0 1 @@ -849,7 +849,7 @@ 0 0 0 -1 +0 0 0 0 @@ -874,7 +874,7 @@ 0 0 1 -0 +1 1 0 0 @@ -896,7 +896,7 @@ 1 0 1 -0 +1 0 0 0 @@ -928,7 +928,7 @@ 1 0 0 -0 +1 0 0 0 @@ -940,7 +940,7 @@ 0 0 0 -0 +1 0 0 0 @@ -1044,7 +1044,7 @@ 1 1 1 -0 +1 1 0 1 @@ -1078,7 +1078,7 @@ 0 1 0 -0 +1 0 1 0 @@ -1312,7 +1312,7 @@ 0 0 0 -0 +1 0 0 0 @@ -1351,7 +1351,7 @@ 0 0 0 -0 +1 0 0 0 @@ -1390,7 +1390,7 @@ 1 0 0 -1 +0 0 0 0 @@ -1435,13 +1435,13 @@ 0 0 1 -0 +1 0 0 1 -0 1 -0 +1 +1 0 0 0 @@ -1485,7 +1485,7 @@ 0 0 0 -0 +1 0 0 0 @@ -1505,20 +1505,20 @@ 0 0 0 -1 0 0 0 0 -1 0 1 0 +1 0 0 0 0 1 +1 0 0 0 @@ -1538,11 +1538,10 @@ 0 1 0 +1 0 0 1 -1 -0 0 0 0 @@ -1558,6 +1557,7 @@ 0 0 0 +1 0 0 0 @@ -1637,7 +1637,7 @@ 1 0 0 -1 +0 0 0 0 @@ -1657,7 +1657,7 @@ 0 1 1 -0 +1 0 0 1 @@ -1709,7 +1709,7 @@ 0 0 0 -0 +1 0 0 0 @@ -1741,7 +1741,7 @@ 0 0 0 -0 +1 1 0 0 @@ -2075,7 +2075,7 @@ 0 1 1 -0 +1 0 0 1 @@ -2084,7 +2084,7 @@ 0 0 0 -0 +1 0 0 1 @@ -2241,7 +2241,7 @@ 0 0 0 -0 +1 0 1 0 @@ -2259,7 +2259,7 @@ 1 0 0 -0 +1 1 0 0 @@ -2367,7 +2367,7 @@ 1 0 0 -0 +1 1 0 0 @@ -2376,7 +2376,7 @@ 0 0 0 -0 +1 0 0 1 @@ -2400,7 +2400,7 @@ 0 1 0 -1 +0 0 0 0 @@ -2443,7 +2443,7 @@ 0 1 1 -0 +1 0 0 0 @@ -2451,7 +2451,7 @@ 0 1 0 -1 +0 1 0 0 @@ -2527,7 +2527,7 @@ 0 0 1 -0 +1 0 1 0 @@ -2552,7 +2552,7 @@ 0 0 0 -0 +1 1 0 0 @@ -2566,11 +2566,11 @@ 0 0 0 +1 0 0 0 -0 -0 +1 0 1 0 @@ -2687,7 +2687,7 @@ 0 1 0 -1 +0 1 1 1 @@ -2721,7 +2721,7 @@ 0 0 1 -0 +1 0 0 0 @@ -2782,7 +2782,7 @@ 0 1 0 -0 +1 0 0 0 @@ -2815,7 +2815,7 @@ 0 0 0 -0 +1 0 0 0 @@ -2955,7 +2955,7 @@ 0 0 0 -0 +1 0 1 0 @@ -2968,7 +2968,7 @@ 0 0 0 -1 +0 0 0 0 @@ -2998,15 +2998,15 @@ 0 0 0 -1 0 0 0 -1 0 0 0 -1 +0 +0 +0 0 1 0 @@ -3087,7 +3087,7 @@ 0 0 0 -0 +1 0 0 1 @@ -3102,7 +3102,7 @@ 1 0 0 -1 +0 1 1 0 @@ -3182,7 
+3182,7 @@ 0 1 1 -0 +1 1 0 1 @@ -3203,7 +3203,7 @@ 0 0 0 -1 +0 0 0 0 @@ -3266,7 +3266,7 @@ 0 0 0 -0 +1 0 0 0 @@ -3308,7 +3308,7 @@ 0 0 0 -0 +1 0 0 0 @@ -3363,7 +3363,7 @@ 0 0 0 -0 +1 0 0 1 @@ -3391,7 +3391,7 @@ 0 0 0 -1 +0 0 0 0 @@ -3494,7 +3494,7 @@ 0 1 0 -1 +0 0 0 0 @@ -3569,7 +3569,7 @@ 0 1 1 -0 +1 0 1 0 @@ -3597,7 +3597,7 @@ 0 0 0 -0 +1 0 0 1 @@ -3626,7 +3626,7 @@ 0 0 0 -0 +1 1 0 0 @@ -3663,9 +3663,9 @@ 0 0 0 +1 0 0 -1 0 0 0 @@ -3730,16 +3730,16 @@ 0 0 0 +1 0 0 0 0 0 0 -0 -1 1 0 +0 1 0 0 @@ -3812,7 +3812,7 @@ 0 0 1 -0 +1 0 1 0 @@ -3882,7 +3882,7 @@ 0 0 0 -0 +1 0 0 0 @@ -3951,7 +3951,7 @@ 1 0 1 -0 +1 0 0 0 @@ -4009,7 +4009,7 @@ 0 0 1 -0 +1 0 0 0 @@ -4034,7 +4034,7 @@ 0 0 0 -0 +1 1 0 0 @@ -4133,7 +4133,7 @@ 1 1 0 -0 +1 0 0 0 @@ -4168,7 +4168,7 @@ 1 1 0 -0 +1 0 0 0 @@ -4302,6 +4302,7 @@ 0 1 1 +0 1 1 0 @@ -4313,7 +4314,6 @@ 0 0 0 -0 1 0 0 @@ -4340,7 +4340,7 @@ 1 1 0 -0 +1 0 0 1 @@ -4366,7 +4366,7 @@ 1 1 0 -0 +1 0 1 0 @@ -4511,7 +4511,7 @@ 0 1 0 -0 +1 0 0 0 @@ -4586,7 +4586,7 @@ 1 0 1 -0 +1 1 0 0 @@ -4628,7 +4628,7 @@ 1 0 0 -0 +1 0 0 1 @@ -4637,7 +4637,7 @@ 0 0 0 -0 +1 0 0 1 @@ -4743,7 +4743,7 @@ 0 0 0 -1 +0 0 0 0 @@ -4873,7 +4873,7 @@ 0 0 0 -1 +0 0 1 0 @@ -4936,7 +4936,7 @@ 0 1 0 -0 +1 1 1 0 diff --git a/test-A/out.tsv b/test-A/out.tsv index 2662cf4..05e237f 100644 --- a/test-A/out.tsv +++ b/test-A/out.tsv @@ -126,7 +126,7 @@ 0 0 0 -0 +1 0 0 0 @@ -149,7 +149,7 @@ 1 0 1 -1 +0 0 1 0 @@ -347,7 +347,7 @@ 0 0 0 -0 +1 1 1 0 @@ -356,7 +356,7 @@ 0 1 0 -0 +1 0 0 0 @@ -376,9 +376,9 @@ 1 0 0 +1 0 -0 -0 +1 1 0 0 @@ -425,7 +425,7 @@ 1 1 0 -0 +1 1 0 0 @@ -461,7 +461,7 @@ 0 0 0 -1 +0 0 0 0 @@ -486,7 +486,7 @@ 0 1 0 -0 +1 0 1 0 @@ -557,8 +557,8 @@ 1 0 0 -0 -0 +1 +1 0 1 0 @@ -578,7 +578,7 @@ 0 0 0 -0 +1 1 1 0 @@ -700,10 +700,10 @@ 0 0 0 -1 0 1 1 +1 0 0 0 @@ -793,7 +793,7 @@ 1 0 1 -0 +1 0 0 1 @@ -826,7 +826,7 @@ 0 0 0 -0 +1 0 0 0 @@ -837,7 +837,7 @@ 0 0 0 -1 +0 0 0 0 @@ -943,7 +943,7 @@ 1 0 0 -1 +0 0 0 0 @@ -1021,7 +1021,7 @@ 0 0 0 -0 +1 0 1 0 @@ -1058,7 +1058,7 @@ 0 0 0 -0 +1 0 0 1 @@ -1081,7 +1081,7 @@ 1 1 0 -1 +0 0 0 1 @@ -1114,8 +1114,8 @@ 0 1 0 -0 -0 +1 +1 1 0 0 @@ -1177,7 +1177,7 @@ 0 0 0 -1 +0 0 0 1 @@ -1232,7 +1232,7 @@ 0 0 0 -0 +1 1 0 0 @@ -1253,7 +1253,7 @@ 0 0 0 -1 +0 1 1 0 @@ -1348,7 +1348,7 @@ 0 0 1 -0 +1 0 1 0 @@ -1377,7 +1377,7 @@ 1 0 0 -1 +0 0 0 0 @@ -1408,7 +1408,7 @@ 0 0 1 -0 +1 0 0 0 @@ -1424,7 +1424,7 @@ 0 0 1 -0 +1 0 0 1 @@ -1445,7 +1445,7 @@ 0 0 1 -0 +1 1 1 1 @@ -1482,7 +1482,7 @@ 0 0 1 -0 +1 0 0 0 @@ -1616,7 +1616,7 @@ 0 0 0 -0 +1 0 0 0 @@ -1692,7 +1692,7 @@ 0 0 0 -0 +1 1 1 0 @@ -1713,7 +1713,7 @@ 0 1 1 -0 +1 0 1 0 @@ -1755,21 +1755,21 @@ 0 1 0 -1 0 0 -1 -1 +0 1 1 0 1 0 +1 0 0 0 0 0 +1 0 1 0 @@ -1795,7 +1795,7 @@ 0 1 1 -1 +0 0 0 1 @@ -1812,7 +1812,7 @@ 0 0 0 -0 +1 0 0 1 @@ -1915,7 +1915,7 @@ 0 0 1 -0 +1 0 0 0 @@ -1997,8 +1997,8 @@ 0 0 0 -1 0 +1 0 1 0 @@ -2301,7 +2301,7 @@ 0 0 0 -1 +0 0 1 0 @@ -2318,7 +2318,7 @@ 0 0 1 -1 +0 0 0 0 @@ -2362,7 +2362,7 @@ 0 0 1 -1 +0 0 1 0 @@ -2381,7 +2381,7 @@ 0 0 1 -1 +0 0 1 0 @@ -2416,7 +2416,7 @@ 0 0 0 -1 +0 0 0 1 @@ -2520,7 +2520,7 @@ 0 0 0 -1 +0 0 1 0 @@ -2549,7 +2549,7 @@ 0 0 0 -0 +1 1 0 0 @@ -2564,7 +2564,7 @@ 0 1 0 -1 +0 1 0 0 @@ -2594,7 +2594,7 @@ 1 0 1 -0 +1 1 0 0 @@ -2691,7 +2691,7 @@ 0 1 0 -0 +1 0 0 0 @@ -2725,7 +2725,7 @@ 0 1 0 -0 +1 0 1 0 @@ -2849,7 +2849,7 @@ 1 0 1 -1 +0 1 0 0 @@ -2975,12 +2975,12 @@ 1 0 0 +1 0 0 0 0 -0 -0 +1 0 0 0 @@ -3033,7 +3033,7 @@ 0 0 0 -0 +1 0 0 0 @@ -3088,7 +3088,7 @@ 0 0 0 -0 +1 0 0 0 @@ -3129,6 +3129,7 @@ 0 0 0 +0 1 1 1 @@ -3136,7 +3137,6 @@ 0 0 0 -0 1 0 0 @@ -3145,7 +3145,7 @@ 0 0 0 -0 +1 0 1 0 @@ -3334,6 +3334,7 
@@ 1 0 0 +1 0 0 0 @@ -3345,8 +3346,7 @@ 0 0 0 -0 -0 +1 0 0 0 @@ -3363,7 +3363,7 @@ 0 0 1 -0 +1 0 0 1 @@ -3399,12 +3399,12 @@ 0 0 0 -0 +1 1 0 0 0 -1 +0 0 1 0 @@ -3453,7 +3453,7 @@ 1 0 0 -0 +1 0 0 0 @@ -3524,7 +3524,7 @@ 0 0 0 -0 +1 1 1 0 @@ -3549,7 +3549,7 @@ 1 0 0 -1 +0 0 0 0 @@ -3634,7 +3634,7 @@ 0 1 0 -1 +0 0 1 0 @@ -3720,7 +3720,7 @@ 0 0 0 -0 +1 0 1 0 @@ -3786,7 +3786,7 @@ 1 0 0 -0 +1 0 0 1 @@ -3826,7 +3826,7 @@ 0 0 0 -0 +1 0 1 0 @@ -3855,12 +3855,12 @@ 0 0 0 -1 0 -1 0 +1 0 0 +1 0 1 0 @@ -3981,7 +3981,7 @@ 1 0 0 -1 +0 1 1 0 @@ -4010,7 +4010,7 @@ 0 0 0 -0 +1 1 1 0 @@ -4037,7 +4037,7 @@ 0 1 1 -0 +1 0 0 0 @@ -4047,7 +4047,7 @@ 0 1 0 -0 +1 0 0 0 @@ -4067,14 +4067,14 @@ 1 1 1 +1 0 0 0 0 0 0 -0 -0 +1 0 0 0 @@ -4085,7 +4085,7 @@ 0 1 0 -1 +0 0 1 0 @@ -4106,7 +4106,7 @@ 1 0 0 -0 +1 0 1 0 @@ -4164,7 +4164,7 @@ 1 1 0 -1 +0 0 0 0 @@ -4210,7 +4210,7 @@ 0 0 0 -1 +0 1 0 0 @@ -4228,7 +4228,7 @@ 0 0 0 -0 +1 0 1 0 @@ -4262,7 +4262,7 @@ 0 0 0 -0 +1 0 0 0 @@ -4305,7 +4305,7 @@ 0 0 1 -0 +1 0 0 1 @@ -4498,7 +4498,7 @@ 1 0 1 -0 +1 1 0 0 @@ -4513,7 +4513,7 @@ 0 1 0 -0 +1 0 0 0 @@ -4708,7 +4708,7 @@ 1 0 1 -1 +0 0 0 0 @@ -4737,12 +4737,12 @@ 0 0 0 -1 +0 0 0 1 1 -0 +1 0 0 0 @@ -4785,15 +4785,15 @@ 0 0 0 -1 0 0 -1 0 1 0 +1 0 1 +1 0 0 0 @@ -4831,7 +4831,7 @@ 0 0 0 -0 +1 0 0 0 @@ -4844,7 +4844,7 @@ 0 0 1 -1 +0 1 0 1 @@ -4886,7 +4886,7 @@ 0 1 1 -0 +1 1 0 0 @@ -4980,7 +4980,7 @@ 0 0 0 -0 +1 0 0 0 @@ -5054,7 +5054,7 @@ 0 0 0 -1 +0 1 1 1 @@ -5097,7 +5097,7 @@ 0 0 0 -0 +1 0 1 0