import csv

import numpy as np
import pandas as pd
import torch
from gensim import downloader
from nltk.tokenize import word_tokenize

FEATURES = ['content', 'id', 'label']
PATHS = ['train/in.tsv', 'train/expected.tsv', 'dev-0/in.tsv', 'test-A/in.tsv',
         './dev-0/out.tsv', './test-A/out.tsv']
PRE_TRAINED = 'word2vec-google-news-300'


class NeuralNetwork(torch.nn.Module):
    """Single-hidden-layer binary classifier over 300-dim document vectors."""

    def __init__(self, input_dim):
        super().__init__()
        self.l1 = torch.nn.Linear(input_dim, 500)
        self.l2 = torch.nn.Linear(500, 1)

    def forward(self, x):
        x = torch.relu(self.l1(x))
        # Sigmoid output pairs with the BCELoss criterion created in init_model().
        return torch.sigmoid(self.l2(x))


def get_data(FEATURES, PATHS):
    # on_bad_lines='skip' is the pandas >= 1.3 replacement for the removed
    # error_bad_lines=False option.
    read_args = dict(header=None, quoting=csv.QUOTE_NONE, on_bad_lines='skip')
    x_train = pd.read_table(PATHS[0], names=FEATURES[:2], **read_args)
    y_train = pd.read_table(PATHS[1], names=FEATURES[2:], **read_args)
    x_dev = pd.read_table(PATHS[2], names=FEATURES[:2], **read_args)
    x_test = pd.read_table(PATHS[3], names=FEATURES[:2], **read_args)
    return x_train, y_train, x_dev, x_test


def preprocess(x_train, y_train, x_dev, x_test):
    # Keep only the text column (lower-cased) and the label column.
    x_train = x_train[FEATURES[0]].str.lower()
    x_dev = x_dev[FEATURES[0]].str.lower()
    x_test = x_test[FEATURES[0]].str.lower()
    y_train = y_train[FEATURES[2]]
    return x_train, y_train, x_dev, x_test


def tokenize(x_train, x_dev, x_test):
    x_train = [word_tokenize(doc) for doc in x_train]
    x_dev = [word_tokenize(doc) for doc in x_dev]
    x_test = [word_tokenize(doc) for doc in x_test]
    return x_train, x_dev, x_test


def use_word2vec():
    # Downloads (on first use) and loads the pre-trained Google News vectors.
    return downloader.load(PRE_TRAINED)


def document_vector(w2v, x_train, x_dev, x_test):
    # Represent each document as the mean of its in-vocabulary word vectors;
    # documents with no known words fall back to a zero vector.
    x_train = [np.mean([w2v[word] for word in doc if word in w2v] or [np.zeros(300)], axis=0)
               for doc in x_train]
    x_dev = [np.mean([w2v[word] for word in doc if word in w2v] or [np.zeros(300)], axis=0)
             for doc in x_dev]
    x_test = [np.mean([w2v[word] for word in doc if word in w2v] or [np.zeros(300)], axis=0)
              for doc in x_test]
    return x_train, x_dev, x_test


def basic_config():
    INPUT_DIM = 300
    BATCH_SIZE = 5
    return INPUT_DIM, BATCH_SIZE


def init_model(INPUT_DIM):
    nn_model = NeuralNetwork(INPUT_DIM)
    criterion = torch.nn.BCELoss()
    optimizer = torch.optim.SGD(nn_model.parameters(), lr=0.1)
    return nn_model, optimizer, criterion


def train(nn_model, BATCH_SIZE, criterion, optimizer, x_train, y_train):
    for epoch in range(5):
        nn_model.train()
        for i in range(0, y_train.shape[0], BATCH_SIZE):
            # Stack the list of document vectors before building the tensor.
            X = torch.tensor(np.array(x_train[i:i + BATCH_SIZE])).float()
            y = torch.tensor(y_train.iloc[i:i + BATCH_SIZE].astype(np.float32).to_numpy()).reshape(-1, 1)
            outputs = nn_model(X)
            loss = criterion(outputs, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()


def prediction(nn_model, BATCH_SIZE, x_dev, x_test):
    y_dev, y_test = [], []
    nn_model.eval()
    with torch.no_grad():
        for i in range(0, len(x_dev), BATCH_SIZE):
            X = torch.tensor(np.array(x_dev[i:i + BATCH_SIZE])).float()
            outputs = nn_model(X)
            y_dev += (outputs > 0.5).tolist()
        for i in range(0, len(x_test), BATCH_SIZE):
            X = torch.tensor(np.array(x_test[i:i + BATCH_SIZE])).float()
            outputs = nn_model(X)
            y_test += (outputs > 0.5).tolist()
    return y_dev, y_test


def get_result(y_dev, y_test):
    # Write one 0/1 prediction per line.
    np.asarray(y_dev, dtype=np.int32).tofile(PATHS[4], sep='\n')
    np.asarray(y_test, dtype=np.int32).tofile(PATHS[5], sep='\n')


def main():
    x_train, y_train, x_dev, x_test = get_data(FEATURES, PATHS)
    x_train, y_train, x_dev, x_test = preprocess(x_train, y_train, x_dev, x_test)
    x_train, x_dev, x_test = tokenize(x_train, x_dev, x_test)
    w2v = use_word2vec()
    x_train, x_dev, x_test = document_vector(w2v, x_train, x_dev, x_test)
    INPUT_DIM, BATCH_SIZE = basic_config()
    nn_model, optimizer, criterion = init_model(INPUT_DIM)
    train(nn_model, BATCH_SIZE, criterion, optimizer, x_train, y_train)
    y_dev, y_test = prediction(nn_model, BATCH_SIZE, x_dev, x_test)
    get_result(y_dev, y_test)


if __name__ == '__main__':
    main()
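# Usage sketch (the script name below is an assumption; the paths come from PATHS
# above): each in.tsv is a headerless, tab-separated file whose first two columns
# are the document text and an id, and train/expected.tsv holds one 0/1 label per
# line. Running, for example,
#
#     python run_classifier.py
#
# trains the model and writes one 0/1 prediction per line to ./dev-0/out.tsv and
# ./test-A/out.tsv.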