import pandas as pd
import numpy as np
import torch
import gensim.downloader as gn
import csv
from nltk.tokenize import word_tokenize  # requires the NLTK 'punkt' models: nltk.download('punkt')
# load the tab-separated splits; the files ship without a header row,
# and malformed rows are skipped rather than raising an error
train_data_content = pd.read_table('train/in.tsv', on_bad_lines='skip', header=None, quoting=csv.QUOTE_NONE, names=['content', 'id'])
train_data_labels = pd.read_table('train/expected.tsv', on_bad_lines='skip', header=None, quoting=csv.QUOTE_NONE, names=['label'])
dev_data = pd.read_table('dev-0/in.tsv', on_bad_lines='skip', header=None, quoting=csv.QUOTE_NONE, names=['content', 'id'])
test_data = pd.read_table('test-A/in.tsv', on_bad_lines='skip', header=None, quoting=csv.QUOTE_NONE, names=['content', 'id'])
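Because skipped rows can silently desynchronize the content and label files, a quick sanity check is worth running here. This snippet is an addition, not part of the original notebook, and it assumes train/in.tsv and train/expected.tsv are row-aligned:
# verify that every training document has exactly one label
assert len(train_data_content) == len(train_data_labels), 'train content/labels misaligned'
print(train_data_content.shape, train_data_labels.shape, dev_data.shape, test_data.shape)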
print('STEP 3 - PREPROCESSING')
# lowercase all content
X_train = train_data_content['content'].str.lower()
y_train = train_data_labels['label']
X_dev = dev_data['content'].str.lower()
X_test = test_data['content'].str.lower()
# tokenize datasets
X_train = [word_tokenize(content) for content in X_train]
X_dev = [word_tokenize(content) for content in X_dev]
X_test = [word_tokenize(content) for content in X_test]
# load the pretrained 300-dimensional Google News word2vec vectors
# (roughly a 1.7 GB download on first use)
w2v = gn.load('word2vec-google-news-300')
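With the embeddings loaded, one common way to feed the tokenized documents to a torch model is mean pooling over word vectors. The sketch below is an illustrative addition, not taken from the original notebook: the pooling strategy and the tensor names X_train_t, X_dev_t, X_test_t are assumptions.
# average the word2vec vectors of a document's tokens; tokens missing from
# the vocabulary are skipped, and an all-OOV document maps to a zero vector
def document_vector(tokens, model):
    vectors = [model[token] for token in tokens if token in model]
    if not vectors:
        return np.zeros(model.vector_size, dtype=np.float32)
    return np.mean(vectors, axis=0)

# stack the per-document vectors into float32 tensors for a torch classifier
X_train_t = torch.tensor(np.stack([document_vector(doc, w2v) for doc in X_train]))
X_dev_t = torch.tensor(np.stack([document_vector(doc, w2v) for doc in X_dev]))
X_test_t = torch.tensor(np.stack([document_vector(doc, w2v) for doc in X_test]))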