delete test
commit dc2b142326 (parent ba4aadb5a7)
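This commit strips the test-A split from the pipeline: every stage loses its x_test branch, the output paths shrink to the dev set, and prediction/get_result now handle y_dev only.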
@@ -6,7 +6,7 @@ from nltk.tokenize import word_tokenize
 from gensim import downloader

 FEATURES = ['content', 'id', 'label']
-PATHS = ['train/in.tsv', 'train/expected.tsv', 'dev-0/in.tsv', 'test-A/in.tsv', './dev-0/out.tsv', './test-A/out.tsv']
+PATHS = ['train/in.tsv', 'train/expected.tsv', 'dev-0/in.tsv', './dev-0/out.tsv']
 PRE_TRAINED = 'word2vec-google-news-300'

 class NeuralNetwork(torch.nn.Module):
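Dropping the two test-A entries shifts the tail of PATHS, which is why get_result further down switches from PATHS[4] to PATHS[3]. A quick sanity check on the new indices (a hypothetical snippet, not part of the commit):

    PATHS = ['train/in.tsv', 'train/expected.tsv', 'dev-0/in.tsv', './dev-0/out.tsv']
    # Index 3 is now the dev output file; the old list kept 'test-A/in.tsv'
    # at index 3 and the two out.tsv paths at indices 4 and 5.
    assert PATHS[3] == './dev-0/out.tsv'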
@@ -26,36 +26,32 @@ def get_data(FEATURES, PATHS):
     x_train = pd.read_table(PATHS[0], error_bad_lines = False, header = None, quoting = csv.QUOTE_NONE, names = FEATURES[:2])
     y_train = pd.read_table(PATHS[1], error_bad_lines = False, header = None, quoting = csv.QUOTE_NONE, names = FEATURES[2:])
     x_dev = pd.read_table(PATHS[2], error_bad_lines = False, header = None, quoting = csv.QUOTE_NONE, names = FEATURES[:2])
-    x_test = pd.read_table(PATHS[3], error_bad_lines = False, header = None, quoting = csv.QUOTE_NONE, names = FEATURES[:2])

-    return x_train, y_train, x_dev, x_test
+    return x_train, y_train, x_dev

-def preprocess(x_train, y_train, x_dev, x_test):
+def preprocess(x_train, y_train, x_dev):
     x_train = x_train[FEATURES[0]].str.lower()
     x_dev = x_dev[FEATURES[0]].str.lower()
-    x_test = x_test[FEATURES[0]].str.lower()
     y_train = y_train[FEATURES[2]]

-    return x_train, y_train, x_dev, x_test
+    return x_train, y_train, x_dev

-def tokenize(x_train, x_dev, x_test):
+def tokenize(x_train, x_dev):
     x_train = [word_tokenize(i) for i in x_train]
     x_dev = [word_tokenize(i) for i in x_dev]
-    x_test = [word_tokenize(i) for i in x_test]

-    return x_train, x_dev, x_test
+    return x_train, x_dev

 def use_word2vec():
     w2v = downloader.load(PRE_TRAINED)

     return w2v

-def document_vector(w2v, x_train, x_dev, x_test):
+def document_vector(w2v, x_train, x_dev):
     x_train = [np.mean([w2v[word] for word in doc if word in w2v] or [np.zeros(300)], axis = 0) for doc in x_train]
     x_dev = [np.mean([w2v[word] for word in doc if word in w2v] or [np.zeros(300)], axis = 0) for doc in x_dev]
-    x_test = [np.mean([w2v[word] for word in doc if word in w2v] or [np.zeros(300)], axis = 0) for doc in x_test]

-    return x_train, x_dev, x_test
+    return x_train, x_dev

 def basic_config():
     INPUT_DIM = 300
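A caveat the diff leaves untouched: error_bad_lines was deprecated in pandas 1.3 and removed in 2.0, so these read_table calls fail on current pandas. A minimal sketch of the modern equivalent, assuming the same TSV layout:

    import csv
    import pandas as pd

    # on_bad_lines='skip' replaces error_bad_lines=False on pandas >= 1.3
    x_train = pd.read_table('train/in.tsv', on_bad_lines='skip', header=None,
                            quoting=csv.QUOTE_NONE, names=['content', 'id'])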
@@ -84,8 +80,8 @@ def train(nn_model, BATCH_SIZE, criterion, optimizer, x_train, y_train):
         loss.backward()
         optimizer.step()

-def prediction(nn_model, BATCH_SIZE, x_dev, x_test):
-    y_dev, y_test = [], []
+def prediction(nn_model, BATCH_SIZE, x_dev):
+    y_dev = []
     nn_model.eval()
     with torch.no_grad():
         for i in range(0, len(x_dev), BATCH_SIZE):
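The surviving dev-only path keeps the standard PyTorch inference idiom: eval() switches off dropout and batch-norm updates, no_grad() skips autograd bookkeeping. A self-contained sketch of the same pattern, with model and inputs as placeholders:

    import torch

    def predict(model, xs, batch_size):
        preds = []
        model.eval()                 # inference mode for dropout/batchnorm layers
        with torch.no_grad():        # no gradient tracking during prediction
            for i in range(0, len(xs), batch_size):
                batch = torch.tensor(xs[i:i + batch_size]).float()
                preds += (model(batch) > 0.5).tolist()  # threshold sigmoid outputs
        return preds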
@@ -94,30 +90,23 @@ def prediction(nn_model, BATCH_SIZE, x_dev, x_test):
             outputs = nn_model(X.float())
             prediction = (outputs > 0.5)
             y_dev += prediction.tolist()
-        for i in range(0, len(x_test), BATCH_SIZE):
-            X = x_test[i:i+BATCH_SIZE]
-            X = torch.tensor(X)
-            outputs = nn_model(X.float())
-            prediction = (outputs > 0.5)
-            y_test += prediction.tolist()

-    return y_dev, y_test
+    return y_dev

-def get_result(y_dev, y_test):
-    np.asarray(y_dev, dtype = np.int32).tofile(PATHS[4], sep='\n')
-    np.asarray(y_test, dtype = np.int32).tofile(PATHS[5], sep='\n')
+def get_result(y_dev):
+    np.asarray(y_dev, dtype = np.int32).tofile(PATHS[3], sep='\n')

 def main():
-    x_train, y_train, x_dev, x_test = get_data(FEATURES, PATHS)
-    x_train, y_train, x_dev, x_test = preprocess(x_train, y_train, x_dev, x_test)
-    x_train, x_dev, x_test = tokenize(x_train, x_dev, x_test)
+    x_train, y_train, x_dev = get_data(FEATURES, PATHS)
+    x_train, y_train, x_dev = preprocess(x_train, y_train, x_dev)
+    x_train, x_dev = tokenize(x_train, x_dev)
     w2v = use_word2vec()
-    x_train, x_dev, x_test = document_vector(w2v, x_train, x_dev, x_test)
+    x_train, x_dev = document_vector(w2v, x_train, x_dev)
     INPUT_DIM, BATCH_SIZE = basic_config()
     nn_model, optimizer, criterion = init_model(INPUT_DIM)
     train(nn_model, BATCH_SIZE, criterion, optimizer, x_train, y_train)
-    y_dev, y_test = prediction(nn_model, BATCH_SIZE, x_dev, x_test)
-    get_result(y_dev, y_test)
+    y_dev = prediction(nn_model, BATCH_SIZE, x_dev)
+    get_result(y_dev)

 if __name__ == '__main__':
     main()
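For context on the document_vector logic the commit trims: each tokenized document is embedded as the mean of its in-vocabulary word2vec vectors, with an all-zeros fallback for documents whose tokens are all out of vocabulary. A standalone sketch, assuming gensim's downloader and the 300-dimensional Google News model named above:

    import numpy as np
    from gensim import downloader

    w2v = downloader.load('word2vec-google-news-300')  # gensim KeyedVectors

    def embed(doc_tokens):
        # Average vectors of known words; all-OOV documents map to zeros.
        vecs = [w2v[w] for w in doc_tokens if w in w2v]
        return np.mean(vecs, axis=0) if vecs else np.zeros(300)

    print(embed(['hello', 'world']).shape)  # (300,)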