nn lr part
This commit is contained in:
parent cd41e7ed4a
commit 6b27605f82
dev-0/out.tsv (2342 lines changed)
File diff suppressed because it is too large
main.py (96 lines changed)
@@ -6,14 +6,14 @@ from sklearn.feature_extraction.text import TfidfVectorizer
 import torch
 from gensim import downloader
 from nltk.tokenize import word_tokenize
+import pandas as pd

 class NetworkModel(torch.nn.Module):

-    def __init__(self):
-        dim = 200
-        super(NeuralNetworkModel, self).__init__()
-        self.fc1 = torch.nn.Linear(dim, 500)
-        self.fc2 = torch.nn.Linear(500, 1)
+    def __init__(self, input_size, hidden_size, num_classes):
+        super(NetworkModel, self).__init__()
+        self.fc1 = torch.nn.Linear(input_size, hidden_size)
+        self.fc2 = torch.nn.Linear(hidden_size, num_classes)

     def forward(self, x):
         x = self.fc1(x)
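For reference, here is the class as it reads after this commit, completed into a runnable sketch. The hunk only shows the first line of forward(); the ReLU and sigmoid below are assumptions, chosen so the single-unit output fits the BCELoss used in trainModel().

import torch


class NetworkModel(torch.nn.Module):
    # Two-layer feed-forward classifier over fixed-size document embeddings.
    def __init__(self, input_size, hidden_size, num_classes):
        super(NetworkModel, self).__init__()
        self.fc1 = torch.nn.Linear(input_size, hidden_size)
        self.fc2 = torch.nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        x = self.fc1(x)
        # The rest of forward() is not visible in this hunk; the activation
        # layout below is an assumption consistent with BCELoss.
        x = torch.relu(x)
        x = self.fc2(x)
        return torch.sigmoid(x)


# Matches the call added in trainModel(): 300-dim input, 300 hidden units, 1 output.
model = NetworkModel(300, 300, 1)
print(model(torch.randn(2, 300)).shape)  # torch.Size([2, 1]), values in (0, 1)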
@@ -31,59 +31,77 @@ def word2vecOnDoc(document):
     )

 def prepareData(data):
-    data = [word_tokenize(row) for row in data]
-    print(data)
+    data = [word_tokenize(row) for row in data.content.str.lower()]
     data = [word2vecOnDoc(document) for document in data]
     return data

 def trainModel(trainFileIn, trainFileExpected):
-    with open(trainFileExpected, 'r') as f:
-        expectedData = f.readlines()
+    inData = pd.read_table(
+        trainFileIn,
+        error_bad_lines=False,
+        header=None,
+        quoting=3,
+        usecols=["content"],
+        names=["content", "id"],
+        nrows=225000,
+    )
+    expectedData = pd.read_table(
+        trainFileExpected,
+        error_bad_lines=False,
+        header=None,
+        quoting=3,
+        usecols=["label"],
+        names=["label"],
+        nrows=225000,
+    )

-    with open(trainFileIn, 'r') as f:
-        inData = f.readlines()
+    # expectedData = prepareData(expectedData)

-    expectedData = prepareData(expectedData)
     inData = prepareData(inData)
-    # networkModel = NetworkModel(300, 300, 1)
-    # criterion = torch.nn.BCELoss()
-    # optim = torch.optim.SGD(network.parameters(), lr=0.02)
-    # epochs = 1
-    # batchSize = 2
+    networkModel = NetworkModel(300, 300, 1)
+    criterion = torch.nn.BCELoss()
+    optim = torch.optim.SGD(networkModel.parameters(), lr=0.02)
+    epochs = 1
+    batchSize = 2

-    # for _ in range(epochs):
-    # network.train()
-    # for i in range(0, inData.shape[0], batchSize):
-    # x = inData[i : i + batchSize]
-    # x = torch.tensor(x)
-    # y = expectedData[i : i + batchSize]
-    # y = torch.tensor(y.astype(np.float32).to_numpy()).reshape(-1, 1)
-    # outputs = network(x.float())
-    # loss = criterion(outputs, y)
+    for _ in range(epochs):
+        networkModel.train()
+        for i in range(0, expectedData.shape[0], batchSize):
+            x = inData[i : i + batchSize]
+            x = torch.tensor(x)
+            y = expectedData[i : i + batchSize]
+            y = torch.tensor(y.astype(numpy.float32).to_numpy()).reshape(-1, 1)
+            outputs = networkModel(x.float())
+            loss = criterion(outputs, y)
             # print(loss)
-    # optim.zero_grad()
-    # loss.backward()
-    # optim.step()
-    # return networkModel
+            optim.zero_grad()
+            loss.backward()
+            optim.step()
+    return networkModel

 def evaluateModel(model, inFile, outFile):
-    with open(inFile, 'r') as f:
-        inData = f.readlines()
+    inData = pd.read_table(
+        inFile,
+        error_bad_lines=False,
+        header=None,
+        quoting=3,
+        usecols=["content"],
+        names=["content", "id"],
+    )

     inData = prepareData(inData)
+    batchSize = 2
     pred = []
     with torch.no_grad():
-        for i in range(0, len(inData), batch_size):
-            x = inData[i : i + batch_size]
+        for i in range(0, len(inData), batchSize):
+            x = inData[i : i + batchSize]
             x = torch.tensor(x)
             outputs = model(x.float())
             prediction = outputs >= 0.5
             pred += prediction.tolist()
-    numpy.asarray(pred, dtype=numpyp.int32).tofile(outFile, sep="\n")
+    numpy.asarray(pred, dtype=numpy.int32).tofile(outFile, sep="\n")

 model = trainModel("train/in.tsv", "train/expected.tsv")
-#evaluateModel(model, "dev-0/in.tsv", "dev-0/out.tsv")
-#evaluateModel(model, "test-A/in.tsv", "test-A/out.tsv")
+evaluateModel(model, "dev-0/in.tsv", "dev-0/out.tsv")
+evaluateModel(model, "test-A/in.tsv", "test-A/out.tsv")
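word2vecOnDoc() sits above this hunk, so only its closing parenthesis is visible; given the gensim downloader import and the new NetworkModel(300, 300, 1) call, a plausible reading is that it averages pretrained word vectors into one 300-dimensional document vector. The sketch below is an assumption rather than the repository's code: the "word2vec-google-news-300" model name and the zero-vector fallback are illustrative. Note also that error_bad_lines was deprecated in pandas 1.3 and removed in 2.0; on newer pandas the equivalent keyword is on_bad_lines="skip".

import numpy
import pandas as pd
from gensim import downloader
from nltk.tokenize import word_tokenize

# Assumption: a 300-dimensional pretrained model, matching NetworkModel(300, 300, 1).
word_vectors = downloader.load("word2vec-google-news-300")


def word2vecOnDoc(document):
    # Average the vectors of in-vocabulary tokens; zeros for documents with none.
    vectors = [word_vectors[word] for word in document if word in word_vectors]
    if not vectors:
        return numpy.zeros(300, dtype=numpy.float32)
    return numpy.mean(vectors, axis=0)


def prepareData(data):
    # Same steps as the new version above: lowercase the "content" column,
    # tokenize each row, then embed each token list into one vector.
    data = [word_tokenize(row) for row in data.content.str.lower()]
    data = [word2vecOnDoc(document) for document in data]
    return data


# On pandas >= 2.0, error_bad_lines is gone; on_bad_lines="skip" replaces it.
inData = pd.read_table(
    "train/in.tsv",
    on_bad_lines="skip",
    header=None,
    quoting=3,
    usecols=["content"],
    names=["content", "id"],
    nrows=225000,
)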
test-A/out.tsv (2330 lines changed)
File diff suppressed because it is too large