Kasia 2021-06-04 01:59:57 +02:00
commit b22630a9e2
15 changed files with 315817 additions and 0 deletions

8
.gitignore vendored Normal file

@@ -0,0 +1,8 @@
*~
*.swp
*.bak
*.pyc
*.o
.DS_Store
.token

13
README.md Normal file

@@ -0,0 +1,13 @@
Skeptic vs paranormal subreddits
================================

Classify a Reddit post as coming either from the Skeptic subreddit or from
one of the "paranormal" subreddits (Paranormal, UFOs, TheTruthIsHere,
Ghosts, Glitch-in-the-Matrix, conspiracytheories).

The output label is the probability that the post comes from a paranormal
subreddit.
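
To make the output contract concrete, a hypothetical constant baseline
(illustrative only, not part of this commit) would write one probability per
input line to out.tsv:

```python
import pandas as pd

# Hypothetical baseline: give every dev-0 post the same probability of
# being "paranormal"; out.tsv gets exactly one value per input line.
n_dev = len(pd.read_table('dev-0/expected.tsv', sep='\t', quoting=3, header=None))
pd.Series([0.5] * n_dev).to_csv('dev-0/out.tsv', sep='\t', index=False, header=False)
```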
Sources
-------
Data taken from <https://archive.org/details/2015_reddit_comments_corpus>.

1
config.txt Normal file

@@ -0,0 +1 @@
--metric Likelihood --metric Accuracy --metric F1 --metric F0:N<Precision> --metric F9999999:N<Recall> --precision 4 --in-header in-header.tsv --out-header out-header.tsv
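
The two F-score entries are a standard trick: the generalized F-measure
reduces to precision at β = 0 and tends to recall as β grows, so `F0` and
`F9999999` (apparently relabeled here as Precision and Recall) stand in for
those two metrics:

```latex
F_\beta = \frac{(1+\beta^2)\,P\,R}{\beta^2\,P + R},
\qquad F_0 = P,
\qquad \lim_{\beta \to \infty} F_\beta = R
```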

5272
dev-0/expected.tsv Normal file

File diff suppressed because it is too large

5272
dev-0/in.tsv Normal file

File diff suppressed because one or more lines are too long

BIN
dev-0/in.tsv.xz Normal file

Binary file not shown.

5272
dev-0/out.tsv Normal file

File diff suppressed because it is too large

1
in-header.tsv Normal file

@@ -0,0 +1 @@
PostText Timestamp
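
For illustration, these column names can be attached when loading the raw TSV
(a minimal sketch assuming pandas; the data files themselves carry no header
row):

```python
import pandas as pd

# Attach the column names from in-header.tsv to the otherwise header-less TSV.
dev = pd.read_table('dev-0/in.tsv.xz', compression='xz', sep='\t', quoting=3,
                    header=None, names=['PostText', 'Timestamp'])
print(dev['PostText'].head())
```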

94
main.py Normal file

@@ -0,0 +1,94 @@
import gensim.downloader as api
import numpy as np
import pandas as pd
import torch
from nltk.tokenize import word_tokenize


class NeuralNetworkModel(torch.nn.Module):
    """Two-layer feed-forward classifier over averaged word embeddings."""

    def __init__(self):
        super(NeuralNetworkModel, self).__init__()
        self.fc1 = torch.nn.Linear(300, 300)
        self.fc2 = torch.nn.Linear(300, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        x = torch.sigmoid(x)
        return x


# Pretrained 300-dimensional embeddings; word_tokenize also needs the NLTK
# 'punkt' data (nltk.download('punkt') on first use).
word2vec = api.load('word2vec-google-news-300')


def d2v(doc):
    """Average the embeddings of a tokenized document (zero vector if no token is known)."""
    return np.mean([word2vec[word] for word in doc if word in word2vec]
                   or [np.zeros(300)], axis=0)


# Column 0 of in.tsv is the post text (see in-header.tsv); the files have no header row.
train_x = pd.read_table('train/in.tsv.xz', compression='xz', sep='\t', quoting=3, header=None)[0]
train_y = pd.read_table('train/expected.tsv', sep='\t', quoting=3, header=None)[0]
dev_x = pd.read_table('dev-0/in.tsv.xz', compression='xz', sep='\t', quoting=3, header=None)[0]
test_x = pd.read_table('test-A/in.tsv.xz', compression='xz', sep='\t', quoting=3, header=None)[0]

# Tokenize and collapse each document into a single 300-dimensional vector.
train_x = [d2v(word_tokenize(x)) for x in train_x]
dev_x = [d2v(word_tokenize(x)) for x in dev_x]
test_x = [d2v(word_tokenize(x)) for x in test_x]

nm = NeuralNetworkModel()
criterion = torch.nn.BCELoss()
optimizer = torch.optim.Adam(nm.parameters())

BATCH_SIZE = 5
nm.train()
for epoch in range(5):
    for i in range(0, len(train_x), BATCH_SIZE):
        X = torch.tensor(np.array(train_x[i:i + BATCH_SIZE])).float()
        y = torch.tensor(train_y[i:i + BATCH_SIZE].astype(np.float32).to_numpy()).reshape(-1, 1)
        optimizer.zero_grad()
        outputs = nm(X)
        loss = criterion(outputs, y)
        loss.backward()
        optimizer.step()

# Predict binary labels for dev-0 and test-A in the same mini-batches.
nm.eval()
dev_pred, test_pred = [], []
with torch.no_grad():
    for i in range(0, len(dev_x), BATCH_SIZE):
        X = torch.tensor(np.array(dev_x[i:i + BATCH_SIZE])).float()
        dev_pred.extend((nm(X) > 0.5).int().flatten().tolist())
    for i in range(0, len(test_x), BATCH_SIZE):
        X = torch.tensor(np.array(test_x[i:i + BATCH_SIZE])).float()
        test_pred.extend((nm(X) > 0.5).int().flatten().tolist())

pd.Series(dev_pred).to_csv('dev-0/out.tsv', sep='\t', index=False, header=False)
pd.Series(test_pred).to_csv('test-A/out.tsv', sep='\t', index=False, header=False)
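
One property worth noting in the script above: `d2v` falls back to a zero
vector when no token of a document is in the word2vec vocabulary, so every
input row keeps 300 dimensions (the token below is made up for illustration):

```python
# Hypothetical out-of-vocabulary document: d2v returns np.zeros(300).
print(d2v(['qqqzzznotaword']).shape)  # -> (300,)
```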

1
out-header.tsv Normal file

@@ -0,0 +1 @@
Label

5152
test-A/in.tsv Normal file

File diff suppressed because one or more lines are too long

BIN
test-A/in.tsv.xz Normal file

Binary file not shown.

5152
test-A/out.tsv Normal file

File diff suppressed because it is too large

289579
train/expected.tsv Normal file

File diff suppressed because it is too large

BIN
train/in.tsv.xz Normal file

Binary file not shown.