lr
This commit is contained in:
parent
172b0ce2f9
commit
ec806efdf6
10545 dev-0/out.tsv
File diff suppressed because it is too large
36 predict_lr.py Normal file
@@ -0,0 +1,36 @@
import pickle
import fileinput

# Load the pickled model: a word-to-weight-index map, the vocabulary,
# the weight vector and the training word counts.
with open("model.pkl", "rb") as model_file:
    model = pickle.load(model_file)
word_index, vocabulary, weights, words_count = model


def predict():
    output = []
    # fileinput reads from the paths given on the command line,
    # or from standard input when none are given.
    for line in fileinput.input(openhook=fileinput.hook_encoded("utf-8")):
        fields = line.rstrip().split('\t')
        document = fields[0]
        terms = document.split(' ')
        # Fold the current document's terms into the running counts.
        for term in terms:
            if term in words_count:
                words_count[term] += 1
            else:
                words_count[term] = 1
        # Linear score: bias term plus the weighted relative frequency
        # of every in-vocabulary term.
        expected = weights[0]
        for t in terms:
            if t in vocabulary:
                expected += words_count[t] / len(words_count) * weights[word_index[t]]
        # Threshold the score into a binary label.
        output.append(1 if expected > 0.9 else 0)

    with open("out.tsv", "w") as out:
        for val in output:
            out.write(str(val) + "\n")


predict()
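Note: predict_lr.py unpacks model.pkl into (word_index, vocabulary, weights, words_count), but no code in this commit writes that file. Assuming the intent was to serialise a trained model in that shape, the missing dump might look like the following sketch; the four names mirror predict_lr.py's unpacking and are otherwise assumptions, since this commit never constructs them:

import pickle

# Hypothetical sketch, not part of the commit: write model.pkl in the
# exact shape predict_lr.py expects to unpack. word_index, vocabulary,
# weights and words_count are assumed to have been built beforehand.
with open("model.pkl", "wb") as model_file:
    pickle.dump((word_index, vocabulary, weights, words_count), model_file)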
98 program_lr.py Normal file
@@ -0,0 +1,98 @@
#!/usr/bin/env python3
import random
import re
from collections import defaultdict


def define_vocabulary(file_to_learn_new_words):
    # Count how often each lowercased token occurs in the training texts.
    word_counts = {'count': defaultdict(int)}
    with open(file_to_learn_new_words, encoding='utf-8') as in_file:
        for line in in_file:
            text, timestamp = line.rstrip('\n').split('\t')
            for token in text.lower().split(' '):
                word_counts['count'][token] += 1
    return word_counts


def tokenize_list(string_input):
    # Replace literal "\n" sequences and strip URLs before splitting.
    text = string_input.replace('\\n', ' ')
    text = re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', text)
    # Split on runs of punctuation, whitespace and digits.
    words = re.split(
        r';+|,+|\*+|\n+| +|\_+|\%+|\t+|\[+|\]+|\.+|\(+|\)+|\++|\\+|\/+'
        r'|[0-9]+|\#+|\'+|\"+|\-+|\=+|\&+|\:+|\?+|\!+|\^+|·+',
        text)
    # Drop URL fragments and single letters, discard empties, lowercase.
    regex = re.compile(r'http|^[a-zA-Z]$|org')
    filtered_values = [word for word in words if not regex.match(word)]
    return [value.lower() for value in filtered_values if value]


def train(vocabulary, input_train, expected_train):
    learning_rate = 0.001
    learning_precision = 0.00001

    # Map every training document to its expected binary label.
    words_vocabulary = {}
    with open(input_train, encoding='utf-8') as input_file, \
            open(expected_train, encoding='utf-8') as expected_file:
        for line, exp in zip(input_file, expected_file):
            words_vocabulary[line] = int(exp)

    # One small random weight per vocabulary word.
    weights = {}
    for i in vocabulary['count'].keys():
        weights[i] = random.uniform(-0.01, 0.01)

    best_weights = weights
    delta = 1.0
    iteration = 0
    loss_sum = 0.0
    error = 10.0
    max_iteration = len(vocabulary['count'])

    # Stochastic gradient descent: one randomly sampled document per step.
    while abs(delta) > learning_precision and iteration < max_iteration:
        d, y = random.choice(list(words_vocabulary.items()))
        tokens = tokenize_list(d)
        # Linear prediction: weighted sum of in-vocabulary token counts.
        y_hat = 0.0
        for token in tokens:
            if token in vocabulary['count']:
                y_hat += weights[token] * tokens.count(token)
        # Squared-error gradient step, scaled by the learning rate.
        delta = (y_hat - y) * learning_rate
        for word in tokens:
            if word in weights:
                weights[word] -= tokens.count(word) * delta
        loss_sum += (y_hat - y) ** 2.0
        # Every 1000 steps, snapshot the weights if the mean loss improved.
        if iteration % 1000 == 0:
            if error > loss_sum / 1000:
                best_weights = weights.copy()
                error = loss_sum / 1000
            loss_sum = 0.0
        iteration += 1
    return best_weights, vocabulary


def prediction(input_path, output_path, weights, vocabulary):
    with open(input_path, encoding='utf-8') as input_file, \
            open(output_path, 'w+', encoding='utf-8') as output_file:
        for line in input_file:
            tokens = tokenize_list(line)
            y_hat = 0.0
            for token in tokens:
                if token in vocabulary['count']:
                    y_hat += weights[token] * tokens.count(token)
            # Threshold the linear score into a binary label.
            output_file.write('1\n' if y_hat > 0.0 else '0\n')


vocabulary = define_vocabulary('train/in.tsv')
weights, words = train(vocabulary, 'train/in.tsv', 'train/expected.tsv')
prediction('dev-0/in.tsv', 'dev-0/out.tsv', weights, words)
prediction('test-A/in.tsv', 'test-A/out.tsv', weights, words)
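For reference, the update inside train() is the usual least-squares SGD step: the score is y_hat = sum of weights[t] * count(t) over in-vocabulary tokens, and each touched weight moves by -(y_hat - y) * learning_rate * count(t), the gradient of half the squared error. A toy walk-through of one such step, with invented words and numbers:

# One SGD step in the same shape as train(); all values are made up.
learning_rate = 0.001
weights = {'ufo': 0.005, 'space': -0.002}
counts = {'ufo': 2, 'space': 1}   # token counts in the sampled document
y = 1                             # expected label for that document
y_hat = sum(weights[t] * c for t, c in counts.items())   # 0.008
delta = (y_hat - y) * learning_rate                      # -0.000992
for t, c in counts.items():
    weights[t] -= c * delta       # weights['ufo'] becomes 0.006984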
10305 test-A/out.tsv
File diff suppressed because it is too large