linear regression

s152483 2020-04-10 21:47:04 +00:00
parent 754fd76874
commit 172b0ce2f9
3 changed files with 5404 additions and 29 deletions

out.tsv (new file, 5272 lines)

File diff suppressed because it is too large.

@@ -1,29 +1,35 @@
-import sys
 import pickle
+import sys
 import math
-from normalize import normalize
+import fileinput
 
 model = pickle.load(open("model.pkl", "rb"))
-pskeptic, vocabulary_size, skeptick_words_total, paranormal_words_total, skeptic_count, paranormal_count = model
+word_index, vocabulary, weights, words_count = model
 
-for line in sys.stdin:
-    document = line.rstrip()
-    terms = normalize(document)
-    log_prob_skeptic = math.log(pskeptic)
-    log_prob_paranormal = math.log(1 - pskeptic)
-    for term in terms:
-        if term not in skeptic_count:
-            skeptic_count[term] = 0
-        if term not in paranormal_count:
-            paranormal_count[term] = 0
-        log_prob_skeptic += math.log((skeptic_count[term] + 1) / (skeptick_words_total + vocabulary_size))
-        log_prob_paranormal += math.log((paranormal_count[term] + 1) / (paranormal_words_total + vocabulary_size))
-    if log_prob_skeptic > log_prob_paranormal:
-        print("S")
-    else:
-        print("P")
+def predict():
+    output = []
+    for line in fileinput.input(openhook=fileinput.hook_encoded("utf-8")):
+        line = line.rstrip()
+        fields = line.split('\t')
+        document = fields[0]  # first TSV column holds the text
+        terms = document.split(' ')
+        # Update the running term counts used as crude term-frequency features.
+        for term in terms:
+            if term in words_count:
+                words_count[term] += 1
+            else:
+                words_count[term] = 1
+        expected = weights[0]  # weights[0] is the bias term
+        for t in terms:
+            if t in vocabulary:
+                expected += (words_count[t] / len(words_count)) * weights[word_index[t]]
+        if expected > 0.9:
+            output.append(1)
+        else:
+            output.append(0)
+    with open("out.tsv", "w") as out:
+        for val in output:
+            out.write(str(val) + "\n")
+
+predict()

prgram.py (new file, 97 lines)

@@ -0,0 +1,97 @@
#!/usr/bin/env python3
import random
import re
from collections import defaultdict
def define_vocabulary(file_to_learn_new_words):
    # Count how often each token occurs in the training input.
    word_counts = {'count': defaultdict(int)}
    with open(file_to_learn_new_words, encoding='utf-8') as in_file:
        for line in in_file:
            text, timestamp = line.rstrip('\n').split('\t')
            tokens = text.lower().split(' ')
            for token in tokens:
                word_counts['count'][token] += 1
    return word_counts
def tokenize_list(string_input):
    # Strip literal '\n' sequences and URLs, then split on punctuation,
    # digits and whitespace.
    string = string_input.replace('\\n', ' ')
    text = re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', string)
    words = re.split(
        r';+|,+|\*+|\n+| +|\_+|\%+|\t+|\[+|\]+|\.+|\(+|\)+|\++|\\+|\/+|[0-9]+|\#+|\'+|\"+|\-+|\=+|\&+|\:+|\?+|\!+|\^+|\·+',
        text)
    # Drop URL fragments, single letters and empty strings; lowercase the rest.
    regex = re.compile(r'http|^[a-zA-Z]$|org')
    filtered_values = [word for word in words if not regex.match(word)]
    filtered_values[:] = (value.lower() for value in filtered_values if len(value) != 0)
    return filtered_values
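# Editor's note, example behaviour (invented input, not from the commit):
#   tokenize_list('Visit http://x.io, UFOs are real!')
# strips the URL, splits on punctuation, drops single letters and empties,
# and lowercases, returning ['visit', 'ufos', 'are', 'real'].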
def train(vocabulary, input_train, expected_train):
    learning_rate = 0.001
    learning_precision = 0.00001
    # Maps each raw training line to its expected 0/1 label.
    words_vocabulary = {}
    with open(input_train, encoding='utf-8') as input_file, \
            open(expected_train, encoding='utf-8') as expected_file:
        for line, exp in zip(input_file, expected_file):
            words_vocabulary[line] = int(exp)
    weights = {}
    weight = {}
    delta = 1
    iteration = 0
    loss_sum = 0.0
    error = 10.0
    max_iteration = len(vocabulary['count'])  # was len(vocabulary), which is always 1
    # Start from small random weights, one per vocabulary word.
    for i in vocabulary['count'].keys():
        weights[i] = random.uniform(-0.01, 0.01)
    # Stochastic gradient descent on squared error, one random example per step.
    while abs(delta) > learning_precision and iteration < max_iteration:
        d, y = random.choice(list(words_vocabulary.items()))
        y_hat = 0
        tokens = tokenize_list(d)
        for token in set(tokens):  # each distinct term once, weighted by its count
            if token in vocabulary['count']:
                y_hat += weights[token] * tokens.count(token)
        delta = (y_hat - y) * learning_rate
        for word in set(tokens):
            if word in weights:  # was `in words_vocabulary`, so weights were never updated
                weights[word] -= tokens.count(word) * delta
        loss = (y_hat - y) ** 2.0
        loss_sum += loss
        # Every 1000 steps, snapshot the weights if the mean loss improved.
        if iteration % 1000 == 0:
            if error > loss_sum / 1000:
                weight = weights.copy()  # copy, so later updates do not overwrite the snapshot
                error = loss_sum / 1000
            loss_sum = 0.0
        iteration += 1
    return weight, vocabulary
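# Editor's note: the loop above is plain SGD on squared error. For one example
# with term-count features x and target y, each seen weight moves by
#     w_t <- w_t - learning_rate * (y_hat - y) * x_t
# e.g. with learning_rate=0.001, y_hat=0.4, y=1 and a term occurring twice,
# the step is -0.001 * (0.4 - 1) * 2 = +0.0012.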
def prediction(input_path, output_path, weights, vocabulary):
    with open(input_path, encoding='utf-8') as input_file, \
            open(output_path, 'w+', encoding='utf-8') as output_file:
        for line in input_file:
            y_hat = 0
            tokens = tokenize_list(line)
            for token in set(tokens):
                if token in vocabulary['count'] and token in weights:
                    y_hat += weights[token] * tokens.count(token)  # was token.count(token), always 1
            output_file.write('1\n' if y_hat > 0.0 else '0\n')
vocabulary = define_vocabulary('train/in.tsv')
weights, words = train(vocabulary, 'train/in.tsv', 'train/expected.tsv')
prediction('dev-0/in.tsv', 'dev-0/out.tsv', weights, words)
prediction('test-A/in.tsv', 'test-A/out.tsv', weights, words)
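Editor's note: a quick sanity check for the generated files is to score dev-0/out.tsv against the gold labels. The sketch below assumes dev-0/expected.tsv exists with one 0/1 label per line, mirroring train/expected.tsv; it is not part of the commit:

# Hypothetical check: accuracy of dev-0 predictions against gold labels.
with open('dev-0/out.tsv', encoding='utf-8') as got, \
        open('dev-0/expected.tsv', encoding='utf-8') as gold:
    pairs = [(p.strip(), g.strip()) for p, g in zip(got, gold)]
hits = sum(p == g for p, g in pairs)
print('accuracy: {}/{} = {:.3f}'.format(hits, len(pairs), hits / len(pairs)))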