# paranormal-or-skeptic/code_regression.py
import random
import re

from _collections import defaultdict
# prefer the public module; this import supersedes the private _collections one
from collections import defaultdict
def define_vocabulary(file_to_learn_new_words):
    """Build a word-frequency table from a TSV corpus.

    Each line of the input file is expected to be ``text<TAB>timestamp``;
    the text part is lowercased and split on single spaces.

    :param file_to_learn_new_words: path of the TSV file to read (UTF-8).
    :return: dict with a single key ``'count'`` mapping to a
        ``defaultdict(int)`` of token -> occurrence count.
    :raises ValueError: if a line does not contain exactly one tab.
    """
    word_counts = {'count': defaultdict(int)}
    with open(file_to_learn_new_words, encoding='utf-8') as in_file:
        for line in in_file:
            text, timestamp = line.rstrip('\n').split('\t')
            for token in text.lower().split(' '):
                word_counts['count'][token] += 1
    # the 'with' block closes the file; the original's explicit close() was redundant
    return word_counts
def tokenize_list(string_input):
    """Tokenize a raw post into a list of cleaned lowercase words.

    Cleaning steps: drop literal backslash-n sequences and URLs, strip
    ``/x/`` one-letter path fragments, blank out every character outside
    a-z (NOTE(review): this also erases UPPERCASE letters, so capitalised
    words lose those characters — confirm this is intended), collapse
    whitespace, remove words of 1-3 characters, then split on
    punctuation/digits and drop empty, single-letter and http/org-prefixed
    tokens.

    :param string_input: raw text line.
    :return: list of lowercase token strings.
    """
    # the corpus may contain the literal two-character sequence "\n"
    text = string_input.replace('\\n', ' ')
    #text=re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', string)
    text = re.sub(r'\\n+', " ", text)
    text = re.sub(r'http\S+', " ", text)    # URLs
    text = re.sub(r'\/[a-z]\/', " ", text)  # /r/-style fragments
    text = re.sub(r'[^a-z]', " ", text)     # keep lowercase letters only
    text = re.sub(r'\s{2,}', " ", text)     # collapse runs of whitespace
    text = re.sub(r'\W\w{1,3}\W|\A\w{1,3}\W', " ", text)  # drop 1-3 letter words
    text = re.sub(r'^\s', "", text)         # trim one leading space

    # (the original rebuilt `text` character by character into a new string
    # here — a quadratic no-op; `text` is used directly instead)
    words = re.split(';+|,+|\*+|\n+| +|\_+|\%+|\t+|\[+|\]+|\.+|\(+|\)+|\++|\\+|\/+|[0-9]+|\#+|\'+|\"+|\-+|\=+|\&+|\:+|\?+|\!+|\^+|\·+', text)
    noise = re.compile(r'http|^[a-zA-Z]$|org')
    filtered_values = [word for word in words if not noise.match(word)]
    return [value.lower() for value in filtered_values if len(value) != 0]
def read_words(input_path):
    """Count token occurrences over a whole file, one line at a time.

    Unlike :func:`define_vocabulary`, each line is run through
    :func:`tokenize_list` rather than a plain split.

    :param input_path: path of the UTF-8 text file to read.
    :return: dict with key ``'count'`` -> ``defaultdict(int)`` of
        token -> occurrence count.
    """
    vocabulary = {'count': defaultdict(int)}
    with open(input_path, encoding='utf-8') as infile:
        for line in infile:
            for token in tokenize_list(line):
                # BUG FIX: the original checked membership in the OUTER dict
                # and incremented vocabulary['vocabulary'][token] — a key that
                # never exists, so the first token raised KeyError.
                vocabulary['count'][token] += 1
    return vocabulary
def train(vocabulary, input_train, expected_train):
    """Train linear-regression weights with SGD over single random documents.

    :param vocabulary: dict with ``'count'`` -> token frequency map (the
        tokens define the weight vector).
    :param input_train: path of the TSV file with one document per line.
    :param expected_train: path of the file with one 0/1 label per line,
        aligned with ``input_train``.
    :return: tuple ``(best_weights, vocabulary)`` where ``best_weights`` is
        the snapshot with the lowest mean loss over a 1000-step window.
    """
    learning_rate = 0.00001
    #learning_precision=0.000001
    max_iteration = 10000

    # map each raw document line to its numeric label
    document_labels = {}
    with open(input_train, encoding='utf-8') as input_file, \
            open(expected_train, encoding='utf-8') as expected_file:
        for line, exp in zip(input_file, expected_file):
            document_labels[line] = int(exp)

    # small random initial weight per vocabulary token
    weights = {token: random.uniform(-0.01, 0.01) for token in vocabulary['count']}
    best_weights = {}
    best_error = 10.0
    loss_sum = 0.0
    iteration = 0

    # delta>learning_precision and
    while iteration < max_iteration:
        document, y = random.choice(list(document_labels.items()))
        tokens = tokenize_list(document)
        # y_hat = sum of weight * term frequency over DISTINCT tokens
        # (the original looped over every occurrence and multiplied by the
        # count each time, effectively weighting by count squared)
        y_hat = 0
        for token in set(tokens):
            if token in vocabulary['count']:
                y_hat += weights[token] * tokens.count(token)
        delta = (y_hat - y) * learning_rate
        # gradient step. BUG FIX: the original gated on membership in the
        # line->label dict (whose keys are whole LINES, not words), so
        # weights were almost never updated.
        for token in set(tokens):
            if token in weights:
                weights[token] -= tokens.count(token) * delta
        loss_sum += (y_hat - y) ** 2.0
        if iteration % 1000 == 0:
            if best_error > loss_sum / 1000:
                # BUG FIX: snapshot must be a copy — the original aliased the
                # live dict, so "best" always tracked the current weights.
                best_weights = weights.copy()
                best_error = loss_sum / 1000
            loss_sum = 0.0
        iteration += 1
    return best_weights, vocabulary
def prediction(input, output, weights, vocabulary):
    """Write one 0/1 prediction per input line to the output file.

    :param input: path of the file with one document per line.
        (name kept for caller compatibility although it shadows the builtin)
    :param output: path of the prediction file to (over)write.
    :param weights: token -> weight mapping produced by :func:`train`.
    :param vocabulary: dict with ``'count'`` -> known-token frequency map.
    """
    with open(input, encoding='utf-8') as input_file, \
            open(output, 'w+', encoding='utf-8') as out_file:
        for line in input_file:
            tokens = tokenize_list(line)
            # BUG FIX: the original computed weights[token] * token.count(token),
            # and token.count(token) is always 1; summing weight * term
            # frequency over distinct tokens gives the same score the model
            # was trained to produce.
            y_hat = 0
            for token in set(tokens):
                if token in vocabulary['count']:
                    y_hat += weights[token] * tokens.count(token)
            # labels are 0/1, so classify at the midpoint 0.5
            # (the original thresholded at 0.0 — review: confirm intended)
            out_file.write('1\n' if y_hat > 0.5 else '0\n')
def main():
    """Train on the train set, then write predictions for dev-0 and test-A."""
    vocabulary = define_vocabulary('train/in.tsv')
    weights, words = train(vocabulary, 'train/in.tsv', 'train/expected.tsv')
    prediction('dev-0/in.tsv', 'dev-0/out.tsv', weights, words)
    prediction('test-A/in.tsv', 'test-A/out.tsv', weights, words)


# guard so importing this module no longer triggers a full training run
if __name__ == '__main__':
    main()