# okapi/main.py
# 2022-04-11 10:09:45 +02:00
# 64 lines, 2.1 KiB, Python

# Amazon reviews search engine
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from vectorizer_idf import VectorizerIdf
from vectorizer_tf import VectorizerTf
def get_answers_array(path='data.csv'):
    """Load the answer column of a review CSV as a NumPy array.

    Parameters:
        path: path to the CSV file to read (default ``'data.csv'``,
            preserving the original hard-coded behavior). The file must
            contain an ``ad`` column.

    Returns:
        numpy.ndarray: the non-null values of the ``ad`` column.
    """
    # on_bad_lines='skip' replaces the deprecated error_bad_lines=False
    # (removed in pandas 2.0): malformed rows are skipped instead of
    # aborting the load.
    frame = pd.read_csv(path, engine='python', on_bad_lines='skip')
    return np.array(frame['ad'].dropna())
def okapi_mb25(query, tf, idf, a_len, documents):
    """Score each document in *documents* against *query* with an
    Okapi BM25-style formula ("mb25" is presumably a typo for "bm25").

    Parameters:
        query: free-text search phrase.
        tf: corpus-level VectorizerTf. NOTE(review): not used inside the
            body — a fresh per-document VectorizerTf is built instead;
            confirm this parameter is intentional.
        idf: VectorizerIdf exposing ``get_idf_for_word(word)``.
        a_len: average document length used for BM25 length
            normalisation. NOTE(review): the caller passes the average
            *word* length, whereas classic BM25 uses the average
            document length in tokens — confirm intent.
        documents: iterable of document strings, scored in order.

    Returns:
        list: one score per document, in input order; 0 for any
        document whose scoring raised an exception.
    """
    # Standard BM25 free parameters (k1 and b).
    k = 1.6
    b = 0.75
    scores = []
    for index, document in enumerate(documents):
        s = 0
        try:
            # Vectorize this single document, then keep only the tf
            # values of the terms that appear in the query.
            v_tf = VectorizerTf([document])
            tf_for_doc = v_tf.get_tf_for_document(query)
            tf_for_document = v_tf.tf_matrix.toarray() * tf_for_doc[0]
            for idx, val in enumerate(tf_for_document[0]):
                # licznik/mianownik = numerator/denominator (Polish).
                licznik = val * (k + 1)
                # NOTE(review): len(tf_for_doc) is the length of the tf
                # result, not the document's token count — classic BM25
                # uses |d| here; confirm this is intended.
                mianownik = val + k * (1 - b + b * (len(tf_for_doc) / a_len))
                idf_for_word = idf.get_idf_for_word(v_tf.feature_names[idx])
                s += idf_for_word * (licznik / mianownik)
            scores.append(s)
        except Exception as e:
            # Best-effort ranking: any failure (e.g. no query term in
            # the document vocabulary) scores the document as 0 instead
            # of aborting the whole search.
            scores.append(0)
    return scores
if __name__ == "__main__":
    # data = get_answers_array()
    data = ['Ala ma kota', 'Maciej i Ala ma psa i Ala Ala Ala', 'Ala ma żółwia', 'Maciej ma psa, żółwia i kota',
            'Ola ma psa, żółwia i kota, masło']

    # Mean word length of each document, then the corpus-wide mean of
    # those means — passed to BM25 as the length-normalisation term.
    per_doc_means = [sum(len(w) for w in doc.split()) / len(doc.split())
                     for doc in data]
    average_doc_len = sum(per_doc_means) / len(per_doc_means)

    vectorizer_tf = VectorizerTf(data)
    vectorizer_idf = VectorizerIdf(data)

    # Interactive search loop: read a phrase, rank the corpus, print
    # the top five non-zero matches and the total hit count.
    while True:
        q = input('Wpisz fraze: ')
        score = okapi_mb25(q, vectorizer_tf, vectorizer_idf, average_doc_len, data)

        shown = 0
        for sc, sent in sorted(zip(score, data), reverse=True):
            if sc:
                print(sent, sc)
                shown += 1
            if shown == 5:
                break

        hits = sum(1 for s in score if s != 0)
        print('Znaleziono ' + str(hits) + ' wyniki')