import nltk
import pandas as pd
from sklearn.neural_network import MLPClassifier
from nltk.tokenize import word_tokenize
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
from unidecode import unidecode
import fasttext.util

nltk.download('punkt')
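
# Note (an assumption about newer NLTK releases, not part of the original):
# recent NLTK versions ship the Punkt data in a separate package, so
# word_tokenize may additionally require:
# nltk.download('punkt_tab')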

# # load the Polish-language word2vec model
# model: KeyedVectors = KeyedVectors.load("word2vec_100_3_polish.bin")
# print("loaded the Polish-language word2vec model")

# download (if needed) and load the pretrained Polish fastText model
fasttext.util.download_model('pl', if_exists='ignore')  # Polish
model = fasttext.load_model('cc.pl.300.bin')

# sanity check: vectors should be 300-dimensional
print(model.get_word_vector('polska').shape)
print(model.get_nearest_neighbors('polska'))
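
# Note: fastText composes vectors from character n-grams, so
# get_word_vector() returns a vector for any string and does not raise
# KeyError for out-of-vocabulary words (unlike a gensim KeyedVectors lookup);
# the KeyError fallbacks further below are therefore only a safety net.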

# read the training data
# in train.tsv, rows 25706, 58881 and 73761 contain a tab inside the text
# field that has to be replaced with 4 spaces first
train = pd.read_csv('train/train.tsv', sep='\t')
train.columns = ["y", "x"]

print("loaded the training data")
print(train["y"][0], train["x"][0])
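
# A possible way to handle those stray tabs up front (a sketch, not part of
# the original): read the raw lines and treat only the first tab as the
# column separator.
# with open('train/train.tsv', encoding='utf-8') as f:
#     wiersze = [linia.rstrip('\n').split('\t', 1) for linia in f]
# train = pd.DataFrame(wiersze, columns=["y", "x"])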

# split the training data into words
# https://www.geeksforgeeks.org/python-word-embedding-using-word2vec/
slowa = []
for tekst in train["x"]:
    pom = []
    for slowo in word_tokenize(tekst):
        pom.append(slowo.lower())
    slowa.append(pom)

print("split the training data into words")
print(slowa[0])

# # alternative: train a word2vec model on the tokenized texts
# # https://radimrehurek.com/gensim/models/word2vec.html
# model = Word2Vec(sentences=slowa, vector_size=100, window=5, min_count=1, workers=4)
# model.save("word2vec.model")

# convert the words of the training texts into vectors
# (each text is represented by the sum of its word vectors)
teksty = []
nieistniejace_slowa = []
for tekst in train["x"]:
    pom = None
    for slowo in word_tokenize(tekst):
        wektor = None
        try:
            wektor = model.get_word_vector(slowo.lower())
        except KeyError:
            try:
                # fall back to the ASCII-folded form of the word
                wektor = model.get_word_vector(unidecode(slowo.lower()))
                nieistniejace_slowa.append(slowo.lower())
            except KeyError:
                nieistniejace_slowa.append(slowo.lower())
        if wektor is not None:
            if pom is None:
                pom = wektor
            else:
                pom = pom + wektor
    teksty.append(pom)

print("converted the training data words into vectors")
print(teksty[0])
print(nieistniejace_slowa)
print(len(nieistniejace_slowa))
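
# The same tokenize-and-sum loop is repeated below for the test and dev-0
# sets. A possible refactor (a sketch, not part of the original; it omits
# the unidecode fallback and the nieistniejace_slowa bookkeeping) would be a
# single helper; `tekst_na_wektor` is a hypothetical name:
#
# def tekst_na_wektor(tekst, model):
#     pom = None
#     for slowo in word_tokenize(tekst):
#         wektor = model.get_word_vector(slowo.lower())
#         pom = wektor if pom is None else pom + wektor
#     return pom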

X = teksty
y = train["y"]

clf = MLPClassifier()  # activation="tanh"
clf.fit(X, y)
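
# Note on defaults (sklearn facts; the alternative below is an untuned
# assumption, not from the original): MLPClassifier() uses one hidden layer
# of 100 units, ReLU activation and the adam solver with max_iter=200, so on
# this data it may stop early with a ConvergenceWarning.
# clf = MLPClassifier(hidden_layer_sizes=(100,), activation="tanh", max_iter=500)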

# read the test data
# in in.tsv, rows 1983 and 5199 contain a tab inside the text field that
# has to be replaced with 4 spaces first
test = pd.read_csv('test-A/in.tsv', sep='\t')
test.columns = ["x"]

print("loaded the test data")
print(test["x"][0])

# split the test data into words
# https://www.geeksforgeeks.org/python-word-embedding-using-word2vec/
slowa = []
for tekst in test["x"]:
    pom = []
    for slowo in word_tokenize(tekst):
        pom.append(slowo.lower())
    slowa.append(pom)

print("split the test data into words")
print(slowa[0])

# convert the words of the test texts into vectors
teksty = []
nieistniejace_slowa = []
for tekst in test["x"]:
    pom = None
    for slowo in word_tokenize(tekst):
        wektor = None
        try:
            wektor = model.get_word_vector(slowo.lower())
        except KeyError:
            try:
                # fall back to the ASCII-folded form of the word
                wektor = model.get_word_vector(unidecode(slowo.lower()))
                nieistniejace_slowa.append(slowo.lower())
            except KeyError:
                nieistniejace_slowa.append(slowo.lower())
        if wektor is not None:
            if pom is None:
                pom = wektor
            else:
                pom = pom + wektor
    teksty.append(pom)

print("converted the test data words into vectors")
print(teksty[0])
print(nieistniejace_slowa)
print(len(nieistniejace_slowa))

# predict labels for the test set and write them out
przewidywania = clf.predict(teksty)
print(przewidywania)

with open("test-A/out.tsv", "w", encoding="utf-8") as uwu:
    for p in przewidywania:
        uwu.write(str(p) + "\n")

### dev-0

# in in.tsv, rows 1983 and 5199 contain a tab inside the text field that
# has to be replaced with 4 spaces first
dev_in = pd.read_csv('dev-0/in.tsv', sep='\t')
dev_in.columns = ["x"]

print(dev_in["x"][0])

dev_expected = pd.read_csv('dev-0/expected.tsv', sep='\t')
dev_expected.columns = ["y"]

print(dev_expected["y"][0])

# split the dev-0 data into words
# https://www.geeksforgeeks.org/python-word-embedding-using-word2vec/
slowa = []
for tekst in dev_in["x"]:
    pom = []
    for slowo in word_tokenize(tekst):
        pom.append(slowo.lower())
    slowa.append(pom)

print(slowa[0])

# convert the words of the dev-0 texts into vectors
teksty = []
for tekst in dev_in["x"]:
    pom = None
    for slowo in word_tokenize(tekst):
        wektor = None
        try:
            wektor = model.get_word_vector(slowo.lower())
        except KeyError:
            pass
        if wektor is not None:
            if pom is None:
                pom = wektor
            else:
                pom = pom + wektor
    teksty.append(pom)

print(teksty[0])

# predict labels for dev-0 and write them out
przewidywania = clf.predict(teksty)
print(przewidywania)

with open("dev-0/out.tsv", "w", encoding="utf-8") as uwu:
    for p in przewidywania:
        uwu.write(str(p) + "\n")

# compare the predictions with the expected labels
for i in range(len(przewidywania)):
    print(przewidywania[i], dev_expected["y"][i])
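
# A minimal accuracy check on dev-0 (a sketch, not part of the original);
# it assumes expected.tsv aligns line-by-line with the predictions.
poprawne = sum(1 for p, y_true in zip(przewidywania, dev_expected["y"]) if p == y_true)
print("dev-0 accuracy:", poprawne / len(przewidywania))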