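"""Augment an English corpus with glossary translations injected into matching sentences.

The script reads a tab-separated glossary (with at least the columns 'source',
'source_lem' and 'result') together with a corpus file and its '.lemmatized'
companion, fuzzily matches lemmatized glossary terms against each lemmatized
sentence, and rewrites the corpus so that every match produces a copy of the
original sentence with the glossary translation spliced in after the matched
span, wrapped in '$' markers (presumably for terminology-constrained
translation training).

Assumed invocation (the script name is illustrative; both paths are resolved
relative to the user's home directory):

    python inject_glossary.py glossary.tsv data/train.en
"""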
import os
import sys

import pandas as pd
import rapidfuzz
from rapidfuzz.fuzz import partial_ratio
from rapidfuzz.utils import default_process


def read_arguments():
    try:
        # sys.argv[0] is the script name; the two real arguments follow it.
        _, path_glossary, path_in = sys.argv
        return path_glossary, path_in
    except ValueError:
        print("ERROR: expected exactly two arguments: <glossary path> <input corpus path>.")
        sys.exit(1)


def full_strip(line):
    # Collapse all runs of whitespace into single spaces (helper, not used below).
    return ' '.join(line.split())
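

# get_injected() locates, via a sliding window, the span of the lemmatized
# sentence that best matches a lemmatized glossary term and splices the
# glossary translation into the corresponding position of the original
# (non-lemmatized) sentence, wrapped in '$' markers.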
def get_injected(sentence, sentence_en, sequence, inject):
    sen = sentence.split()
    sen_en = sentence_en.split()
    window_size = len(sequence.split())
    maxx = 0
    maxx_prv = 0
    maxxi = 0
    # Slide a window of the term's length over the lemmatized tokens and keep
    # the position with the highest fuzzy similarity to the term.
    for i in range(len(sen) - window_size + 1):
        current = rapidfuzz.fuzz.ratio(' '.join(sen[i:i + window_size]), sequence)
        if current >= maxx:
            maxx_prv = maxx
            maxx = current
            maxxi = i
    # Inject only if the final update strictly improved the maximum, i.e. the
    # best-scoring window is unambiguous; on a tie the sentence is returned
    # unchanged. Note that the window index found on the lemmatized tokens is
    # applied to the original tokens, which assumes the two tokenizations align.
    if maxx_prv != maxx:
        return ' '.join(sen_en[:maxxi + window_size]) + ' $' + inject + '$ ' + ' '.join(sen_en[maxxi + window_size:])
    return sentence_en
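
# Illustrative call with made-up strings:
#     get_injected('the cat sit on the mat', 'The cat sat on the mat.',
#                  'cat sit', 'kot siedzi')
# returns 'The cat sat $kot siedzi$ on the mat.'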


# Minimum partial_ratio score for a lemmatized glossary term to count as a match.
THRESHOLD = 70

glossary_arg_path, in_arg_path = read_arguments()
# Both paths are interpreted relative to the user's home directory.
train_in_path = os.path.join(os.path.expanduser('~'), in_arg_path)

glossary = pd.read_csv(os.path.join(os.path.expanduser('~'), glossary_arg_path), sep='\t')
glossary['source_lem'] = [str(default_process(x)) for x in glossary['source_lem']]
# Keep a pseudo-random ~83% slice of the glossary, keyed on a hash of the source
# term. Note that Python's built-in hash() of strings is salted per interpreter
# run unless PYTHONHASHSEED is fixed, so this split is not reproducible.
glossary['hash'] = [hash(x) for x in glossary['source']]
glossary = glossary[glossary['hash'] % 100 > 16]

# Original English sentences, one per line.
file_en = pd.read_csv(train_in_path, sep='\t', header=None, names=['text'])
file_en['text'] = [default_process(text) for text in file_en['text'].values.tolist()]
file_en = file_en['text'].values.tolist()

# Lemmatized counterpart of the corpus, expected next to the input file.
file_en_lemmatized = pd.read_csv(train_in_path + '.lemmatized', sep='\t', header=None, names=['text'])
file_en_lemmatized['text'] = [default_process(text) for text in file_en_lemmatized['text'].values.tolist()]
file_en_lemmatized = file_en_lemmatized['text'].values.tolist()
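
# For each sentence, retrieve up to five glossary terms whose lemmatized form
# fuzzily occurs in the lemmatized sentence (score >= THRESHOLD) and emit one
# augmented copy of the original sentence per matching term; sentences without
# any match are dropped from the output.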
en = []
translation_line_counts = []  # declared here but never populated below
for line, line_en in zip(file_en_lemmatized, file_en):
    line = default_process(line)
    matchez = rapidfuzz.process.extract(
        query=line, choices=glossary['source_lem'], limit=5, score_cutoff=THRESHOLD, scorer=partial_ratio)
    if len(matchez) > 0:
        for match in matchez:
            # match[0] is the matched lemmatized source term; look up its translation.
            polish_translation = \
                glossary.loc[lambda df: df['source_lem'] == match[0]]['result'].astype(str).values.flatten()[0]
            en.append(get_injected(line, line_en, match[0], polish_translation))


# Overwrite the original input file with the augmented sentences.
with open(train_in_path, 'w') as file_en_write:
    for e in en:
        file_en_write.write(e + '\n')