"""Inject glossary translations into a parallel EN-PL training corpus.

For every English sentence, lemmatized glossary terms are fuzzy-matched against
the lemmatized sentence; if a term's Polish translation also (fuzzily) occurs in
the Polish reference, a copy of the English sentence is emitted with the
translation injected after the matched span, wrapped in '$...$'. The Polish file
is rewritten with each line repeated so both files stay line-aligned.
"""
import os
import sys

import pandas as pd
import rapidfuzz
from rapidfuzz.fuzz import partial_ratio
from rapidfuzz.utils import default_process

THRESHOLD = 70  # minimum fuzzy-match score (0-100) for term matching and injection


def read_arguments():
    # sys.argv[0] is the script name, so unpack only the three real arguments.
    try:
        path_glossary, path_in, path_expected = sys.argv[1:]
        return path_glossary, path_in, path_expected
    except ValueError:
        print("ERROR: expected three arguments: <glossary> <in file> <expected file>.")
        sys.exit(1)


def full_strip(line):
    """Collapse whitespace runs to single spaces and trim the line."""
    return ' '.join(line.split())


def is_injectable(sentence_pl, sequence):
    """Return True if `sequence` fuzzily occurs in the Polish sentence.

    Slides a window of the same token length as `sequence` over the sentence
    and keeps the best fuzz.ratio score.
    """
    sen = sentence_pl.split()
    window_size = len(sequence.split())
    maxx = 0
    for i in range(len(sen) - window_size + 1):
        current = rapidfuzz.fuzz.ratio(' '.join(sen[i:i + window_size]), sequence)
        if current > maxx:
            maxx = current
    return maxx >= THRESHOLD


def get_injected(sentence, sentence_en, sequence, inject):
    """Inject `inject` (wrapped in '$...$') after the window of `sentence_en`
    that best matches `sequence` in the lemmatized `sentence`.

    The injection is only made when the last update of the best score was a
    strict improvement; if the maximum is tied by a later window, the sentence
    is returned unchanged.
    """
    sen = sentence.split()
    sen_en = sentence_en.split()
    window_size = len(sequence.split())
    maxx = 0
    maxx_prv = 0
    maxxi = 0
    for i in range(len(sen) - window_size + 1):
        current = rapidfuzz.fuzz.ratio(' '.join(sen[i:i + window_size]), sequence)
        if current >= maxx:
            maxx_prv = maxx
            maxx = current
            maxxi = i
    if maxx_prv != maxx:
        return ' '.join(sen_en[:maxxi + window_size]) + ' $' + inject + '$ ' \
            + ' '.join(sen_en[maxxi + window_size:])
    return sentence_en


glossary_arg_path, in_arg_path, expected_arg_path = read_arguments()
train_in_path = os.path.join(os.path.expanduser('~'), in_arg_path)
train_expected_path = os.path.join(os.path.expanduser('~'), expected_arg_path)

# Load the glossary and keep roughly 83% of its entries, selected by hashing the
# source term. Note: Python's built-in hash() is salted per process for strings,
# so set PYTHONHASHSEED if the same subset must be reproducible across runs.
glossary = pd.read_csv(os.path.join(os.path.expanduser('~'), glossary_arg_path), sep='\t')
glossary['source_lem'] = [str(default_process(x)) for x in glossary['source_lem']]
glossary['hash'] = [hash(x) for x in glossary['source']]
glossary = glossary[glossary['hash'] % 100 > 16]

# Load the Polish reference, the English input, and its lemmatized variant.
file_pl = pd.read_csv(train_expected_path, sep='\t', header=None, names=['text'])
file_pl['text'] = [default_process(text) for text in file_pl['text'].values.tolist()]
file_pl = file_pl['text'].values.tolist()

file_en = pd.read_csv(train_in_path, sep='\t', header=None, names=['text'])
file_en['text'] = [default_process(text) for text in file_en['text'].values.tolist()]
file_en = file_en['text'].values.tolist()

file_en_lemmatized = pd.read_csv(train_in_path + '.lemmatized', sep='\t', header=None, names=['text'])
file_en_lemmatized['text'] = [default_process(text) for text in file_en_lemmatized['text'].values.tolist()]
file_en_lemmatized = file_en_lemmatized['text'].values.tolist()

en = []
translation_line_counts = []
for line, line_en, line_pl in zip(file_en_lemmatized, file_en, file_pl):
    line = default_process(line)
    # Find up to five glossary terms that fuzzily occur in the lemmatized sentence.
    matchez = rapidfuzz.process.extract(
        query=line, choices=glossary['source_lem'], limit=5,
        score_cutoff=THRESHOLD, scorer=partial_ratio)
    if len(matchez) > 0:
        lines_added = 0
        for match in matchez:
            polish_translation = \
                glossary.loc[lambda df: df['source_lem'] == match[0]]['result'].astype(str).values.flatten()[0]
            # Only inject if the translation also (fuzzily) occurs in the Polish reference.
            if is_injectable(line_pl, polish_translation):
                en.append(get_injected(line, line_en, match[0], polish_translation))
                lines_added += 1
        if lines_added == 0:
            en.append(line_en)
            lines_added = 1
        translation_line_counts.append(lines_added)
    else:
        translation_line_counts.append(1)
        en.append(line_en)

    if len(translation_line_counts) % 1000 == 0:
        print('injecting into file: ' + train_in_path + ': ' + str(len(translation_line_counts)), end='\r')

print('\n')

# Rewrite both files in place; each Polish line is repeated once per English
# copy produced for it, so the two files stay line-aligned.
with open(train_expected_path, 'w') as file_pl_write:
    for line, translation_line_ct in zip(file_pl, translation_line_counts):
        for i in range(translation_line_ct):
            file_pl_write.write(full_strip(line) + '\n')

with open(train_in_path, 'w') as file_en_write:
    for e in en:
        file_en_write.write(e + '\n')
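
# Usage sketch (the script name and paths below are hypothetical placeholders,
# not taken from the original project). All three arguments are resolved
# relative to the user's home directory, and a '<in file>.lemmatized' sibling
# must exist next to the input file. Both the input and expected files are
# overwritten in place:
#
#   python inject_glossary_train.py glossary/glossary.tsv corpus/train.en corpus/train.pl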