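# Inject glossary translations into an English->Polish parallel corpus:
# each English sentence that fuzzy-matches a lemmatized glossary term (and
# whose Polish side contains that term's translation) gets a copy with the
# Polish translation spliced in after the matched words, wrapped in $...$.
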
import os
import pandas as pd
import rapidfuzz
from rapidfuzz.fuzz import partial_ratio
from rapidfuzz.utils import default_process


def full_strip(line):
    """Collapse runs of whitespace in a line into single spaces."""
    return ' '.join(line.split())


def is_injectable(sentence_pl, sequence):
    """Return True if `sequence` fuzzy-matches some word window of the Polish sentence."""
    sen = sentence_pl.split()
    window_size = len(sequence.split())
    maxx = 0
    # Slide a window of the sequence's word count over the sentence,
    # keeping the best similarity score.
    for i in range(len(sen) - window_size + 1):
        current = rapidfuzz.fuzz.ratio(' '.join(sen[i:i + window_size]), sequence)
        if current > maxx:
            maxx = current
    return maxx >= THRESHOLD


def get_injected(sentence, sentence_en, sequence, inject):
    """Splice `$inject$` into the English sentence after the words that best match `sequence`.

    The best-matching window is located on the lemmatized `sentence`; its end
    index is then used to split the raw English `sentence_en`. If the top score
    is tied between two windows, the position is ambiguous and the sentence is
    returned unchanged.
    """
    sen = sentence.split()
    sen_en = sentence_en.split()
    window_size = len(sequence.split())
    maxx = 0
    maxx_prv = 0
    maxxi = 0
    for i in range(len(sen) - window_size + 1):
        current = rapidfuzz.fuzz.ratio(' '.join(sen[i:i + window_size]), sequence)
        if current >= maxx:
            maxx_prv = maxx
            maxx = current
            maxxi = i
    if maxx_prv != maxx:
        return ' '.join(sen_en[:maxxi + window_size]) + ' $' + inject + '$ ' \
            + ' '.join(sen_en[maxxi + window_size:])
    return sentence_en


# Minimum fuzzy-match score (0-100) for a glossary term to count as a match.
THRESHOLD = 70

# Paths to the corpus split being processed; swap in the commented train
# paths to run on the training split instead of dev-0.
# train_in_path = '~/mt-summit-corpora/train/in.tsv'
# train_expected_path = '~/mt-summit-corpora/train/expected.tsv'

train_in_path = os.path.join(os.path.expanduser('~'), 'mt-summit-corpora/dev-0/in.tsv')
train_expected_path = os.path.join(os.path.expanduser('~'), 'mt-summit-corpora/dev-0/expected.tsv')

# Glossary TSV with lemmatized source terms ('source_lem') and their Polish
# translations ('result'); normalize the source side for fuzzy matching.
glossary = pd.read_csv('~/mt-summit-corpora/glossary.tsv.lemmatized', sep='\t')
glossary['source_lem'] = [str(default_process(x)) for x in glossary['source_lem']]

# Polish (target) side, normalized the same way rapidfuzz preprocesses text.
file_pl = pd.read_csv(train_expected_path, sep='\t', header=None, names=['text'])
file_pl['text'] = [default_process(text) for text in file_pl['text'].values.tolist()]
file_pl = file_pl['text'].values.tolist()

# English (source) side.
file_en = pd.read_csv(train_in_path, sep='\t', header=None, names=['text'])
file_en['text'] = [default_process(text) for text in file_en['text'].values.tolist()]
file_en = file_en['text'].values.tolist()

# Lemmatized English side, used only for matching against the glossary.
file_en_lemmatized = pd.read_csv(train_in_path + '.lemmatized', sep='\t', header=None, names=['text'])
file_en_lemmatized['text'] = [default_process(text) for text in file_en_lemmatized['text'].values.tolist()]
file_en_lemmatized = file_en_lemmatized['text'].values.tolist()
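
# For each sentence triple, look up to five fuzzy glossary matches on the
# lemmatized English side; every match whose Polish translation is verified
# to occur in the Polish sentence produces one injected English line.
# translation_line_counts records how many copies of each Polish line are
# needed to keep the two output files parallel.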
en = []
translation_line_counts = []
for line, line_en, line_pl in zip(file_en_lemmatized, file_en, file_pl):
    line = default_process(line)
    matchez = rapidfuzz.process.extract(
        query=line, choices=glossary['source_lem'], limit=5, score_cutoff=THRESHOLD, scorer=partial_ratio)
    if len(matchez) > 0:
        lines_added = 0
        for match in matchez:
            polish_translation = \
                glossary.loc[lambda df: df['source_lem'] == match[0]]['result'].astype(str).values.flatten()[0]
            if is_injectable(line_pl, polish_translation):
                en.append(get_injected(line, line_en, match[0], polish_translation))
                lines_added += 1
        # No match survived verification: keep the original sentence once.
        if lines_added == 0:
            en.append(line_en)
            lines_added = 1
        translation_line_counts.append(lines_added)
    else:
        translation_line_counts.append(1)
        en.append(line_en)


# Write the Polish side, duplicating each line once per injected English copy.
with open(train_expected_path + '.injected', 'w') as file_pl_write:
    for line, translation_line_ct in zip(file_pl, translation_line_counts):
        for _ in range(translation_line_ct):
            file_pl_write.write(full_strip(line) + '\n')

with open(train_in_path + '.injected', 'w') as file_en_write:
    for e in en:
        file_en_write.write(e + '\n')
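
# Note: the '.lemmatized' inputs (in.tsv.lemmatized, glossary.tsv.lemmatized)
# are assumed to be produced beforehand by a separate lemmatization step.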