transfix-mt/random-scripts/rapidfuzztest.ipynb

import pandas as pd
import time

import nltk
from nltk.stem import WordNetLemmatizer


wl = WordNetLemmatizer()

glossary = pd.read_csv('../kompendium.tsv', sep='\t', header=None, names=['source', 'result'])

# Lemmatize the English source terms so they can later be matched against
# lemmatized sentences.
source_lemmatized = []
for word in glossary['source']:
    tokens = nltk.word_tokenize(word)
    source_lemmatized.append(' '.join(wl.lemmatize(x) for x in tokens))

glossary['source_lem'] = source_lemmatized
glossary = glossary[['source', 'source_lem', 'result']]
glossary = glossary.set_index('source_lem')
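
# Quick sanity check (illustrative, not part of the original pipeline):
# WordNetLemmatizer collapses inflected English forms, which is what lets
# multi-word glossary terms match lemmatized sentences later.
print(wl.lemmatize('witnesses'), wl.lemmatize('courts'))  # -> witness court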


# dev_path is assumed to be defined earlier in the notebook: the common path
# prefix of the parallel dev corpus (dev_path + '.en' / dev_path + '.pl').
def lemmatize_file(path):
    lemmatized = []
    with open(path, 'r') as file:
        for line in file:
            if len(lemmatized) % 50000 == 0:
                print(len(lemmatized), end='\r')  # progress indicator
            tokens = nltk.word_tokenize(line)
            lemmatized.append(' '.join(wl.lemmatize(x) for x in tokens))
    return lemmatized


start_time = time.time_ns()

# The injection step below needs both sides of the corpus lemmatized.
file_lemmatized = lemmatize_file(dev_path + '.en')
file_pl_lemmatized = lemmatize_file(dev_path + '.pl')
print(len(file_lemmatized), len(file_pl_lemmatized))

stop = time.time_ns()
timex = (stop - start_time) / 1000000000
print(timex)
import copy
import pandas as pd
import rapidfuzz
import time

from rapidfuzz.fuzz import partial_ratio
from rapidfuzz.utils import default_process


THRESHOLD = 88

def is_injectable(sentence_pl, sequence):
    """Check whether `sequence` fuzzily occurs somewhere in the Polish sentence."""
    sen = sentence_pl.split()
    window_size = len(sequence.split())
    maxx = 0
    # Slide a window of len(sequence) words over the sentence and keep the
    # best fuzzy score (the + 1 makes sure the last window is included).
    for i in range(len(sen) - window_size + 1):
        current = rapidfuzz.fuzz.ratio(' '.join(sen[i:i + window_size]), sequence)
        if current > maxx:
            maxx = current
    return maxx >= THRESHOLD
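
# Illustrative check (hypothetical strings): an exact span scores ratio 100,
# which clears THRESHOLD = 88, while an unrelated sentence does not.
assert is_injectable('sąd najwyższy wydał wyrok', 'sąd najwyższy')
assert not is_injectable('zupełnie inne zdanie', 'sąd najwyższy')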

def get_injected(sentence, sequence, inject):
    """Insert `inject` right after the window that best matches `sequence`."""
    sen = sentence.split()
    window_size = len(sequence.split())
    maxx = 0
    maxxi = 0
    # Find the window of len(sequence) words with the best fuzzy score.
    for i in range(len(sen) - window_size + 1):
        current = rapidfuzz.fuzz.ratio(' '.join(sen[i:i + window_size]), sequence)
        if current >= maxx:
            maxx = current
            maxxi = i
    # Splice the injected translation in right after that window.
    return ' '.join(sen[:maxxi + window_size]) + ' ' + inject + ' ' + ' '.join(sen[maxxi + window_size:])
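
# Illustrative usage (hypothetical strings): the translation lands right
# after the best-matching window.
print(get_injected('the supreme court ruled', 'supreme court', 'sąd najwyższy'))
# expected: 'the supreme court sąd najwyższy ruled'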

glossary = pd.read_csv('../kompendium_lem_cleaned.tsv', sep='\t', header=0, index_col=0)
glossary['source_lem'] = [' ' + str(default_process(x)) + ' ' for x in glossary['source_lem']]
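
# Note on preprocessing (assumption about intent): default_process lowercases,
# replaces non-alphanumeric characters with spaces and trims, and the padding
# spaces added above keep partial_ratio from matching inside a word.
print(default_process('Sąd-Najwyższy '))  # expected: 'sąd najwyższy'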

start_time = time.time_ns()
en = []
translation_line_counts = []
for line, line_pl in zip(file_lemmatized, file_pl_lemmatized):
    line = default_process(line)
    line_pl = default_process(line_pl)
    # extract() returns up to 5 (choice, score, index) tuples scoring at
    # least THRESHOLD against the lemmatized glossary terms.
    matches = rapidfuzz.process.extract(query=line, choices=glossary['source_lem'], limit=5, score_cutoff=THRESHOLD, scorer=partial_ratio)
    if len(matches) > 0:
        translation_line_counts.append(len(matches))
        for match in matches:
            polish_translation = glossary.loc[lambda df: df['source_lem'] == match[0]]['result'].astype(str).values.flatten()[0]
            # Only inject when the translation also fuzzily occurs on the Polish side.
            if is_injectable(line_pl, polish_translation):
                en.append(get_injected(line, match[0], polish_translation))
            else:
                en.append(line)
    else:
        translation_line_counts.append(1)
        en.append(line)


stop = time.time_ns()
timex = (stop - start_time) / 1000000000
print(timex)
# output: 78.948892319
# 640
tlcs = copy.deepcopy(translation_line_counts)

translations = pd.read_csv(dev_path + '.pl', sep='\t', header=None, names=['text'])
with open(dev_path + '.injected.crossvalidated.pl', 'w') as file_pl:
    # Repeat each Polish line once per injected English variant so the two
    # output files stay line-aligned. Iterate the column, not the DataFrame
    # (iterating a DataFrame yields column names), and re-append the newline
    # that read_csv strips.
    for line, translation_line_ct in zip(translations['text'], tlcs):
        for i in range(translation_line_ct):
            file_pl.write(line + '\n')


with open(dev_path + '.injected.crossvalidated.en', 'w') as file_en:
    for e in en:
        file_en.write(e + '\n')