transfix-mt/random-scripts/rapidfuzztest.ipynb

196 lines
5.7 KiB
Plaintext
Raw Normal View History

2022-01-22 00:04:56 +01:00
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [
"import pandas as pd\n",
"import nltk\n",
"from nltk.stem import WordNetLemmatizer\n",
"\n",
"\n",
"# Lemmatize every glossary source term so later fuzzy matching is\n",
"# inflection-insensitive.\n",
"wl = WordNetLemmatizer()\n",
"\n",
"glossary = pd.read_csv('../kompendium.tsv', sep='\\t', header=None, names=['source', 'result'])\n",
"\n",
"source_lemmatized = []\n",
"for term in glossary['source']:\n",
"    tokens = nltk.word_tokenize(term)\n",
"    source_lemmatized.append(' '.join(wl.lemmatize(token) for token in tokens))\n",
"\n",
"glossary['source_lem'] = source_lemmatized\n",
"glossary = glossary[['source', 'source_lem', 'result']]\n",
"# Bug fix: set_index returns a NEW frame; the original call discarded the\n",
"# result, so the index was never actually set.\n",
"glossary = glossary.set_index('source_lem')\n"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n",
"is_executing": true
}
}
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [
"# Bug fix: `time` was only imported by a LATER cell, so this cell raised\n",
"# NameError under Restart & Run All. Import it here.\n",
"import time\n",
"\n",
"# NOTE(review): `dev_path` is never defined anywhere in this notebook, and\n",
"# `nltk`/`wl` come from the first cell -- set dev_path and run that cell\n",
"# first (TODO confirm intended wiring).\n",
"start_time = time.time_ns()\n",
"filex = []\n",
"with open(dev_path + '.pl', 'r') as file:\n",
"    for line in file:\n",
"        # Lightweight progress indicator, refreshed in place every 50k lines.\n",
"        if len(filex) % 50000 == 0:\n",
"            print(len(filex), end='\\r')\n",
"        tokens = nltk.word_tokenize(line)\n",
"        filex.append(' '.join(wl.lemmatize(x) for x in tokens))\n",
"\n",
"# Show only a small sample; printing the whole lemmatized corpus floods the\n",
"# cell output and bloats the notebook file.\n",
"print(filex[:5])\n",
"\n",
"stop = time.time_ns()\n",
"timex = (stop - start_time) / 1000000000\n",
"print(timex)\n"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n",
"is_executing": true
}
}
},
{
"cell_type": "code",
"execution_count": 23,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"78.948892319\n",
"640\n"
]
}
],
"source": [
"import copy\n",
"import pandas as pd\n",
"import rapidfuzz\n",
"import time\n",
"\n",
"from rapidfuzz.fuzz import partial_ratio\n",
"from rapidfuzz.utils import default_process\n",
"\n",
"\n",
"THRESHOLD = 88\n",
"\n",
"def is_injectable(sentence_pl, sequence):\n",
" sen = sentence_pl.split()\n",
" window_size = len(sequence.split())\n",
" maxx = 0\n",
" for i in range(len(sen) - window_size):\n",
" current = rapidfuzz.fuzz.ratio(' '.join(sen[i:i + window_size]), sequence)\n",
" if current > maxx:\n",
" maxx = current\n",
" if maxx >= THRESHOLD:\n",
" return True\n",
" else:\n",
" return False\n",
"\n",
"def get_injected(sentence, sequence, inject):\n",
" sen = sentence.split()\n",
" window_size = len(sequence.split())\n",
" maxx = 0\n",
" maxxi = 0\n",
" for i in range(len(sen) - window_size + 1):\n",
" current = rapidfuzz.fuzz.ratio(' '.join(sen[i:i + window_size]), sequence)\n",
" if current >= maxx:\n",
" maxx = current\n",
" maxxi = i\n",
" return ' '.join(sen[:maxxi + window_size]) + ' ' + inject + ' ' + ' '.join(sen[maxxi + window_size:])\n",
"\n",
"glossary = pd.read_csv('../kompendium_lem_cleaned.tsv', sep='\\t', header=0, index_col=0)\n",
"glossary['source_lem'] = [' ' + str(default_process(x)) + ' ' for x in glossary['source_lem']]\n",
"\n",
"start_time = time.time_ns()\n",
"en = []\n",
"translation_line_counts = []\n",
"for line, line_pl in zip(file_lemmatized, file_pl_lemmatized):\n",
" line = default_process(line)\n",
" line_pl = default_process(line_pl)\n",
" matchez = rapidfuzz.process.extract(query=line, choices=glossary['source_lem'], limit=5, score_cutoff=THRESHOLD, scorer=partial_ratio)\n",
" if len(matchez) > 0:\n",
" translation_line_counts.append(len(matchez))\n",
" for match in matchez:\n",
" polish_translation = glossary.loc[lambda df: df['source_lem'] == match[0]]['result'].astype(str).values.flatten()[0]\n",
" if is_injectable(line_pl, polish_translation):\n",
" en.append(get_injected(line, match[0], polish_translation)[0])\n",
" else:\n",
" en.append(line)\n",
" else:\n",
" translation_line_counts.append(1)\n",
" en.append(line)\n",
"\n",
"\n",
"stop = time.time_ns()\n",
"timex = (stop - start_time) / 1000000000\n",
"print(timex)\n"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
},
{
"cell_type": "code",
"execution_count": 32,
"outputs": [],
"source": [
"tlcs = copy.deepcopy(translation_line_counts)\n",
"\n",
"# Repeat each Polish source line once per matched English variant so the two\n",
"# output files stay line-parallel.\n",
"# Bug fix: the old code did zip(DataFrame, ...), which iterates over COLUMN\n",
"# LABELS ('text'), not rows, and wrote lines with no trailing newline. A\n",
"# plain line read also avoids read_csv choking on tabs/quotes in raw text.\n",
"with open(dev_path + '.pl', 'r') as source_pl:\n",
"    translations = [line.rstrip('\\n') for line in source_pl]\n",
"\n",
"with open(dev_path + '.injected.crossvalidated.pl', 'w') as file_pl:\n",
"    for line, translation_line_ct in zip(translations, tlcs):\n",
"        for _ in range(translation_line_ct):\n",
"            file_pl.write(line + '\\n')\n",
"\n",
"with open(dev_path + '.injected.crossvalidated.en', 'w') as file_en:\n",
"    for e in en:\n",
"        file_en.write(e + '\\n')"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 0
}