import os

import nltk
import pandas as pd
from nltk.stem import WordNetLemmatizer

# word_tokenize needs the 'punkt' models and the lemmatizer needs WordNet data
nltk.download('punkt')
nltk.download('wordnet')

wl = WordNetLemmatizer()

# Load the glossary: tab-separated "source term <TAB> target-language result"
glossary_path = os.path.join(os.path.expanduser('~'), 'mt-summit-corpora/glossary.tsv')
glossary = pd.read_csv(glossary_path, sep='\t', header=None, names=['source', 'result'])

# Lemmatize each source term token by token so it can later be matched
# against lemmatized input text
source_lemmatized = []
for term in glossary['source']:
    tokens = nltk.word_tokenize(term)
    source_lemmatized.append(' '.join(wl.lemmatize(t) for t in tokens))

glossary['source_lem'] = source_lemmatized
glossary = glossary[['source', 'source_lem', 'result']]

# Write the augmented glossary next to the original file
glossary.to_csv(glossary_path + '.lemmatized', sep='\t', index=False)
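

# --- A minimal sketch (not part of the original script) of how the lemmatized
# glossary might be used downstream: lemmatize an input sentence with the same
# WordNetLemmatizer, then look up glossary rows whose lemmatized source phrase
# occurs in it. The helper name find_glossary_matches is hypothetical.
import os

import nltk
import pandas as pd
from nltk.stem import WordNetLemmatizer

wl = WordNetLemmatizer()
glossary_path = os.path.join(os.path.expanduser('~'), 'mt-summit-corpora/glossary.tsv')
lemmatized_glossary = pd.read_csv(glossary_path + '.lemmatized', sep='\t')


def find_glossary_matches(sentence: str) -> pd.DataFrame:
    """Return glossary rows whose lemmatized source phrase appears in the sentence."""
    tokens = nltk.word_tokenize(sentence)
    sentence_lem = ' '.join(wl.lemmatize(t) for t in tokens)
    mask = lemmatized_glossary['source_lem'].apply(lambda term: term in sentence_lem)
    return lemmatized_glossary[mask]


# Example: print any glossary entries found in a sample sentence
print(find_glossary_matches('The operators inspected the turbines yesterday.'))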