import nltk
from nltk.stem import WordNetLemmatizer

# Required NLTK resources for tokenization and lemmatization;
# the downloads are skipped if the data is already present.
nltk.download('punkt', quiet=True)
nltk.download('wordnet', quiet=True)

wl = WordNetLemmatizer()

# train_in_path = 'mt-summit-corpora/train/in.tsv'
# train_expected_path = 'mt-summit-corpora/train/expected.tsv'

train_in_path = 'mt-summit-corpora/dev-0/in.tsv'
train_expected_path = 'mt-summit-corpora/dev-0/expected.tsv'

# Tokenize and lemmatize the input file line by line,
# printing a progress message every 50000 lines.
file_lemmatized = []
with open(train_in_path, 'r') as file:
    for line in file:
        if len(file_lemmatized) % 50000 == 0:
            print('lemmatizing file: ' + train_in_path + ': ' + str(len(file_lemmatized)), end='\r')
        line = nltk.word_tokenize(line)
        file_lemmatized.append(' '.join([wl.lemmatize(x) for x in line]))

# Write the lemmatized lines to a sibling file with a '.lemmatized' suffix.
with open(train_in_path + '.lemmatized', 'w') as file_write:
    for line in file_lemmatized:
        file_write.write(line + '\n')