#!/usr/bin/python3
# -*- coding: utf-8 -*-
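
"""Lemmatize a text file, one sentence per line.

Sentences are sent in batches of BUFFER_SIZE to the Concordia
preprocessor service, and one line of lemmatized tokens is written
per input sentence.

Usage: <this script> INPUT_FILE LANGUAGE_CODE LEM_OUTPUT_FILE
"""
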
import json
import sys

import requests

# Number of sentences sent to the preprocessor in a single request.
BUFFER_SIZE = 500


def lemmatize_sentences(language_code, sentences):
    """Send one batch of sentences to the preprocessor and return the result."""
    data = {
        'language': language_code,
        'sentences': sentences
    }
    response = requests.post('http://concordia-preprocessor:9001/lemmatize',
                             json=data, timeout=120)
    # Fail fast on HTTP errors instead of trying to parse an error page.
    response.raise_for_status()
    response_json = json.loads(response.text)
    return response_json['processed_sentences']


def write_result(result, lem_file):
    """Write one line of lemmatized tokens per processed sentence."""
    for s in result:
        lem_file.write(s['tokens'] + '\n')
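

# Require exactly three positional arguments before reading them below.
if len(sys.argv) != 4:
    sys.exit('Usage: {} INPUT_FILE LANGUAGE_CODE LEM_OUTPUT_FILE'.format(sys.argv[0]))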

file_name = sys.argv[1]
language_code = sys.argv[2]
lem_output_name = sys.argv[3]

# Buffer of sentences awaiting lemmatization.
sentences_buffer = []
with open(file_name) as in_file, open(lem_output_name, 'w') as out_lem:
    for line in in_file:
        sentences_buffer.append(line.rstrip())
        if len(sentences_buffer) == BUFFER_SIZE:
            # Send a full batch and reset the buffer.
            write_result(lemmatize_sentences(language_code, sentences_buffer), out_lem)
            sentences_buffer = []

    # Lemmatize whatever is left over after the last full batch.
    if len(sentences_buffer) > 0:
        write_result(lemmatize_sentences(language_code, sentences_buffer), out_lem)