Add LEAVE_OUT parameter to skip a fraction of sentences during import

This commit is contained in:
Rafał Jaworski 2019-02-26 14:00:10 +01:00
parent 5e9c8cf93f
commit c20a8ae58a
2 changed files with 22 additions and 19 deletions

View File

@ -8,6 +8,7 @@ import host
import time
BUFFER_SIZE = 500
LEAVE_OUT = 2 # this leaves out every second sentence
address = 'http://'+host.concordia_host
if len(host.concordia_port) > 0:
@ -23,7 +24,7 @@ def file_len(fname):
def add_examples(examplesData):
req = urllib2.Request(address)
req.add_header('Content-Type', 'application/json')
response = json.loads(urllib2.urlopen(req, json.dumps(examplesData)).read())
response = json.loads(urllib2.urlopen(req, json.dumps(examplesData), 3600).read())
print(response)
if response['status'] == 'error':
raise Exception(response['message'])
@ -51,7 +52,7 @@ if not (sourceFileLength == lemmatizedSourceFileLength and lemmatizedSourceFileL
print("source file: %d\nlemmatized source file: %d\ntarget file: %d\nalignments file: %d\nsource ids file: %d" % (sourceFileLength, lemmatizedSourceFileLength, targetFileLength, alignmentsFileLength, sourceIdsFileLength))
raise Exception("files are not of the same length!")
totalExamples = file_len(sourceFile)
totalExamples = sourceFileLength / LEAVE_OUT
data = {
'operation': 'addTm',
@ -63,7 +64,7 @@ data = {
req = urllib2.Request(address)
req.add_header('Content-Type', 'application/json')
response = json.loads(urllib2.urlopen(req, json.dumps(data)).read())
response = json.loads(urllib2.urlopen(req, json.dumps(data), 3600).read())
print(response)
tmId = int(response['newTmId'])
print "Added new tm: %d" % tmId
@ -76,21 +77,23 @@ data = {
examples = []
start = time.time()
with open(sourceFile) as source_file, open(lemmatizedSourceFile) as lemmatized_source_file, open(targetFile) as target_file, open(alignmentsFile) as alignments_file, open(sourceIdsFile) as source_ids_file:
addedCount = 0
for lineNumber in range(totalExamples):
sourceSentence = source_file.readline().strip()
lemmatizedSourceSentence = lemmatized_source_file.readline().strip()
targetSentence = target_file.readline().strip()
alignment = json.loads(alignments_file.readline().strip())
sourceId = int(source_ids_file.readline().strip())
if lineNumber % LEAVE_OUT == 0:
sourceSentence = source_file.readline().strip()
lemmatizedSourceSentence = lemmatized_source_file.readline().strip()
targetSentence = target_file.readline().strip()
alignment = json.loads(alignments_file.readline().strip())
sourceId = int(source_ids_file.readline().strip())
examples.append([sourceSentence, lemmatizedSourceSentence, targetSentence, alignment, sourceId])
if len(examples) >= BUFFER_SIZE:
data['examples'] = examples
add_examples(data)
mark = time.time()
print "Added %d of %d lemmatized examples. Time elapsed: %.4f s, current speed: %.4f examples/second" % ( (lineNumber+1), totalExamples, mark-start, (lineNumber+1)/(mark-start))
examples = []
examples.append([sourceSentence, lemmatizedSourceSentence, targetSentence, alignment, sourceId])
addedCount += 1
if len(examples) >= BUFFER_SIZE:
data['examples'] = examples
add_examples(data)
mark = time.time()
print "Added %d of %d lemmatized examples. Time elapsed: %.4f s, current speed: %.4f examples/second" % (addedCount, totalExamples, mark-start, addedCount/(mark-start))
examples = []
if len(examples) > 0:
@ -98,7 +101,7 @@ if len(examples) > 0:
add_examples(data)
end = time.time()
print "Added all %d lemmatized sentences. Time elapsed: %.4f s, overall speed: %.4f sentences/second" % ((lineNumber+1), end-start, (lineNumber+1)/(end-start))
print "Added all %d lemmatized sentences. Time elapsed: %.4f s, overall speed: %.4f sentences/second" % (addedCount, end-start, addedCount/(end-start))
print "Generating index..."
start = time.time()
@ -108,7 +111,7 @@ data = {
}
req = urllib2.Request(address)
req.add_header('Content-Type', 'application/json')
urllib2.urlopen(req, json.dumps(data)).read()
urllib2.urlopen(req, json.dumps(data), 3600).read()
end = time.time()
print "Index regeneration complete. The operation took %.4f s" % (end - start)

View File

@ -1,6 +1,6 @@
#!/bin/sh
CORPUS_NAME=opensubtitles_sample
CORPUS_NAME=opensubtitles
CORPUS_PATH=../fast-aligner/corpora/$CORPUS_NAME
SRC_LANG_ID=1
TRG_LANG_ID=2