diff --git a/cat/js/cat.js b/cat/js/cat.js
index 08513ed..eddcde7 100644
--- a/cat/js/cat.js
+++ b/cat/js/cat.js
@@ -69,10 +69,10 @@ function renderResult(data) {
for(var i = 0; i < data['result']['bestOverlay'].length; i++) {
var fragment = data['result']['bestOverlay'][i];
//previous unmarked fragment
- markedSentence += inputSentence.slice(lastInsertedEnd, fragment['matchedPatternStart']);
+ markedSentence += htmlEncode(inputSentence.slice(lastInsertedEnd, fragment['matchedPatternStart']));
//the marked fragment
- markedSentence += ''+inputSentence.slice(fragment['matchedPatternStart'], fragment['matchedPatternEnd'])+'';
+ markedSentence += ''+htmlEncode(inputSentence.slice(fragment['matchedPatternStart'], fragment['matchedPatternEnd']))+'';
lastInsertedEnd = fragment['matchedPatternEnd'];
@@ -80,7 +80,7 @@ function renderResult(data) {
}
//remaining unmarked fragment
- markedSentence += inputSentence.slice(lastInsertedEnd);
+ markedSentence += htmlEncode(inputSentence.slice(lastInsertedEnd));
res += '
'+markedSentence+'
';
@@ -89,6 +89,12 @@ function renderResult(data) {
return res;
}
+function htmlEncode(value){
+ // Create an in-memory div, set its inner text (which jQuery automatically encodes)
+ // Then grab the encoded contents back out. The div never exists on the page.
+ return $('<div/>').text(value).html();
+}
+
function renderFragment(fragment, number) {
var result = '';
diff --git a/cat/versions_available/europarl_sample.cfg b/cat/versions_available/europarl_sample.cfg
new file mode 100644
index 0000000..9494336
--- /dev/null
+++ b/cat/versions_available/europarl_sample.cfg
@@ -0,0 +1,10 @@
+dir@#@europarl_sample
+concordia_host@#@localhost
+concordia_port@#@8800
+tmid@#@1
+desc@#@Europarl sample (1000 sentences)
+enjoy@#@Życzymy udanej pracy z systemem!
+prompt@#@Wprowadź zdanie (po polsku):
+suggestion@#@Na każde państwo członkowskie Unii Europejskiej przypada jeden komisarz.
+suggestion@#@Komisja Europejska przygotowuje raport na najbliższym posiedzeniu.
+suggestion@#@Wspólny Komitet przyjmuje swój statut.
diff --git a/cat/versions_enabled/europarl_sample.cfg b/cat/versions_enabled/europarl_sample.cfg
new file mode 120000
index 0000000..c90ed2e
--- /dev/null
+++ b/cat/versions_enabled/europarl_sample.cfg
@@ -0,0 +1 @@
+../versions_available/europarl_sample.cfg
\ No newline at end of file
diff --git a/cat/versions_enabled/stocznia_enpl.cfg b/cat/versions_enabled/stocznia_enpl.cfg
deleted file mode 120000
index 884dd56..0000000
--- a/cat/versions_enabled/stocznia_enpl.cfg
+++ /dev/null
@@ -1 +0,0 @@
-../versions_available/stocznia_enpl.cfg
\ No newline at end of file
diff --git a/cat/versions_enabled/stocznia_plen.cfg b/cat/versions_enabled/stocznia_plen.cfg
deleted file mode 120000
index 0ba3868..0000000
--- a/cat/versions_enabled/stocznia_plen.cfg
+++ /dev/null
@@ -1 +0,0 @@
-../versions_available/stocznia_plen.cfg
\ No newline at end of file
diff --git a/concordia-server/index_controller.cpp b/concordia-server/index_controller.cpp
index 6c04dd6..ae4782f 100644
--- a/concordia-server/index_controller.cpp
+++ b/concordia-server/index_controller.cpp
@@ -137,8 +137,8 @@ void IndexController::addAlignedLemmatizedSentences(
std::vector tokenizedLemmatizedSourceSentences =
it->second->tokenizeAll(lemmatizedSourceSentences, true, true);
- std::vector tokenizedSourceSentences = it->second->tokenizeAll(sourceSentences, true, false);
- std::vector tokenizedTargetSentences = it->second->tokenizeAll(targetSentences, true, false);
+ std::vector<TokenizedSentence> tokenizedSourceSentences = it->second->tokenizeAll(sourceSentences, false, false);
+ std::vector<TokenizedSentence> tokenizedTargetSentences = it->second->tokenizeAll(targetSentences, false, false);
std::vector sentenceIds =
_unitDAO.addAlignedSentences(tokenizedSourceSentences, tokenizedTargetSentences, allAlignments, tmId);
diff --git a/concordia-server/searcher_controller.cpp b/concordia-server/searcher_controller.cpp
index 4c59a4a..a176bb9 100644
--- a/concordia-server/searcher_controller.cpp
+++ b/concordia-server/searcher_controller.cpp
@@ -3,6 +3,8 @@
#include
#include
#include
+#include
+
#include "json_generator.hpp"
#include "logger.hpp"
@@ -24,7 +26,8 @@ void SearcherController::simpleSearch(rapidjson::Writer
const int tmId) {
boost::ptr_map::iterator it = _concordiasMap->find(tmId);
if (it != _concordiasMap->end()) {
- pattern = _lemmatizerFacade->lemmatizeIfNeeded(pattern, tmId);
+ TokenizedSentence tokenizedPattern = it->second->tokenize(pattern, false, false);
+ pattern = _lemmatizerFacade->lemmatizeIfNeeded(tokenizedPattern.getTokenizedSentence(), tmId);
SimpleSearchResult result = _unitDAO.getSimpleSearchResult(it->second->simpleSearch(pattern, true));
jsonWriter.StartObject();
jsonWriter.String("status");
@@ -106,18 +109,12 @@ void SearcherController::concordiaPhraseSearch(rapidjson::Writer & jsonWriter,
std::string & pattern,
const int tmId) {
- Logger::log("concordiaSearch");
boost::ptr_map::iterator it = _concordiasMap->find(tmId);
if (it != _concordiasMap->end()) {
- std::string lemmatizedPattern = _lemmatizerFacade->lemmatizeIfNeeded(pattern, tmId);
- Logger::logString("pattern lemmatized", lemmatizedPattern);
- TokenizedSentence originalPattern = it->second->tokenize(pattern, true, false);
- Logger::logInt("original pattern tokenized, token count", originalPattern.getTokens().size());
+ TokenizedSentence originalPattern = it->second->tokenize(pattern, false, false);
+ std::string lemmatizedPattern = _lemmatizerFacade->lemmatizeIfNeeded(originalPattern.getTokenizedSentence(), tmId);
boost::shared_ptr rawConcordiaResult = it->second->concordiaSearch(lemmatizedPattern, true);
- Logger::log("concordia searched, result:");
- Logger::logConcordiaSearchResult(*rawConcordiaResult);
CompleteConcordiaSearchResult result = _unitDAO.getConcordiaResult(rawConcordiaResult, originalPattern);
- Logger::log("result got");
jsonWriter.StartObject();
jsonWriter.String("status");
diff --git a/concordia-server/unit_dao.cpp b/concordia-server/unit_dao.cpp
index 9cd6f3f..eca6f5b 100644
--- a/concordia-server/unit_dao.cpp
+++ b/concordia-server/unit_dao.cpp
@@ -81,11 +81,8 @@ CompleteConcordiaSearchResult UnitDAO::getConcordiaResult(boost::shared_ptr rawConcordiaResult, TokenizedSentence originalPattern) {
- Logger::log("getConcordiaResult with original pattern");
CompleteConcordiaSearchResult result(rawConcordiaResult->getBestOverlayScore());
BOOST_FOREACH(MatchedPatternFragment fragment, rawConcordiaResult->getBestOverlay()) {
- Logger::log("Working on fragment:");
- Logger::logFragment(fragment);
result.addToBestOverlay(_getResultFromFragment(fragment, originalPattern));
}
return result;
@@ -95,7 +92,6 @@ SimpleSearchResult UnitDAO::_getResultFromFragment(
const MatchedPatternFragment & fragment,
const TokenizedSentence & tokenizedPattern) {
- Logger::log("getResultFromFragment");
DBconnection connection;
connection.startTransaction();
@@ -103,15 +99,11 @@ SimpleSearchResult UnitDAO::_getResultFromFragment(
int matchedPatternEnd = 0;
if (tokenizedPattern.getTokens().size() > 0) {
// if it is concordia searching
- Logger::logInt("tokenizedPattern size",tokenizedPattern.getTokens().size());
- Logger::logInt("fragment start",fragment.getStart());
- Logger::logInt("fragment matched length",fragment.getMatchedLength());
matchedPatternStart = tokenizedPattern.getTokens().at(fragment.getStart()).getStart();
matchedPatternEnd = tokenizedPattern.getTokens().at(fragment.getStart()+fragment.getMatchedLength() - 1).getEnd();
}
SimpleSearchResult ssResult(matchedPatternStart, matchedPatternEnd);
- Logger::log("simple search result created");
BOOST_FOREACH(SubstringOccurence sOccurence, fragment.getOccurences()) {
std::string query = "SELECT id, source_segment, target_segment, source_tokens[$1::integer], source_tokens[$2::integer] FROM unit WHERE id = $3::integer;";
@@ -218,24 +210,18 @@ int UnitDAO::_addAlignedUnit (
const std::vector > & alignments,
const int tmId) throw(ConcordiaException) {
- if (sourceSentence.getTokens().size() < alignments.size()) {
+ if (sourceSentence.getTokens().size() != alignments.size()) {
// Here we check if the source sentence, taken from src.tok,
// differs in length from the alignments array.
std::stringstream ss;
- ss << "The size of source sentence is lower than the size of alignments array. Source sentence: " << sourceSentence.getSentence() << ", alignments size:" << alignments.size();
+ ss << "The size of source sentence is different than the size of alignments array. Source sentence: " << sourceSentence.getSentence() << ", alignments size:" << alignments.size();
throw ConcordiaException(ss.str());
- } else if (sourceSentence.getTokens().size() > alignments.size()) {
- // On the other hand, alignments array can be shorter than the source tokenized
- // sentence, because giza can truncate the sentence. In this case, we have to
- // truncate the source sentence too.
-
-
}
std::string query = "INSERT INTO unit(source_segment, target_segment, tm_id, source_tokens, target_tokens) values($1::text,$2::text,$3::integer,$4,$5) RETURNING id";
std::vector params;
- params.push_back(new StringParam(sourceSentence.getSentence()));
- params.push_back(new StringParam(targetSentence.getSentence()));
+ params.push_back(new StringParam(sourceSentence.getOriginalSentence()));
+ params.push_back(new StringParam(targetSentence.getOriginalSentence()));
params.push_back(new IntParam(tmId));
params.push_back(new IntArrayParam(_getTokenPositions(sourceSentence)));
params.push_back(new IntArrayParam(_getTokenPositions(targetSentence)));
diff --git a/concordia.cfg.in b/concordia.cfg.in
index 3be19ab..abae712 100644
--- a/concordia.cfg.in
+++ b/concordia.cfg.in
@@ -2,30 +2,22 @@
# Concordia configuration file
#---------------------------
#
-
-#-------------------------------------------------------------------------------
-# The following settings control the sentence anonymizer mechanism. It is used to
-# remove unnecessary symbols and possibly words from sentences added to index
-# and search patterns. Anonymizer removes html tags, substitutes predefined symbols
-# with a single space, removes stop words (if the option is enabled), as well as
-# named entities and special symbols. All these have to be listed in files.
-
# File containing all html tags (one per line)
-html_tags_path = "@RESOURCES_DIRECTORY@/anonymizer/html_tags.txt"
+html_tags_path = "@RESOURCES_DIRECTORY@/tokenizer/html_tags.txt"
# File containing all symbols to be replaced by spaces
-space_symbols_path = "@RESOURCES_DIRECTORY@/anonymizer/space_symbols.txt"
+space_symbols_path = "@RESOURCES_DIRECTORY@/tokenizer/space_symbols.txt"
# If set to true, words from predefined list are removed
stop_words_enabled = "@STOP_WORDS_ENABLED@"
# If stop_words_enabled is true, set the path to the stop words file
-#stop_words_path = "@RESOURCES_DIRECTORY@/anonymizer/stop_words.txt"
+#stop_words_path = "@RESOURCES_DIRECTORY@/tokenizer/stop_words.txt"
# File containing regular expressions that match named entities
-named_entities_path = "@RESOURCES_DIRECTORY@/anonymizer/named_entities.txt"
+named_entities_path = "@RESOURCES_DIRECTORY@/tokenizer/named_entities.txt"
# File containing special symbols (one per line) to be removed
-stop_symbols_path = "@RESOURCES_DIRECTORY@/anonymizer/stop_symbols.txt"
+stop_symbols_path = "@RESOURCES_DIRECTORY@/tokenizer/stop_symbols.txt"
### eof
diff --git a/mgiza-aligner/Makefile b/mgiza-aligner/Makefile
index f3f99f2..a93731a 100644
--- a/mgiza-aligner/Makefile
+++ b/mgiza-aligner/Makefile
@@ -1,16 +1,16 @@
-SRC_LANG=en
-TRG_LANG=pl
+SRC_LANG=pl
+TRG_LANG=en
CORPUS_NAME=europarl_sample
+SEPARATOR=@\#@
-all: corpora/$(CORPUS_NAME)/aligned.txt corpora/$(CORPUS_NAME)/src.tok corpora/$(CORPUS_NAME)/trg.tok
+all: corpora/$(CORPUS_NAME)/aligned.txt corpora/$(CORPUS_NAME)/src_clean.txt corpora/$(CORPUS_NAME)/trg_clean.txt
-corpora/$(CORPUS_NAME)/aligned.txt: corpora/$(CORPUS_NAME)/giza.cfg corpora/$(CORPUS_NAME)/src.low_trg.low.cooc corpora/$(CORPUS_NAME)/src.low_trg.low.snt corpora/$(CORPUS_NAME)/src.low.vcb corpora/$(CORPUS_NAME)/trg.low.vcb
+corpora/$(CORPUS_NAME)/aligned.txt: corpora/$(CORPUS_NAME)/giza.cfg corpora/$(CORPUS_NAME)/src.lem_trg.lem.cooc corpora/$(CORPUS_NAME)/src.lem_trg.lem.snt corpora/$(CORPUS_NAME)/src.lem.vcb corpora/$(CORPUS_NAME)/trg.lem.vcb
mgiza/mgizapp/bin/mgiza corpora/$(CORPUS_NAME)/giza.cfg
cat corpora/$(CORPUS_NAME)/aligned*part* | ./sortGizaAlignments.py > corpora/$(CORPUS_NAME)/aligned.txt
clean-intermediate-files:
rm -f corpora/$(CORPUS_NAME)/*.lem
- rm -f corpora/$(CORPUS_NAME)/*.low
rm -f corpora/$(CORPUS_NAME)/*.classes
rm -f corpora/$(CORPUS_NAME)/*.classes.cats
rm -f corpora/$(CORPUS_NAME)/*.vcb
@@ -18,47 +18,51 @@ clean-intermediate-files:
rm -f corpora/$(CORPUS_NAME)/*.cooc
rm -f corpora/$(CORPUS_NAME)/aligned*part*
rm -f corpora/$(CORPUS_NAME)/giza.cfg
+ rm -f corpora/$(CORPUS_NAME)/pasted.txt
+ rm -f corpora/$(CORPUS_NAME)/pasted_deduplicated.txt
+ rm -f corpora/$(CORPUS_NAME)/src_deduplicated.txt
+ rm -f corpora/$(CORPUS_NAME)/trg_deduplicated.txt
+ rm -f corpora/$(CORPUS_NAME)/src_deduplicated.tok
+ rm -f corpora/$(CORPUS_NAME)/trg_deduplicated.tok
+ rm -f corpora/$(CORPUS_NAME)/src_clean.tok
+ rm -f corpora/$(CORPUS_NAME)/trg_clean.tok
-
-clean:
- rm -f corpora/$(CORPUS_NAME)/*.tok
- rm -f corpora/$(CORPUS_NAME)/*.lem
- rm -f corpora/$(CORPUS_NAME)/*.low
- rm -f corpora/$(CORPUS_NAME)/*.classes
- rm -f corpora/$(CORPUS_NAME)/*.classes.cats
- rm -f corpora/$(CORPUS_NAME)/*.vcb
- rm -f corpora/$(CORPUS_NAME)/*.snt
- rm -f corpora/$(CORPUS_NAME)/*.cooc
+clean: clean-intermediate-files
+ rm -f corpora/$(CORPUS_NAME)/src_clean.txt
+ rm -f corpora/$(CORPUS_NAME)/trg_clean.txt
rm -f corpora/$(CORPUS_NAME)/aligned*
- rm -f corpora/$(CORPUS_NAME)/giza.cfg
corpora/$(CORPUS_NAME)/giza.cfg: giza.cfg.pattern
sed 's/CORPUS_NAME/'$(CORPUS_NAME)'/' < $< > $@
-corpora/$(CORPUS_NAME)/src.low_trg.low.cooc: corpora/$(CORPUS_NAME)/src.low.vcb corpora/$(CORPUS_NAME)/trg.low.vcb corpora/$(CORPUS_NAME)/src.low_trg.low.snt
- mgiza/mgizapp/bin/snt2cooc $@ corpora/$(CORPUS_NAME)/src.low.vcb corpora/$(CORPUS_NAME)/trg.low.vcb corpora/$(CORPUS_NAME)/src.low_trg.low.snt
+corpora/$(CORPUS_NAME)/src.lem_trg.lem.cooc: corpora/$(CORPUS_NAME)/src.lem.vcb corpora/$(CORPUS_NAME)/trg.lem.vcb corpora/$(CORPUS_NAME)/src.lem_trg.lem.snt
+ mgiza/mgizapp/bin/snt2cooc $@ corpora/$(CORPUS_NAME)/src.lem.vcb corpora/$(CORPUS_NAME)/trg.lem.vcb corpora/$(CORPUS_NAME)/src.lem_trg.lem.snt
-corpora/$(CORPUS_NAME)/src.low_trg.low.snt corpora/$(CORPUS_NAME)/trg.low_src.low.snt corpora/$(CORPUS_NAME)/src.low.vcb corpora/$(CORPUS_NAME)/trg.low.vcb: corpora/$(CORPUS_NAME)/src.low corpora/$(CORPUS_NAME)/trg.low
- mgiza/mgizapp/bin/plain2snt corpora/$(CORPUS_NAME)/src.low corpora/$(CORPUS_NAME)/trg.low
+corpora/$(CORPUS_NAME)/src.lem_trg.lem.snt corpora/$(CORPUS_NAME)/trg.lem_src.lem.snt corpora/$(CORPUS_NAME)/src.lem.vcb corpora/$(CORPUS_NAME)/trg.lem.vcb: corpora/$(CORPUS_NAME)/src.lem corpora/$(CORPUS_NAME)/trg.lem
+ mgiza/mgizapp/bin/plain2snt corpora/$(CORPUS_NAME)/src.lem corpora/$(CORPUS_NAME)/trg.lem
-corpora/$(CORPUS_NAME)/%.classes: corpora/$(CORPUS_NAME)/%.low
+corpora/$(CORPUS_NAME)/%.classes: corpora/$(CORPUS_NAME)/%.lem
mgiza/mgizapp/bin/mkcls -n10 -p$< -V$@
-corpora/$(CORPUS_NAME)/%.low: corpora/$(CORPUS_NAME)/%.lem
- tr '[:upper:]' '[:lower:]' < $< > $@
-
-corpora/$(CORPUS_NAME)/trg.lem: corpora/$(CORPUS_NAME)/trg.tok
+corpora/$(CORPUS_NAME)/trg.lem: corpora/$(CORPUS_NAME)/trg_clean.tok
mono LemmaGenSentenceLemmatizer/LemmaGenSentenceLemmatizer/bin/Debug/LemmaGenSentenceLemmatizer.exe $(TRG_LANG) < $< > $@
-corpora/$(CORPUS_NAME)/src.lem: corpora/$(CORPUS_NAME)/src.tok
+corpora/$(CORPUS_NAME)/src.lem: corpora/$(CORPUS_NAME)/src_clean.tok
mono LemmaGenSentenceLemmatizer/LemmaGenSentenceLemmatizer/bin/Debug/LemmaGenSentenceLemmatizer.exe $(SRC_LANG) < $< > $@
-corpora/$(CORPUS_NAME)/src.tok corpora/$(CORPUS_NAME)/trg.tok: corpora/$(CORPUS_NAME)/src.txt corpora/$(CORPUS_NAME)/trg.txt
- europarl/tools/tokenizer.perl -l $(SRC_LANG) < corpora/$(CORPUS_NAME)/src.txt > corpora/$(CORPUS_NAME)/$(CORPUS_NAME).$(SRC_LANG)
- europarl/tools/tokenizer.perl -l $(TRG_LANG) < corpora/$(CORPUS_NAME)/trg.txt > corpora/$(CORPUS_NAME)/$(CORPUS_NAME).$(TRG_LANG)
- ./clean-corpus-n.perl corpora/$(CORPUS_NAME)/$(CORPUS_NAME) $(TRG_LANG) $(SRC_LANG) corpora/$(CORPUS_NAME)/$(CORPUS_NAME)_clean 0 100
- mv corpora/$(CORPUS_NAME)/$(CORPUS_NAME)_clean.$(SRC_LANG) corpora/$(CORPUS_NAME)/src.tok
- mv corpora/$(CORPUS_NAME)/$(CORPUS_NAME)_clean.$(TRG_LANG) corpora/$(CORPUS_NAME)/trg.tok
- rm corpora/$(CORPUS_NAME)/$(CORPUS_NAME).$(SRC_LANG)
- rm corpora/$(CORPUS_NAME)/$(CORPUS_NAME).$(TRG_LANG)
+
+corpora/$(CORPUS_NAME)/src_clean.txt corpora/$(CORPUS_NAME)/trg_clean.txt corpora/$(CORPUS_NAME)/src_clean.tok corpora/$(CORPUS_NAME)/trg_clean.tok: corpora/$(CORPUS_NAME)/pasted_deduplicated.txt corpora/$(CORPUS_NAME)/src_deduplicated.tok corpora/$(CORPUS_NAME)/trg_deduplicated.tok
+ ./clean_corpus.py $< corpora/$(CORPUS_NAME)/src_deduplicated.tok corpora/$(CORPUS_NAME)/trg_deduplicated.tok corpora/$(CORPUS_NAME)/src_clean.txt corpora/$(CORPUS_NAME)/trg_clean.txt corpora/$(CORPUS_NAME)/src_clean.tok corpora/$(CORPUS_NAME)/trg_clean.tok $(SEPARATOR)
+
+corpora/$(CORPUS_NAME)/%_deduplicated.tok: corpora/$(CORPUS_NAME)/%_deduplicated.txt
+ concordia-sentence-tokenizer -c ../concordia.cfg < $< > $@
+
+corpora/$(CORPUS_NAME)/src_deduplicated.txt corpora/$(CORPUS_NAME)/trg_deduplicated.txt: corpora/$(CORPUS_NAME)/pasted_deduplicated.txt
+ ./cut.py $< corpora/$(CORPUS_NAME)/src_deduplicated.txt corpora/$(CORPUS_NAME)/trg_deduplicated.txt $(SEPARATOR)
+
+corpora/$(CORPUS_NAME)/pasted_deduplicated.txt: corpora/$(CORPUS_NAME)/pasted.txt
+ sort -k 1.13 $< | uniq -s 12 | sort > $@
+
+corpora/$(CORPUS_NAME)/pasted.txt: corpora/$(CORPUS_NAME)/src.txt corpora/$(CORPUS_NAME)/trg.txt
+ ./paste.py corpora/$(CORPUS_NAME)/src.txt corpora/$(CORPUS_NAME)/trg.txt $(SEPARATOR)> $@
diff --git a/mgiza-aligner/clean_corpus.py b/mgiza-aligner/clean_corpus.py
new file mode 100755
index 0000000..6c9ed9f
--- /dev/null
+++ b/mgiza-aligner/clean_corpus.py
@@ -0,0 +1,24 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+import sys
+
+max_tokens = 100
+max_ratio = 4.0
+
+separator = sys.argv[8]
+
+with open(sys.argv[1]) as pasted_file, open(sys.argv[2]) as src_deduplicated_tok, open(sys.argv[3]) as trg_deduplicated_tok, open(sys.argv[4], 'w') as src_clean, open(sys.argv[5], 'w') as trg_clean, open(sys.argv[6], 'w') as src_clean_tok, open(sys.argv[7], 'w') as trg_clean_tok:
+ for line in pasted_file:
+ src_line_orig, trg_line_orig = line.strip()[12:].split(separator)
+ src_line_tok = src_deduplicated_tok.readline().strip()
+ trg_line_tok = trg_deduplicated_tok.readline().strip()
+ src_token_count = len(src_line_tok.split())
+ trg_token_count = len(trg_line_tok.split())
+ if (src_token_count > 0 and trg_token_count > 0 and src_token_count <= max_tokens and trg_token_count <= max_tokens):
+ ratio = float(src_token_count/trg_token_count) if src_token_count > trg_token_count else float(trg_token_count/src_token_count)
+ if (ratio <= max_ratio):
+ src_clean.write(src_line_orig+"\n")
+ trg_clean.write(trg_line_orig+"\n")
+ src_clean_tok.write(src_line_tok+"\n")
+ trg_clean_tok.write(trg_line_tok+"\n")
diff --git a/mgiza-aligner/cut.py b/mgiza-aligner/cut.py
new file mode 100755
index 0000000..e8361ed
--- /dev/null
+++ b/mgiza-aligner/cut.py
@@ -0,0 +1,12 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+import sys
+
+separator = sys.argv[4]
+
+with open(sys.argv[1]) as pasted_file, open(sys.argv[2], 'w') as src_file, open(sys.argv[3], 'w') as trg_file:
+ for line in pasted_file:
+ src_line, trg_line = line.strip()[12:].split(separator)
+ src_file.write(src_line+"\n")
+ trg_file.write(trg_line+"\n")
diff --git a/mgiza-aligner/giza.cfg.pattern b/mgiza-aligner/giza.cfg.pattern
index 12193da..83db5df 100644
--- a/mgiza-aligner/giza.cfg.pattern
+++ b/mgiza-aligner/giza.cfg.pattern
@@ -1,8 +1,8 @@
adbackoff 0
compactadtable 1
compactalignmentformat 0
-coocurrencefile corpora/CORPUS_NAME/src.low_trg.low.cooc
-corpusfile corpora/CORPUS_NAME/src.low_trg.low.snt
+coocurrencefile corpora/CORPUS_NAME/src.lem_trg.lem.cooc
+corpusfile corpora/CORPUS_NAME/src.lem_trg.lem.snt
countcutoff 1e-06
countcutoffal 1e-05
countincreasecutoff 1e-06
@@ -84,13 +84,13 @@ probcutoff 1e-07
probsmooth 1e-07
readtableprefix
restart 0
-sourcevocabularyfile corpora/CORPUS_NAME/src.low.vcb
+sourcevocabularyfile corpora/CORPUS_NAME/src.lem.vcb
t1 1
t2 0
t2to3 0
t3 0
t345 0
-targetvocabularyfile corpora/CORPUS_NAME/trg.low.vcb
+targetvocabularyfile corpora/CORPUS_NAME/trg.lem.vcb
tc
testcorpusfile
th 0
diff --git a/mgiza-aligner/paste.py b/mgiza-aligner/paste.py
new file mode 100755
index 0000000..77fba84
--- /dev/null
+++ b/mgiza-aligner/paste.py
@@ -0,0 +1,15 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+import sys
+
+separator = sys.argv[3]
+
+with open(sys.argv[1]) as src_file, open(sys.argv[2]) as trg_file:
+ index = 0
+ for src_line in src_file:
+ trg_line = trg_file.readline()
+ if separator in src_line or separator in trg_line:
+ raise Exception("Can not use: "+separator+" as a separator. Please set a different one in the Makefile")
+ print ("%012d%s%s%s" % (index, src_line.strip(), separator, trg_line.strip()))
+ index += 1
diff --git a/resources/anonymizer/named_entities.txt b/resources/anonymizer/named_entities.txt
deleted file mode 100644
index 905e61b..0000000
--- a/resources/anonymizer/named_entities.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-[0-9]{1,2}[\.\-/][0-9]{1,2}[\.\-/][0-9]{4} ne_date
-[\w\._\d]+@\w+(\.\w+)* ne_email
-[0-9]+([\.\,][0-9]+)? ne_number
diff --git a/resources/anonymizer/html_tags.txt b/resources/tokenizer/html_tags.txt
similarity index 100%
rename from resources/anonymizer/html_tags.txt
rename to resources/tokenizer/html_tags.txt
diff --git a/resources/tokenizer/named_entities.txt b/resources/tokenizer/named_entities.txt
new file mode 100644
index 0000000..25e292a
--- /dev/null
+++ b/resources/tokenizer/named_entities.txt
@@ -0,0 +1,5 @@
+[0-9]{1,2}[\.\-/][0-9]{1,2}[\.\-/][0-9]{4} ne_date
+[0-9]{4}[\.\-/][0-9]{1,2}[\.\-/][0-9]{1,2} ne_date
+[\w\._\d]+@\w+(\.\w+)* ne_email
+[0-9]+[\.\)]([0-9]+\.)+ ne_bullet
+\b[0-9]+([\.\,][0-9]+)?\b ne_number
diff --git a/resources/anonymizer/space_symbols.txt b/resources/tokenizer/space_symbols.txt
similarity index 100%
rename from resources/anonymizer/space_symbols.txt
rename to resources/tokenizer/space_symbols.txt
diff --git a/resources/anonymizer/stop_symbols.txt b/resources/tokenizer/stop_symbols.txt
similarity index 100%
rename from resources/anonymizer/stop_symbols.txt
rename to resources/tokenizer/stop_symbols.txt
diff --git a/resources/anonymizer/stop_words.txt b/resources/tokenizer/stop_words.txt
similarity index 100%
rename from resources/anonymizer/stop_words.txt
rename to resources/tokenizer/stop_words.txt
diff --git a/tests/addLemmatizedTM.sh b/tests/addLemmatizedTM.sh
index b533764..fc34843 100755
--- a/tests/addLemmatizedTM.sh
+++ b/tests/addLemmatizedTM.sh
@@ -1,7 +1,7 @@
#!/bin/sh
-CORPUS_NAME="setimes_enhr"
-SRC_LANG_ID=2
-TRG_LANG_ID=6
+CORPUS_NAME="europarl_sample"
+SRC_LANG_ID=1
+TRG_LANG_ID=2
-./addAlignedLemmatizedTM.py $CORPUS_NAME ../mgiza-aligner/corpora/$CORPUS_NAME/src.tok $SRC_LANG_ID ../mgiza-aligner/corpora/$CORPUS_NAME/trg.tok $TRG_LANG_ID ../mgiza-aligner/corpora/$CORPUS_NAME/aligned.txt
+./addAlignedLemmatizedTM.py $CORPUS_NAME ../mgiza-aligner/corpora/$CORPUS_NAME/src_clean.txt $SRC_LANG_ID ../mgiza-aligner/corpora/$CORPUS_NAME/trg_clean.txt $TRG_LANG_ID ../mgiza-aligner/corpora/$CORPUS_NAME/aligned.txt