63 lines
1.9 KiB
C++
63 lines
1.9 KiB
C++
#ifndef SENTENCE_TOKENIZER_HDR
|
|
#define SENTENCE_TOKENIZER_HDR
|
|
|
|
#include <string>
|
|
#include <vector>
|
|
#include "concordia/common/config.hpp"
|
|
#include "concordia/tokenized_sentence.hpp"
|
|
#include "concordia/regex_rule.hpp"
|
|
#include "concordia/concordia_config.hpp"
|
|
#include "concordia/concordia_exception.hpp"
|
|
#include <boost/shared_ptr.hpp>
|
|
#include <boost/filesystem.hpp>
|
|
|
|
|
|
/*!
  Class for tokenizing a sentence before generating its hash.
  The tokenizer ignores unnecessary symbols, HTML tags and possibly stop words
  (if that option is enabled) in sentences added to the index,
  and annotates named entities. All of these have to be listed in files
  (see \ref tutorial3).
*/
|
|
|
|
class SentenceTokenizer {
public:
    /*! Constructor.
      \param config config object, holding paths to necessary files
             (named-entity, HTML-tag and stop-word lists)
    */
    explicit SentenceTokenizer(boost::shared_ptr<ConcordiaConfig> config);

    /*! Destructor.
    */
    virtual ~SentenceTokenizer();

    /*! Tokenizes the sentence.
      \param sentence input sentence
      \param byWhitespace whether to tokenize the sentence by whitespace
      \returns tokenized sentence object built on the input sentence
    */
    TokenizedSentence tokenize(const std::string & sentence,
                               bool byWhitespace = false);

private:
    // Builds the named-entity regex rules, presumably reading them from the
    // file at namedEntitiesPath — implementation lives in the .cpp (verify there).
    void _createNeRules(std::string & namedEntitiesPath);

    // Builds the rule that strips/annotates HTML tags, presumably configured
    // by the file at htmlTagsPath — implementation lives in the .cpp.
    void _createHtmlTagsRule(std::string & htmlTagsPath);

    // Builds a single combined regex rule out of the patterns listed in the
    // file at filePath, tagging matches with annotationType/value.
    // NOTE(review): exact file format and wholeWord semantics are defined in
    // the .cpp — confirm there before relying on them.
    boost::shared_ptr<RegexRule> _getMultipleRegexRule(
                                     std::string filePath,
                                     char annotationType,
                                     std::string value,
                                     bool wholeWord = false);

    // Rules used to annotate named entities (see _createNeRules).
    std::vector<RegexRule> _namedEntities;

    // Rule used to handle HTML tags (see _createHtmlTagsRule).
    boost::shared_ptr<RegexRule> _htmlTags;

    // Whether stop-word removal is enabled; presumably read from the config
    // passed to the constructor — confirm in the .cpp.
    bool _stopWordsEnabled;

    // Rule matching stop words; only meaningful when _stopWordsEnabled is true.
    boost::shared_ptr<RegexRule> _stopWords;
};
|
|
|
|
#endif
|