#ifndef SENTENCE_TOKENIZER_HDR
#define SENTENCE_TOKENIZER_HDR

#include <string>
#include <vector>
#include "concordia/common/config.hpp"
#include "concordia/tokenized_sentence.hpp"
#include "concordia/regex_rule.hpp"
#include "concordia/concordia_config.hpp"
#include "concordia/concordia_exception.hpp"
#include <boost/shared_ptr.hpp>
#include <boost/filesystem.hpp>

/*!
  Class for tokenizing a sentence before generating its hash.
  This operation is used to remove unnecessary symbols and possibly words
  from sentences added to the index and from search patterns. The tokenizer
  annotates html tags, removes stop words (if the option is enabled), and
  annotates named entities and special symbols. All of these have to be
  listed in files (see \ref tutorial3).
*/
class SentenceTokenizer {
public:
    /*! Constructor.
      \param config config object, holding paths to necessary files
    */
    explicit SentenceTokenizer(boost::shared_ptr<ConcordiaConfig> config)
                                             throw(ConcordiaException);

    /*! Destructor.
    */
    virtual ~SentenceTokenizer();

    /*! Tokenizes the sentence.
      \param sentence input sentence
      \returns altered version of the input sentence
    */
    boost::shared_ptr<TokenizedSentence> tokenize(
                                             const std::string & sentence);

private:
    void _createNeRules(std::string & namedEntitiesPath);

    void _createHtmlTagsRule(std::string & htmlTagsPath);

    boost::shared_ptr<RegexRule> _getMultipleRegexRule(
                                             std::string filePath,
                                             char annotationType,
                                             std::string value,
                                             bool wholeWord = false);

    std::vector<RegexRule> _namedEntities;

    boost::shared_ptr<RegexRule> _htmlTags;

    bool _stopWordsEnabled;

    boost::shared_ptr<RegexRule> _stopWords;
};

#endif
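/* Usage sketch (illustrative only, not part of the header): tokenizing a
   single sentence before hash generation. This is a minimal sketch that
   assumes ConcordiaConfig (declared in concordia_config.hpp) can be
   constructed from the path of a configuration file holding the named
   entity, html tag and stop word file locations; the file name
   "concordia.cfg" below is hypothetical.

       boost::shared_ptr<ConcordiaConfig> config(
           new ConcordiaConfig("concordia.cfg"));   // hypothetical path
       SentenceTokenizer tokenizer(config);
       boost::shared_ptr<TokenizedSentence> ts =
           tokenizer.tokenize("Alice has a <b>cat</b>");
       // ts now holds the altered sentence: html tags annotated, stop
       // words removed (if enabled), named entities and special symbols
       // annotated.
*/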