/*
 [The "BSD licence"]
 Copyright (c) 2005-2006 Terence Parr
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:
 1. Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
 3. The name of the author may not be used to endorse or promote products
    derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
group Cpp implements ANTLRCore;

cppTypeInitMap ::= [
    "int":"0",
    "long":"0",
    "float":"0.0",
    "double":"0.0",
    "bool":"false",
    "byte":"0",
    "short":"0",
    "char":"0",
    default:"0" // anything other than an atomic type
]
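
/* Illustrative note (a hedged sketch, not part of the original group): the
 * initValue(typeName) template near the end of this file indexes this map,
 * so a grammar-declared return value such as "float f" would be emitted by
 * ruleDeclarations() roughly as:
 *
 *     float f = 0.0;    // "float" -> "0.0" via cppTypeInitMap
 *
 * Types missing from the map fall through to the default entry "0".
 */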

// What we generate: lexer/parser/treeparser; used as a suffix in a few places
generatedType() ::= <<
<if(LEXER)>Lexer<endif><if(PARSER)>Parser<endif><if(TREE_PARSER)>TreeParser<endif>
>>

leadIn(type) ::=
<<
/** \file
 *
 * This <type> file was generated by ANTLR version <ANTLRVersion>
 *
 * - From the grammar source file : <fileName>
 * - On : <generatedTimestamp>
<if(LEXER)>
 * - for the lexer : <name><\n>
<endif>
<if(PARSER)>
 * - for the parser : <name><\n>
<endif>
<if(TREE_PARSER)>
 * - for the tree parser : <name><\n>
<endif>
 *
 * Edit at your own peril.
 */
>>

standardHeaders() ::=
<<
#include \<antlr3/<generatedType()>.h>

<if(profile)>
#warning "No profiling support.."
<endif>
<if(TREE_PARSER)>
#warning "No tree parsing yet..."
<endif>
>>

/** The overall file structure of a recognizer; stores methods for rules
 *  and cyclic DFAs plus support code.
 */
outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
           docComment, recognizer,
           name, tokens, tokenNames, rules, cyclicDFAs,
           bitsets, buildTemplate, profile,
           backtracking, synpreds, memoize, numRules,
           fileName, ANTLRVersion, generatedTimestamp, trace,
           scopes, superClass) ::=
<<
<leadIn("C++ source")>
<@includes>
#include "<name><headerFileExtension()>"
<@end>
<if(actions.(actionScope).header)>
// Header action start ========================================================
<actions.(actionScope).header>
// Header action end ========================================================
<endif>

<headerAction>

<standardHeaders()>

<docComment>
<recognizer>
>>

parserHeaderFile() ::= <<
>>

treeParserHeaderFile() ::= <<
>>

lexerHeaderFile() ::= <<
template\<typename StreamType, typename TokenType, typename TokenBuilder>
class <name> : public antlr3::Lexer\<StreamType,TokenType,TokenBuilder> {
    // carry over general types
    typedef typename StreamType::position_type position_type;
    typedef typename StreamType::char_type char_type;

    typedef antlr3::tokenid_type tokenid_type;
    typedef antlr3::channel_type channel_type;
    typedef antlr3::decision_type decision_type;
    // exception shorthands
    typedef antlr3::MismatchException\<position_type,char_type> MismatchException;
    typedef antlr3::MismatchedRangeException\<position_type,char_type> MismatchedRangeException;
    typedef antlr3::MismatchedSetException\<position_type,char_type> MismatchedSetException;
    typedef antlr3::EarlyExitException\<position_type> EarlyExitException;
    typedef antlr3::NoViableAltException\<position_type> NoViableAltException;
<if(backtracking)>
    // @TODO backtracking ruleMemo = new HashMap[<numRules>+1];<\n> <! index from 1..n !>
<endif>

public:
    <tokens:{static const tokenid_type <tokenPrefix()><it.name> = <it.type>;}; separator="\n">
    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
    <actions.lexer.members>

    <name>(StreamType* input)
    : antlr3::Lexer\<StreamType,TokenType,TokenBuilder>(input)
    {
    }

    <!if(filterMode)!>
    <!filteringNextToken()!>
    <!endif!>
    <rules; separator="\n\n">

    // syn preds
    <synpreds:{p | <lexerSynpred(p)>}>

    // cyclic dfa's
    <cyclicDFAs:{dfa | DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
    // dfa tables..
}; // class <name><\n>
>>

headerFile( LEXER,
            PARSER,
            TREE_PARSER,
            actionScope,
            actions,
            docComment,
            recognizer,
            name,
            tokens,
            tokenNames,
            rules,
            cyclicDFAs,
            bitsets,
            buildTemplate,
            profile,
            backtracking,
            synpreds,
            memoize,
            numRules,
            fileName,
            ANTLRVersion,
            generatedTimestamp,
            trace,
            scopes,
            superClass
          ) ::=
<<
#ifndef _<name>_H
#define _<name>_H
<leadIn("C++ header")>
<actions.(actionScope).headerfile>

<@includes>
<standardHeaders()>
<@end>

<if(LEXER)>
<lexerHeaderFile()>
<endif>
<if(PARSER)>
<parserHeaderFile()>
<endif>
<if(TREE_PARSER)>
<treeParserHeaderFile()>
<endif>


#endif // _<name>_H<\n>
>>

lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
      filterMode) ::= <<

<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>

>>

filteringNextToken() ::= <<
/** An override of Lexer.nextToken() that backtracks over mTokens() looking
 *  for matches. No error can be generated upon error; just rewind, consume
 *  a token and then try again. backtracking needs to be set as well.
 *  Make rule memoization happen only at levels above 1 as we start mTokens
 *  at backtracking==1.
 */
public Token nextToken() {
    while (true) {
        if ( input.LA(1)==CharStream.EOF ) {
            return Token.EOF_TOKEN;
        }
        this->token = 0;
        tokenStartCharIndex = getCharIndex();
        try {
            int m = input.mark();
            backtracking=1; <! means we won't throw slow exception !>
            failed=false;
            mTokens();
            backtracking=0;
            <! mTokens backtracks with synpred at backtracking==2
               and we set the synpredgate to allow actions at level 1. !>
            if ( failed ) {
                input.rewind(m);
                input.consume(); <! advance one char and try again !>
            }
            else {
                return token;
            }
        }
        catch (RecognitionException re) {
            // shouldn't happen in backtracking mode, but...
            reportError(re);
            recover(re);
        }
    }
}

public void memoize(IntStream input, int ruleIndex, int ruleStartIndex)
{
    if ( backtracking > 1 )
        super.memoize(input, ruleIndex, ruleStartIndex);
}

public boolean alreadyParsedRule(IntStream input, int ruleIndex)
{
    if ( backtracking > 1 )
        return super.alreadyParsedRule(input, ruleIndex);
    return false;
}
>>

filteringActionGate() ::= "backtracking == 1"

/** How to generate a parser */
genericParser(
    grammar, name, scopes, tokens, tokenNames, rules, numRules, cyclicDFAs,
    bitsets, inputStreamType, superClass, ASTLabelType="Object",
    labelType, members
) ::= <<
// genericParser
class <name> : public <@superClassName><superClass><@end> {
public:
    static const char* tokenNames[] = {
        "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
    };
    <tokens:{static tokenid_type <tokenPrefix()><it.name>=<it.type>;}; separator="\n">
    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
    <@members>

    <name>(StreamType* input)
    : <superClass>\<StreamType,TokenType>(input)
    {
<if(backtracking)>
        ruleMemo = new HashMap[<numRules>+1];<\n> <! index from 1..n !>
<endif>
    }
    <@end>

    //@TODO public String[] getTokenNames() { return tokenNames; }
    //@TODO public String getGrammarFileName() { return "<fileName>"; }
    <members>

    <rules; separator="\n\n">

    <synpreds:{p | <synpred(p)>}>

    <cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>

    <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
                    words64=it.bits)>
};
>>

parser(
    grammar, name, scopes, tokens, tokenNames,
    rules, numRules, bitsets, ASTLabelType,
    superClass="Parser", labelType="Token",
    members={<actions.parser.members>}) ::= <<
<genericParser(inputStreamType="TokenStream", ...)>
>>

/** How to generate a tree parser; same as parser except the input
 *  stream is a different type.
 */
treeParser(grammar, name, scopes, tokens, tokenNames, globalAction,
    rules, numRules,
    bitsets,
    labelType={<ASTLabelType>}, ASTLabelType="Object",
    superClass="TreeParser", members={<actions.treeparser.members>}
) ::= <<
<genericParser(inputStreamType="TreeNodeStream", ...)>
>>

/** A simpler version of a rule template that is specific to the imaginary
 *  rules created for syntactic predicates. As they never have return values
 *  nor parameters etc..., just give simplest possible method. Don't do
 *  any of the normal memoization stuff in here either; it's a waste.
 *  As predicates cannot be inlined into the invoking rule, they need to
 *  be in a rule by themselves.
 */
synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
<<
// $ANTLR start <ruleName>
public void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throws RecognitionException {
    <if(trace)>System.out.println("enter <ruleName> "+input.LT(1)+" failed="+failed+" backtracking="+backtracking);<endif>
    <if(trace)>
    try {
        <block>
    }
    finally {
        System.out.println("exit <ruleName> "+input.LT(1)+" failed="+failed+" backtracking="+backtracking);
    }
    <else>
    <block>
    <endif>
}
// $ANTLR end <ruleName>
>>

synpred(name) ::= <<
public boolean <name>() {
    this->backtracking++;
    <@start()>
    int start = input.mark();
    try {
        <name>_fragment(); // can never throw exception
    } catch (RecognitionException re) {
        System.err.println("impossible: "+re);
    }
    boolean success = ! this->failed;
    input.rewind(start);
    <@stop()>
    this->backtracking--;
    this->failed = false;
    return success;
}<\n>
>>
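
/* Hedged sketch of how a generated synpred is consumed: evalSynPredicate()
 * further down renders a predicate reference as "<pred>()", so a DFA edge
 * gated on a syntactic predicate would come out roughly as the following
 * C++ (the names synpred1, LA3_0, TOK_LPAREN, and alt3 are hypothetical):
 *
 *     if ( (LA3_0==TOK_LPAREN) && (synpred1()) )
 *     {
 *         alt3 = 1;
 *     }
 *
 * synpred1() marks the stream, runs synpred1_fragment() with backtracking
 * incremented, rewinds, and reports success via the failed flag.
 */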

lexerSynpred(name) ::= <<
<synpred(name)>
>>

ruleMemoization(name) ::= <<
<if(memoize)>
if ( backtracking > 0 && alreadyParsedRule(input, <ruleDescriptor.index>) )
    return <ruleReturnValue()>;
<endif>
>>
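
/* Hedged example expansion: for a memoized rule with index 12 (hypothetical)
 * whose returnType() is void, the guard above renders as:
 *
 *     if ( backtracking > 0 && alreadyParsedRule(input, 12) )
 *         return ;
 *
 * <ruleReturnValue()> is empty for a void rule, hence the bare "return ;".
 */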

/** How to test for failure and return from rule */
checkRuleBacktrackFailure() ::= <<
<if(backtracking)>
if (failed)
    return <ruleReturnValue()>;
<endif>
>>
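
/* Hedged example expansion for a backtracking grammar and a rule with
 * multiple return values:
 *
 *     if (failed)
 *         return retval;
 *
 * In a non-backtracking grammar the <if(backtracking)> guard suppresses
 * the test entirely.
 */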

/** This rule has failed, exit indicating failure during backtrack */
ruleBacktrackFailure() ::= <<
<if(backtracking)>
if (backtracking > 0)
{
    failed = true;
    return <ruleReturnValue()>;
}
<endif>
>>

/** How to generate code for a rule. This includes any return type
 *  data aggregates required for multiple return values.
 */
rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,memoize) ::= <<
<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
<returnScope(scope=ruleDescriptor.returnScope)>

// $ANTLR start <ruleName>
// <fileName>:<description>
public <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throw(antlr3::BaseRecognitionException)
{
    <if(trace)>
    antlr3::Tracer trace(this,"<ruleName>");
    System.out.println("enter <ruleName> "+input.LT(1)+" failed="+failed+" backtracking="+backtracking);
    <endif>
    <ruleDeclarations()>
    <ruleLabelDefs()>
    <ruleDescriptor.actions.init>
    <@preamble()>
    try {
        <ruleMemoization(name=ruleName)>
        <block>
    }
    <if(exceptions)>
    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
    <else>
    <if(!emptyRule)>
    <if(actions.(actionScope).rulecatch)>
    <actions.(actionScope).rulecatch>
    <else>
    catch (RecognitionException re) {
        reportError(re);
        recover(input,re);
    }<\n>
    <endif>
    <endif>
    <endif>
    finally {
        <if(trace)>System.out.println("exit <ruleName> "+input.LT(1)+" failed="+failed+" backtracking="+backtracking);<endif>
        <ruleCleanUp()>
        <(ruleDescriptor.actions.finally):execAction()>
    }
    <@postamble()>
    return <ruleReturnValue()>;
}
// $ANTLR end <ruleName>
>>
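
/* Hedged sketch of what this template yields for a parser rule named "expr"
 * with no trace, no user exceptions, and no return values (all names
 * hypothetical):
 *
 *     // $ANTLR start expr
 *     // Simple.g:10:1: expr : ... ;
 *     public void expr() throw(antlr3::BaseRecognitionException)
 *     {
 *         try {
 *             ... // rule body from <block>
 *         }
 *         catch (RecognitionException re) {
 *             reportError(re);
 *             recover(input,re);
 *         }
 *         finally {
 *             ... // ruleCleanUp() plus any finally actions
 *         }
 *         return ;
 *     }
 *     // $ANTLR end expr
 *
 * Note the try/catch/finally shape is carried over from the Java target;
 * C++ has no finally clause, so this portion still reads as Java.
 */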

catch(decl,action) ::= <<
catch (<decl>) {
    <action>
}
>>

ruleDeclarations() ::= <<
<ruleDescriptor.useScopes:{<it>_stack.push(new <it>_scope());}; separator="\n">
<ruleDescriptor.ruleScope:{<it.name>_stack.push(new <it.name>_scope());}; separator="\n">
<if(ruleDescriptor.hasMultipleReturnValues)>
<returnType()> retval = new <returnType()>();
retval.start = input.LT(1);<\n>
<else>
<ruleDescriptor.returnScope.attributes:{ a |
<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
}>
<endif>
<if(memoize)>
int <ruleDescriptor.name>_StartIndex = input.index();
<endif>
>>

ruleLabelDefs() ::= <<
<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
    :{<labelType> <it.label.text>=null;}; separator="\n"
>
<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
    :{List list_<it.label.text>=null;}; separator="\n"
>
<[ruleDescriptor.ruleLabels,ruleDescriptor.ruleListLabels]
    :ruleLabelDef(label=it); separator="\n"
>
<[ruleDescriptor.allRuleRefsInAltsWithRewrites,ruleDescriptor.allTokenRefsInAltsWithRewrites]
    :{List list_<it>=new ArrayList();}; separator="\n"
>
>>

ruleReturnValue() ::= <<
<if(!ruleDescriptor.isSynPred)>
<if(ruleDescriptor.hasReturnValue)>
<if(ruleDescriptor.hasSingleReturnValue)>
<ruleDescriptor.singleValueReturnName>
<else>
retval
<endif>
<endif>
<endif>
>>

ruleCleanUp() ::= <<
<ruleDescriptor.useScopes:{<it>_stack.pop();}; separator="\n">
<ruleDescriptor.ruleScope:{<it.name>_stack.pop();}; separator="\n">
<if(ruleDescriptor.hasMultipleReturnValues)>
retval.stop = input.LT(-1);<\n>
<endif>
<if(memoize)>
<if(backtracking)>
if ( backtracking > 0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
<endif>
<endif>
>>

/** How to generate a rule in the lexer; naked blocks are used for
 *  fragment rules.
 */
lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throw(antlr3::BaseRecognitionException)
{
    <if(trace)>
    antlr3::Tracer trace(this,"<ruleName>");
    <endif>
    antlr3::CountScope nestingTracker(this->ruleNestingLevel);
    StreamType& input(this->getInput());
    <if(nakedBlock)>
    <ruleDescriptor.actions.init>
    <ruleMemoization(name=ruleName)>
    <block><\n>
    <else>
    tokenid_type type = <tokenPrefix()><ruleName>;
    channel_type channel = antlr3::Token::DEFAULT_CHANNEL;
    position_type start(input.getPosition());
    <ruleDescriptor.actions.init>
    <ruleMemoization(name=ruleName)>
    <block>
    <! create token if none exists *and* we are an outermost token rule !>
    <execAction({if ( this->token == 0 && this->ruleNestingLevel == 1 ) {
        TokenType *tt = TokenBuilder::build(type,start,input,channel);
        std::cout \<\< (*tt) \<\< std::endl;
        this->emit(tt);
    }<\n>
    })>
    <endif>
}
>>

/** How to generate code for the implicitly-defined lexer grammar rule
 *  that chooses between lexer rules.
 */
tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
void mTokens() throw(antlr3::BaseRecognitionException)
{
    StreamType& input(this->getInput());
    <block><\n>
}
>>

// S U B R U L E S

/** A (...) subrule with multiple alternatives */
block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,
      maxK,maxAlt,description) ::= <<
// block <fileName>:<description>
decision_type alt<decisionNumber>=<maxAlt>;
<decls>
<@predecision()>
<decision>
<@postdecision()>
<@prebranch()>
switch (alt<decisionNumber>) {
    <alts:altSwitchCase()>
}
<@postbranch()>
>>

/** A rule block with multiple alternatives */
ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
// ruleBlock <fileName>:<description>
decision_type alt<decisionNumber>=<maxAlt>;
<decls>
<@predecision()>
<decision>
<@postdecision()>
switch (alt<decisionNumber>) {
    <alts:altSwitchCase()>
}
>>

ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
// ruleBlockSingleAlt <fileName>:<description>
<decls>
<@prealt()>
<alts>
<@postalt()>
>>

/** A special case of a (...) subrule with a single alternative */
blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
// <fileName>:<description>
<decls>
<@prealt()>
<alts>
<@postalt()>
>>

/** A (..)+ block with 1 or more alternatives */
positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
// positiveClosureBlock <fileName>:<description>
decision_type cnt<decisionNumber>=0;
<decls>
<@preloop()>
do {
    decision_type alt<decisionNumber>=<maxAlt>;
    <@predecision()>
    <decision>
    <@postdecision()>
    switch (alt<decisionNumber>) {
        <alts:altSwitchCase()>
        default :
            if ( cnt<decisionNumber> >= 1 )
                goto loop<decisionNumber>;
            EarlyExitException eee( input.getPosition(), <decisionNumber> );
            <@earlyExitException()>
            throw eee;
    }
    cnt<decisionNumber>++;
} while (true);
loop<decisionNumber>: ;
<@postloop()>
>>
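
/* Hedged example expansion for a one-alternative ('a')+ subrule as decision
 * 4 (identifiers hypothetical):
 *
 *     decision_type cnt4=0;
 *     do {
 *         decision_type alt4=2;
 *         ... // decision code assigns alt4
 *         switch (alt4) {
 *             case 1 :
 *                 ... // match 'a'
 *                 break;
 *             default :
 *                 if ( cnt4 >= 1 )
 *                     goto loop4;
 *                 EarlyExitException eee( input.getPosition(), 4 );
 *                 throw eee;
 *         }
 *         cnt4++;
 *     } while (true);
 *     loop4: ;
 */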

positiveClosureBlockSingleAlt ::= positiveClosureBlock

/** A (..)* block with 1 or more alternatives */
closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
// closureBlock <fileName>:<description>
<decls>
<@preloop()>
do {
    decision_type alt<decisionNumber>=<maxAlt>;
    <@predecision()>
    <decision>
    <@postdecision()>
    switch (alt<decisionNumber>) {
        <alts:altSwitchCase()>
        default :
            goto loop<decisionNumber>;
    }
} while (true);
loop<decisionNumber>: ;
<@postloop()>
>>

closureBlockSingleAlt ::= closureBlock

/** Optional blocks (x)? are translated to (x|) before code generation
 *  so we can just use the normal block template
 */
optionalBlock ::= block

optionalBlockSingleAlt ::= block

/** A case in a switch that jumps to an alternative given the alternative
 *  number. A DFA predicts the alternative and then a simple switch
 *  does the jump to the code that actually matches that alternative.
 */
altSwitchCase() ::= <<
case <i> :
    <@prealt()>
    <it>
    break;<\n>
>>

/** An alternative is just a list of elements; at outermost level */
alt(elements,altNum,description,autoAST,outerAlt) ::= <<
// alt <fileName>:<description>
{
    <@declarations()>
    <elements:element()>
    <@cleanup()>
}
>>

// E L E M E N T S

/** Dump the elements one per line */
element() ::= <<
// element <fileName>:<description>
<@prematch()>
<it.el><\n>
>>

/** match a token optionally with a label in front */
tokenRef(token,label,elementIndex) ::= <<
// tokenRef
<if(label)>
<label> = input.LT(1);<\n>
<endif>
this->match(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>);
<checkRuleBacktrackFailure()>
>>

/** ids+=ID no AST building */
tokenRefAndListLabel(token,label,elementIndex) ::= <<
<tokenRef(...)>
<listLabel(...)>
>>

listLabel(label) ::= <<
if (list_<label>==null) list_<label>=new ArrayList();
list_<label>.add(<label>);<\n>
>>

/** match a character */
charRef(char,label) ::= <<
// charRef
<if(label)>
<tokenid_type()> <label> = input.LA(1);<\n>
<endif>
this->match(<char>);
<checkRuleBacktrackFailure()>
>>

/** match a character range */
charRangeRef(a,b) ::= "this->matchRange(<a>,<b>); <checkRuleBacktrackFailure()>"

/** For now, sets are interval tests and must be tested inline */
matchSet(s,label,elementIndex,postmatchCode="") ::= <<
// matchSet
<if(label)>
<label> = input.LT(1);<\n>
<endif>
if ( <s> )
{
    <postmatchCode>
    input.consume();
    <if(!LEXER)>
    errorRecovery=false;
    <endif>
    <if(backtracking)>failed=false;<endif>
}
else
{
    <ruleBacktrackFailure()>
    MismatchedSetException mse(input.getPosition(),input.LA(1));
    <@mismatchedSetException()>
    <if(LEXER)>
    this->recover(mse);
    <else>
    this->recoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
    <endif>
    throw mse;
}<\n>
>>

matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
<matchSet(...)>
<listLabel(...)>
>>

/** Match a string literal */
lexerStringRef(string,label) ::= <<
// lexerStringRef
<if(label)>
position_type <label>Start(input.getPosition());
this->match( <string> );
<checkRuleBacktrackFailure()>
TokenType* <label> = TokenBuilder::build(Token.INVALID_TOKEN_TYPE,<label>Start,input,Token.DEFAULT_CHANNEL);
<else>
this->match( <string> );
<checkRuleBacktrackFailure()><\n>
<endif>
>>

wildcard(label,elementIndex) ::= <<
<if(label)>
<label> = input.LT(1);<\n>
<endif>
this->matchAny( input );
<checkRuleBacktrackFailure()>
>>

wildcardAndListLabel(label,elementIndex) ::= <<
<wildcard(...)>
<listLabel(...)>
>>

/** Match . wildcard */
wildcardChar(label, elementIndex) ::= <<
<if(label)>
<tokenid_type()> <label> = input.LA(1);<\n>
<endif>
this->matchAny();
<checkRuleBacktrackFailure()>
>>

tokenid_type() ::= "<if(LEXER)>char_type<else>tokenid_type<endif>"

wildcardCharListLabel(label, elementIndex) ::= <<
<wildcardChar(...)>
<listLabel(...)>
>>

/** Match a rule reference by invoking it possibly with arguments
 *  and a return value or values.
 */
ruleRef(rule,label,elementIndex,args) ::= <<
following.push(FOLLOW_<rule>_in_<ruleName><elementIndex>);
<if(label)>
<label>=<rule>(<args>);<\n>
<else>
<rule>(<args>);<\n>
<endif>
following.pop();
<checkRuleBacktrackFailure()>
>>

/** ids+=ID */
ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
<ruleRef(...)>
<listLabel(...)>
>>

/** A lexer rule reference */
lexerRuleRef(rule,label,args) ::= <<
<if(label)>
position_type <label>Start(input.getPosition());
m<rule>(<args>);
<checkRuleBacktrackFailure()>
TokenType* <label> = TokenBuilder::build(Token.INVALID_TOKEN_TYPE,<label>Start,input,Token.DEFAULT_CHANNEL);
<else>
m<rule>(<args>);
<checkRuleBacktrackFailure()>
<endif>
>>

/** EOF in the lexer */
lexerMatchEOF(label) ::= <<
<if(label)>
position_type <label>Start(input.getPosition());
match(EOF);
<checkRuleBacktrackFailure()>
TokenType* <label> = TokenBuilder::build(Token.EOF,<label>Start,input,Token.DEFAULT_CHANNEL);
<else>
match(EOF);
<checkRuleBacktrackFailure()>
<endif>
>>

/** match ^(root children) in tree parser */
tree(root, children, nullableChildList) ::= <<
<root:element()>
<if(nullableChildList)>
if ( input.LA(1)==antlr3::Token::DOWN ) {
    match(input, antlr3::Token::DOWN, null);
    <checkRuleBacktrackFailure()>
    <children:element()>
    match(input, antlr3::Token::UP, null);
    <checkRuleBacktrackFailure()>
}
<else>
match(input, antlr3::Token::DOWN, null);
<checkRuleBacktrackFailure()>
<children:element()>
match(input, antlr3::Token::UP, null);
<checkRuleBacktrackFailure()>
<endif>
>>

/** Every predicate is used as a validating predicate (even when it is
 *  also hoisted into a prediction expression).
 */
validateSemanticPredicate(pred,description) ::= <<
if ( !(<evalPredicate(...)>) ) {
    <ruleBacktrackFailure()>
    throw new FailedPredicateException(input, "<ruleName>", "<description>");
}
>>

// F i x e d D F A (if-then-else)

dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
<if(!semPredState)>
<tokenid_type()> LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
<endif>
<edges; separator="\nelse ">
else
{
    <if(eotPredictsAlt)>
    alt<decisionNumber> = <eotPredictsAlt>;<\n>
    <else>
    <ruleBacktrackFailure()>
    NoViableAltException nvae(input.getPosition(), "<description>", <decisionNumber>, <stateNumber>);<\n>
    <@noViableAltException()>
    throw nvae;<\n>
    <endif>
}
>>

/** Same as a normal DFA state except that we don't examine lookahead
 *  for the bypass alternative. It delays error detection but this
 *  is faster, smaller, and more like what people expect. For (X)? people
 *  expect "if ( LA(1)==X ) match(X);" and that's it.
 */
dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
<if(!semPredState)>
<tokenid_type()> LA<decisionNumber>_<stateNumber> = input.LA(<k>);
<endif>
<edges; separator="\nelse ">
>>

/** A DFA state that is actually the loopback decision of a closure
 *  loop. If end-of-token (EOT) predicts any of the targets then it
 *  should act like a default clause (i.e., no error can be generated).
 *  This is used only in the lexer so that for ('a')* on the end of a rule
 *  anything other than 'a' predicts exiting.
 */
dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
<if(!semPredState)>
<tokenid_type()> LA<decisionNumber>_<stateNumber> = input.LA(<k>);
<endif>
<edges; separator="\nelse "><\n>
<if(eotPredictsAlt)>
else
{
    alt<decisionNumber> = <eotPredictsAlt>;
}<\n>
<endif>
>>

/** An accept state indicates a unique alternative has been predicted */
dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>;"

/** A simple edge with an expression. If the expression is satisfied,
 *  jump to the target state. To handle gated productions, we may
 *  have to evaluate some predicates for this edge.
 */
dfaEdge(labelExpr, targetState, predicates) ::= <<
if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>)
{
    <targetState>
}
>>
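
/* Hedged example of a fixed-lookahead decision assembled from dfaState(),
 * dfaEdge(), lookaheadTest(), and dfaAcceptState() (decision and state
 * numbers hypothetical); choosing between 'a' and 'b' at k=1 reads roughly:
 *
 *     char_type LA2_0 = input.LA(1);
 *     if ( (LA2_0=='a') )
 *     {
 *         alt2 = 1;
 *     }
 *     else if ( (LA2_0=='b') )
 *     {
 *         alt2 = 2;
 *     }
 *     else
 *     {
 *         NoViableAltException nvae(input.getPosition(), "...", 2, 0);
 *         throw nvae;
 *     }
 */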

// F i x e d D F A (switch case)

/** A DFA state where a SWITCH may be generated. The code generator
 *  decides if this is possible: CodeGenerator.canGenerateSwitch().
 */
dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
switch ( input.LA(<k>) ) {
    <edges; separator="\n">
    default:
        <if(eotPredictsAlt)>
        alt<decisionNumber> = <eotPredictsAlt>;
        <else>
        NoViableAltException nvae( input.getPosition(), "<description>", <decisionNumber>, <stateNumber> );<\n>
        <@noViableAltException()>
        throw nvae;<\n>
        <endif>
}<\n>
>>

dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
switch ( input.LA(<k>) ) {
    <edges; separator="\n">
}<\n>
>>

dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
switch ( input.LA(<k>) ) {
    <edges; separator="\n"><\n>
    <if(eotPredictsAlt)>
    default:
        alt<decisionNumber> = <eotPredictsAlt>;
        break;<\n>
    <endif>
}<\n>
>>

dfaEdgeSwitch(labels, targetState) ::= <<
<labels:{case <it>:}; separator="\n"> {
    <targetState>
} break;
>>

// C y c l i c D F A

/** The code to initiate execution of a cyclic DFA; this is used
 *  in the rule to predict an alt just like the fixed DFA case.
 *  The <name> attribute is inherited via the parser, lexer, ...
 */
dfaDecision(decisionNumber,description) ::= <<
// dfaDecision
alt<decisionNumber> = predictDFA<decisionNumber>(input);
>>

/** The overall cyclic DFA chunk; contains all the DFA states */
cyclicDFA(dfa) ::= <<
/* cyclicDFA=<dfa>
*/
// cyclic = <dfa.cyclic>
// numstates = <dfa.numberOfStates>

// startState = <dfa.startState>
// startState.numberOfTransitions = <dfa.startState.NumberOfTransitions>
// startState.lookaheadDepth = <dfa.startState.LookaheadDepth>

const static short <name>dfa<dfa.decisionNumber>_eot[<dfa.numberOfStates>] = {
    <dfa.eot; wrap="\n ", separator=",", null="-1">
};
const static short <name>dfa<dfa.decisionNumber>_eof[<dfa.numberOfStates>] = {
    <dfa.eof; wrap="\n ", separator=",", null="-1">
};
const static unichar <name>dfa<dfa.decisionNumber>_min[<dfa.numberOfStates>] = {
    <dfa.min; wrap="\n ", separator=",", null="0">
};
const static unichar <name>dfa<dfa.decisionNumber>_max[<dfa.numberOfStates>] = {
    <dfa.max; wrap="\n ", separator=",", null="0">
};
const static short <name>dfa<dfa.decisionNumber>_accept[<dfa.numberOfStates>] = {
    <dfa.accept; wrap="\n ", separator=",", null="-1">
};
const static short <name>dfa<dfa.decisionNumber>_special[<dfa.numberOfStates>] = {
    <dfa.special; wrap="\n ", separator=",", null="-1">
};
<dfa.edgeTransitionClassMap.keys:{ table |
const static short <name>dfa<dfa.decisionNumber>_transition<i0>[] = {
    <table; separator=", ", wrap="\n ", null="-1">
};
}; null="">
const static short <name>dfa<dfa.decisionNumber>_transition[] = {
    <dfa.transitionEdgeTables:{whichTable|<name>dfa<dfa.decisionNumber>_transition<whichTable>,}; separator="\n", null="0 /* fixme? */">
};
<! add attribute for the DFA !>
DFA\<char_type> dfa<dfa.decisionNumber>;
<! this should go in the initializer of the thing
- (id) init
{
    if ((self = [super init]) != nil) {
        eot = <name>dfa<dfa.decisionNumber>_eot;
        eof = <name>dfa<dfa.decisionNumber>_eof;
        min = <name>dfa<dfa.decisionNumber>_min;
        max = <name>dfa<dfa.decisionNumber>_max;
        accept = <name>dfa<dfa.decisionNumber>_accept;
        special = <name>dfa<dfa.decisionNumber>_special;
        if (!(transition = calloc(<dfa.numberOfStates>, sizeof(void*)))) {
            [self release];
            return nil;
        }
        <dfa.transitionEdgeTables:{whichTable|transition[<i0>] = <name>dfa<dfa.decisionNumber>_transition<whichTable>;}; separator="\n", null="">
    }
    return self;
}
!>

<if(dfa.specialStateSTs)>
int specialStateTransition( int state )
{
    int s = state;
    switch ( s ) {
    <dfa.specialStateSTs:{state |
    case <i0> : <! compressed special state numbers 0..n-1 !>
        <state>}; separator="\n">
    }
    <if(backtracking)>
    if ( recognizer.isBacktracking() ) {
        recognizer.setFailed();
        return -1;
    }<\n>
    <endif>
    noViableAlt(s, input);
}<\n>
<endif>


<\n>

// <dfa.description>
decision_type predictDFA<dfa.decisionNumber>( StreamType& input )
{
    /* mark current location (rewind automatically when the rewinder goes
     * out of scope) */
    antlr3::Rewinder\<position_type> markPoint(input.getPosition());
    goto s0; // goto start...
    // ...
    throw NoViableAltException( input.getPosition(), "<dfa.description>", <dfa.decisionNumber>, 0 /* fixme */ );<\n>
}<\n>
>>
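
/* Hedged sketch of the goto-threaded prediction this chunk is building
 * toward (identifiers hypothetical): a state emitted by cyclicDFAState()
 * plus an edge from cyclicDFAEdge() would read roughly:
 *
 *     s0: {
 *         char_type LA7_0 = input.LA(1);
 *         if ( (LA7_0=='x') )
 *         {
 *             input.consume();
 *             goto s1;
 *         }
 *         throw NoViableAltException( input.getPosition(), "...", 7, 0 );
 *     }
 *
 * The "// ..." placeholder inside predictDFA<n> above is evidently where
 * the state bodies are meant to be emitted; the trailing throw is the
 * fall-through error path.
 */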

/** A state in a cyclic DFA */
cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
// cyclicDFAState
s<stateNumber>: {
    <if(semPredState)>
    input.rewind();<\n>
    <else>
    <tokenid_type()> LA<decisionNumber>_<stateNumber> = input.LA(1);
    <endif>
    <edges>
    <if(needErrorClause)>
    throw NoViableAltException( input.getPosition(), "<description>", <decisionNumber>, <stateNumber> );<\n>
    <endif><\n>
}<\n>
>>

/** Just like a fixed DFA edge, test the lookahead and indicate what
 *  state to jump to next if successful.
 */
cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
// cyclicDFAEdge
if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>)
{
    input.consume();
    goto s<targetStateNumber>;
}<\n>
>>

/** An edge pointing at end-of-token; essentially matches any char;
 *  always jump to the target.
 */
eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= "goto s<targetStateNumber>;"

// D F A E X P R E S S I O N S

andPredicates(left,right) ::= "(<left> && <right>)"

orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"

notPredicate(pred) ::= "!(<pred>)"

evalPredicate(pred,description) ::= "<pred>"

evalSynPredicate(pred,description) ::= "<pred>()"

lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"

/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
 *  somewhere. Must ask for the lookahead directly.
 */
isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atom>"

lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
(LA<decisionNumber>_<stateNumber>\>=<lower> && LA<decisionNumber>_<stateNumber>\<=<upper>)
>>

isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>)\>=<lower> && input.LA(<k>)\<=<upper>)"

setTest(ranges) ::= "<ranges; separator=\"||\">"

// A T T R I B U T E S

globalAttributeScope(scope) ::= <<
<if(scope.attributes)>
protected static class <scope.name> {
    <scope.attributes:{<it.decl>;}; separator="\n">
}
protected Stack <scope.name>_stack = new Stack();<\n>
<endif>
>>

ruleAttributeScope(scope) ::= <<
<if(scope.attributes)>
protected static class <scope.name>_scope {
    <scope.attributes:{<it.decl>;}; separator="\n">
}
protected Stack <scope.name>_stack = new Stack();<\n>
<endif>
>>

returnType() ::= <<
<if(ruleDescriptor.hasMultipleReturnValues)>
<ruleDescriptor.name>_return
<else>
<if(ruleDescriptor.singleValueReturnType)>
<ruleDescriptor.singleValueReturnType>
<else>
void
<endif>
<endif>
>>

ruleLabelType(referencedRule) ::= <<
<if(referencedRule.hasMultipleReturnValues)>
<referencedRule.name>_return
<else>
<if(referencedRule.singleValueReturnType)>
<referencedRule.singleValueReturnType>
<else>
void
<endif>
<endif>
>>

/** Use the type-to-init-value map to initialize a type; a type that is
 *  not in the table falls through to the map's default entry ("0").
 */
initValue(typeName) ::= <<
<cppTypeInitMap.(typeName)>
>>

ruleLabelDef(label) ::= <<
<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
>>

returnScope(scope) ::= <<
<if(ruleDescriptor.hasMultipleReturnValues)>
public static class <returnType()> {
    <labelType> start, stop;
    <if(buildAST)>
    <ASTLabelType> tree;
    <else>
    <if(buildTemplate)>
    StringTemplate st;
    <endif>
    <endif>
    <scope.attributes:{<it.decl>;}; separator="\n">
};
<endif>
>>

parameterScope(scope) ::= <<
<scope.attributes:{<it.decl>}; separator=", ">
>>

/** Used in codegen.g to translate $x.y references.
 *  I could have left actions as StringTemplates to be inserted in
 *  the output (so they could use attributes inherited from surrounding
 *  templates), but really wanted to pass in AttributeScope and Attribute
 *  objects so this translation could query them. So, translation of
 *  $x.y to executable code occurs before recognizerST.toString() occurs.
 *  I.e., actions are just text strings during final code generation.
 */
globalAttributeRef(scope,attr) ::= <<
((<scope>)<scope>_stack.peek()).<attr.name>
>>

parameterAttributeRef(attr) ::= "<attr.name>"

scopeAttributeRef(scope,attr,index,negIndex) ::= <<
<if(negIndex)>
((<scope>_scope)<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1)).<attr.name>
<else>
<if(index)>
((<scope>_scope)<scope>_stack.elementAt(<index>)).<attr.name>
<else>
((<scope>_scope)<scope>_stack.peek()).<attr.name>
<endif>
<endif>
>>
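
/* Hedged translation example: given a dynamic scope "function" with an
 * attribute "name" (names hypothetical), an action reference
 * $function::name is rewritten by the peek case above to:
 *
 *     ((function_scope)function_stack.peek()).name
 *
 * and a negative-indexed $function[-1]::name to:
 *
 *     ((function_scope)function_stack.elementAt(function_stack.size()-1-1)).name
 */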

/** $x is either a global scope or x is a rule with a dynamic scope; this
 *  refers to the stack itself, not the top of the stack. Useful for
 *  predicates like {$function.size()>0 && $function::name.equals("foo")}?
 */
isolatedDynamicScopeRef(scope) ::= "<scope>_stack"

/** reference an attribute of a rule; might only have a single return value */
ruleLabelRef(referencedRule,scope,attr) ::= <<
<if(referencedRule.singleValueReturnType)>
<scope>
<else>
<scope>.<attr.name>
<endif>
>>

returnAttributeRef(ruleDescriptor,attr) ::= <<
<if(ruleDescriptor.singleValueReturnType)>
<attr.name>
<else>
retval.<attr.name>
<endif>
>>

/** How to translate $tokenLabel */
tokenLabelRef(label) ::= "<label>"

/** ids+=ID {$ids} or e+=expr {$e} */
listLabelRef(label) ::= "list_<label>"

// not sure the next are the right approach; and they are evaluated early;
// they cannot see TREE_PARSER or PARSER attributes for example. :(

tokenLabelPropertyRef_text(scope,attr) ::= "<scope>.getText()"
tokenLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
tokenLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>.getChannel()"
tokenLabelPropertyRef_index(scope,attr) ::= "<scope>.getTokenIndex()"
tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"

ruleLabelPropertyRef_start(scope,attr) ::= "<scope>.start"
ruleLabelPropertyRef_stop(scope,attr) ::= "<scope>.stop"
ruleLabelPropertyRef_tree(scope,attr) ::= "<scope>.tree"
ruleLabelPropertyRef_text(scope,attr) ::= "input.toString(<scope>.start,<scope>.stop)"
ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"

/** Isolated $RULE ref ok in lexer as it's a Token */
lexerRuleLabel(label) ::= "<label>"

lexerRuleLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
lexerRuleLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
lexerRuleLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
lexerRuleLabelPropertyRef_channel(scope,attr) ::= "<scope>.getChannel()"
lexerRuleLabelPropertyRef_index(scope,attr) ::= "<scope>.getTokenIndex()"
lexerRuleLabelPropertyRef_text(scope,attr) ::= "<scope>.getText()"

// Somebody may ref $template or $tree or $stop within a rule:
rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval.start)"
rulePropertyRef_stop(scope,attr) ::= "((<labelType>)retval.stop)"
rulePropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)retval.tree)"
rulePropertyRef_text(scope,attr) ::= "input.toString(retval.start,input.LT(-1))"
rulePropertyRef_st(scope,attr) ::= "retval.st"

// A C T I O N S

emit(type) ::= "emit(<type>);"

setType(type) ::= "setType(<type>);"

/** How to execute an action */
execAction(action) ::= <<
<if(backtracking)>
<if(actions.(actionScope).synpredgate)>
if ( <actions.(actionScope).synpredgate> )
{
    <action>
}
<else>
if ( backtracking == 0 )
{
    <action>
}
<endif>
<else>
<action>
<endif>
>>
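
/* Hedged example: in a backtracking grammar with no custom synpredgate,
 * a user action { ... } is wrapped as:
 *
 *     if ( backtracking == 0 )
 *     {
 *         ... // the action runs only on a real parse, not inside a synpred
 *     }
 *
 * In filter mode, filteringActionGate() above presumably supplies
 * "backtracking == 1" as the synpredgate instead.
 */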

// M I S C (properties, etc...)

bitset(name, words64) ::= <<
public static final BitSet <name> = new BitSet(new long[]{<words64:{<it>L};separator=",">});<\n>
>>
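
/* Hedged example of one rendered FOLLOW bitset (name and bits hypothetical):
 *
 *     public static final BitSet FOLLOW_ID_in_decl25 = new BitSet(new long[]{0x0000000000000010L});
 *
 * This is still the Java rendering inherited from the Java target; a C++
 * port would need its own bitset representation.
 */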

tokenPrefix() ::= "TOK_"
codeFileExtension() ::= ".cpp"
// used in CPPTarget.java to generate the headerfile extension
headerFileExtension() ::= ".h"

true() ::= "true"
false() ::= "false"