/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
 * NOTE : please see documentation at bottom of this file. (It was placed there because it's
 * tiring to always have to page past it... :)
*/
options
{
/** The default package for this parser kit. This is now done from Maven.
NODE_PACKAGE="org.apache.velocity.runtime.parser";
*/
/** A source file will be generated for each non-terminal */
MULTI=true;
/**
* Each node will have access to the parser, I did this so
* some global information can be shared via the parser. I
* think this will come in handy for keeping track of
* context, and being able to push changes back into
* the context when nodes make modifications to the
* context by setting properties, variables and
* what not.
*/
NODE_USES_PARSER=true;
/**
* The parser must be non-static in order for the
* above option to work, otherwise the parser value
* is passed in as null, which isn't all that useful ;)
*/
STATIC=false;
/**
* Enables the use of a visitor that each of nodes
* will accept. This way we can separate the logic
* of node processing in a visitor and out of the
* nodes themselves. If processing changes then
* nothing has to change in the node code.
*/
VISITOR=true;
/**
* Declare that we are accepting unicode input and
* that we are using a custom character stream class
* Note that the char stream class is really a slightly
* modified ASCII_CharStream, as it appears we are safe
* because we only deal with pre-encoding-converted
* Readers rather than raw input streams.
*/
UNICODE_INPUT=true;
USER_CHAR_STREAM=true;
/**
* for debugging purposes. Those are now handled from within javacc-maven-plugin debugging flags in pom.xml
DEBUG_PARSER = true;
DEBUG_LOOKAHEAD = true;
DEBUG_TOKEN_MANAGER = true;
*/
}
PARSER_BEGIN(${parser.basename}Parser)
package ${parser.package};
import java.io.*;
import java.util.*;
import org.apache.velocity.Template;
import org.apache.velocity.exception.VelocityException;
import org.apache.velocity.runtime.RuntimeServices;
import org.apache.velocity.runtime.parser.*;
import org.apache.velocity.runtime.parser.node.*;
import org.apache.velocity.runtime.directive.*;
import org.apache.velocity.runtime.directive.MacroParseException;
import org.apache.velocity.runtime.RuntimeConstants;
import static org.apache.velocity.runtime.RuntimeConstants.SpaceGobbling;
import org.slf4j.Logger;
/**
* This class is responsible for parsing a Velocity
* template. This class was generated by JavaCC using
* the JJTree extension to produce an Abstract
* Syntax Tree (AST) of the template.
*
* Please look at the Parser.jjt file which is
* what controls the generation of this class.
*
* @author Jason van Zyl
* @author Geir Magnusson Jr.
* @author Henning P. Schmiedehausen
* @version $Id$
*/
public class ${parser.basename}Parser implements Parser
{
/**
 * Parser debugging flag.
 * When debug is active, javacc Parser will contain (among other things)
 * a trace_call() method. So we use the presence of this method to
 * initialize our flag.
 */
private static boolean debugParser;
static
{
try
{
/* reflective probe: trace_call(String) only exists in a DEBUG_PARSER build of the generated parser */
${parser.basename}Parser.class.getDeclaredMethod("trace_call", String.class);
debugParser = true;
}
catch(NoSuchMethodException nsfe)
{
/* method absent: this is a non-debug build, so tracing stays off */
debugParser = false;
}
}
/**
 * Parser-local trace helper: writes the message to stdout, but only when
 * the generated parser was built with debugging enabled. Use sparingly in
 * production, since each and every call introduces an execution branch
 * and slows down parsing.
 *
 * @param message text to emit when debug tracing is active
 */
public static void trace(String message)
{
    if (debugParser)
    {
        System.out.println(message);
    }
}
/**
 * Keep track of defined macros, used for escape processing.
 * NOTE(review): raw Map — presumably keyed by macro name (String); confirm value type before parameterizing.
 */
private Map macroNames = new HashMap();
/**
 * Current template we are parsing. Passed to us in parse()
 */
public Template currentTemplate = null;
/**
 * Set to true if the property
 * RuntimeConstants.RUNTIME_REFERENCES_STRICT_ESCAPE is set to true
 */
public boolean strictEscape = false;
/**
 * Set to true if the property
 * RuntimeConstants.PARSER_HYPHEN_ALLOWED is set to true
 */
public boolean hyphenAllowedInIdentifiers = false;
/* reusable character stream, re-initialized for each parse() call */
VelocityCharStream velcharstream = null;
/* runtime services handed to us in the constructor */
private RuntimeServices rsvc = null;
/**
 * @return the RuntimeServices instance this parser was built with
 */
@Override
public RuntimeServices getRuntimeServices()
{
return rsvc;
}
/**
 * This constructor was added to allow the re-use of parsers.
 * The normal constructor takes a single argument which is
 * an InputStream. This simply creates a re-usable parser
 * object, we satisfy the requirement of an InputStream
 * by using a newline character as an input stream.
 *
 * @param rs RuntimeServices supplying configuration, logging and directives
 */
public ${parser.basename}Parser( RuntimeServices rs)
{
/*
 * need to call the CTOR first thing.
 */
this( new VelocityCharStream(
new ByteArrayInputStream("\n".getBytes()), 1, 1 ));
/*
 * then initialize logger
 */
log = rs.getLog("parser");
/*
 * now setup a VCS for later use; parse() re-inits it with the real reader
 */
velcharstream = new VelocityCharStream(
new ByteArrayInputStream("\n".getBytes()), 1, 1 );
/* cache configuration flags once, instead of re-reading them per parse */
strictEscape =
rs.getBoolean(RuntimeConstants.RUNTIME_REFERENCES_STRICT_ESCAPE, false);
hyphenAllowedInIdentifiers =
rs.getBoolean(RuntimeConstants.PARSER_HYPHEN_ALLOWED, false);
/*
 * and save the RuntimeServices
 */
rsvc = rs;
/*
 * then initialize customizable characters (substituted at build time
 * from the Maven ${parser.char.*} properties)
 */
dollar = '${parser.char.dollar}';
hash = '${parser.char.hash}';
at = '${parser.char.at}';
asterisk = '${parser.char.asterisk}';
}
/**
 * This was also added to allow parsers to be
 * re-usable. Normal JavaCC use entails passing an
 * input stream to the constructor and the parsing
 * process is carried out once. We want to be able
 * to re-use parsers: we do this by adding this
 * method and re-initializing the lexer with
 * the new stream that we want parsed.
 *
 * @param reader source of the template text
 * @param template the template being parsed; used for error reporting
 * @return root of the resulting AST
 * @throws ParseException on any lexical or grammatical error
 */
@Override
public SimpleNode parse( Reader reader, Template template )
    throws ParseException
{
    SimpleNode sn = null;
    currentTemplate = template;
    try
    {
        /* reset all lexer state before re-use */
        token_source.clearStateVars();
        /*
         * reinitialize the VelocityCharStream
         * with the new reader
         */
        velcharstream.ReInit( reader, 1, 1 );
        /*
         * now reinit the Parser with this CharStream
         */
        ReInit( velcharstream );
        /*
         * do that voodoo...
         */
        sn = process();
    }
    catch (MacroParseException mee)
    {
        /*
         * thrown by the Macro class when something is amiss in the
         * Macro specification
         */
        log.error("{}: {}", template.getName(), mee.getMessage(), mee);
        throw mee;
    }
    catch (ParseException pe)
    {
        log.error("{}: {}", currentTemplate.getName(), pe.getMessage());
        throw new TemplateParseException (pe.currentToken,
            pe.expectedTokenSequences, pe.tokenImage, currentTemplate.getName());
    }
    catch (TokenMgrError tme)
    {
        /*
         * preserve the lexer error as the cause instead of discarding it,
         * so callers can see the full failure chain
         */
        ParseException lexError = new ParseException("Lexical error: " + tme.toString());
        lexError.initCause(tme);
        throw lexError;
    }
    catch (Exception e)
    {
        String msg = template.getName() + ": " + e.getMessage();
        log.error(msg, e);
        throw new VelocityException(msg, e, getRuntimeServices().getLogContext().getStackTrace());
    }
    /* note: intentionally not in a finally block — on error the current
       template stays set for downstream error reporting */
    currentTemplate = null;
    return sn;
}
/**
 * This method gets a Directive from the runtime services' directive registry.
 *
 * @param directive directive name
 * @return the Directive instance, or null if not registered
 */
@Override
public Directive getDirective(String directive)
{
return (Directive) rsvc.getDirective(directive);
}
/**
 * This method finds out whether the directive exists in the directives Map.
 *
 * @param directive directive name
 * @return true if a directive of that name is registered
 */
@Override
public boolean isDirective(String directive)
{
return rsvc.getDirective(directive) != null;
}
/**
 * Produces the processed output for an escaped control or
 * pluggable directive.
 *
 * @param strImage the matched image, i.e. some backslashes followed by a directive tag
 * @return the image with half of the leading backslashes absorbed when the
 *         directive is recognized, or the untouched image otherwise
 */
private String escapedDirective( String strImage )
{
    int iLast = strImage.lastIndexOf("\\");
    String strDirective = strImage.substring(iLast + 1);

    /* strip the leading directive marker (and optional surrounding braces) once,
       so we don't have to call substring repeatedly below */
    String dirTag = strDirective.substring(1);
    if (dirTag.charAt(0) == '{')
    {
        dirTag = dirTag.substring(1, dirTag.length() - 1);
    }

    /*
     * A directive is "recognized" when we are in strict escape mode (then we
     * always absorb the backslash regardless of whether the directive is
     * defined), when it is a predefined directive or a known macro (this
     * detection is approximate at best), or when it is one of the built-in
     * control directives.
     */
    boolean bRecognizedDirective =
           strictEscape
        || isDirective(dirTag)
        || macroNames.containsKey(dirTag)
        || rsvc.isVelocimacro(dirTag, currentTemplate)
        || dirTag.equals("if")
        || dirTag.equals("end")
        || dirTag.equals("set")
        || dirTag.equals("else")
        || dirTag.equals("elseif");

    /*
     * if recognized, make the proper prefix string (let the escapes do their
     * thing); otherwise just return the image as it is
     */
    return bRecognizedDirective
        ? strImage.substring(0, iLast / 2) + strDirective
        : strImage;
}
/**
 * Check whether there is a left parenthesis with leading optional
 * whitespaces. This method is used in the semantic look ahead of
 * Directive method. This is done in code instead of as a production
 * for simplicity and efficiency.
 *
 * @return true if the next non-whitespace character in the stream is '('
 */
private boolean isLeftParenthesis()
{
char c;
int no = 0;
try {
while(true)
{
/*
 * Read a character, counting how many we consume so we can back up
 */
c = velcharstream.readChar();
no++;
if (c == '(')
{
return true;
}
/*
 * if not a white space return
 */
else if (c != ' ' && c != '\n' && c != '\r' && c != '\t')
{
return false;
}
}
}
catch(IOException e)
{
/* deliberately ignored: EOF while scanning means "no parenthesis found" */
}
finally
{
/*
 * Backup the stream to the initial state, whatever the outcome
 */
velcharstream.backup(no);
}
return false;
}
/**
 * Check whether there is a right parenthesis with leading optional
 * whitespaces. This method is used in the semantic look ahead of
 * Directive method. This is done in code instead of as a production
 * for simplicity and efficiency.
 *
 * @return true if the next non-whitespace content is ')'
 */
private boolean isRightParenthesis()
{
char c;
/* no == -1 flags that we haven't started reading raw characters yet */
int no = -1;
try {
while(true)
{
/*
 * Fast path on the first iteration: inspect the already-lexed
 * lookahead token before touching the raw character stream
 */
if (no == -1)
{
switch (getToken(1).kind)
{
case RPAREN:
return true;
case WHITESPACE:
case NEWLINE:
/* whitespace token: fall through to raw character scanning */
no = 0;
break;
default:
return false;
}
}
c = velcharstream.readChar();
no++;
if (c == ')')
{
return true;
}
/*
 * if not a white space return
 */
else if (c != ' ' && c != '\n' && c != '\r' && c != '\t')
{
return false;
}
}
}
catch(IOException e)
{
/* deliberately ignored: EOF while scanning means "no parenthesis found" */
}
finally
{
/*
 * Backup the stream to the initial state; only needed when we
 * actually consumed raw characters (the token fast path consumes none)
 */
if (no > 0) velcharstream.backup(no);
}
return false;
}
/**
 * We use this method in a lookahead to determine if we are in a macro
 * default value assignment. The standard lookahead is not smart enough.
 * here we look for the equals after the reference.
 *
 * @return true if the next non-whitespace character after the reference is '='
 */
private boolean isAssignment()
{
// Basically if the last character read was not '$' then false
if (token_source.getCurrentLexicalState() != REFERENCE) return false;
char c = ' ';
int backup = 0;
try
{
// Read through any white space
while(Character.isWhitespace(c))
{
c = velcharstream.readChar();
backup++;
}
// This is what we are ultimately looking for
if (c != '=') return false;
}
catch (IOException e)
{
// NOTE(review): an EOF here skips the '=' check and falls through to
// "return true" — looks intentional for end-of-stream lookahead, but confirm
}
finally
{
/* restore the stream to where it was before we peeked */
velcharstream.backup(backup);
}
return true;
}
/**
 * @return the template currently being parsed, or null outside of parse()
 */
@Override
public Template getCurrentTemplate()
{
return currentTemplate;
}
/**
 * Clears the current template reference.
 */
@Override
public void resetCurrentTemplate()
{
currentTemplate = null;
}
/**
 * @return the configured reference-introducing character (default '$')
 */
@Override
public char dollar()
{
return dollar;
}
/**
 * @return the configured directive-introducing character (default '#')
 */
@Override
public char hash()
{
return hash;
}
/**
 * @return the configured block-macro character (default '@')
 */
@Override
public char at()
{
return at;
}
/**
 * @return the configured silent/comment character (default '*')
 */
@Override
public char asterisk()
{
return asterisk;
}
/* customizable syntax characters; overwritten in the constructor from
   the build-time ${parser.char.*} properties */
private char dollar = '$';
private char hash = '#';
private char at = '@';
private char asterisk = '*';
}
PARSER_END(${parser.basename}Parser)
TOKEN_MGR_DECLS:
{
/* nesting depth bookkeeping for the token manager */
private int fileDepth = 0;
/* count of '(' seen in the current lexical state */
private int lparen = 0;
/* count of ')' seen in the current lexical state */
private int rparen = 0;
/* count of open '{' in the current lexical state */
private int curlyLevel = 0;
/* saved lexical states; see ParserState. NOTE(review): raw List of ParserState */
List stateStack = new ArrayList(50);
/* true while inside any comment form */
private boolean inComment;
/* true while lexing the body of a #set directive */
private boolean inSet;
/**
 * Our own trace method; delegates to the parser-level trace. Use sparingly
 * in production, since each
 * and every call will introduce an execution branch and slow down parsing.
 */
public static void trace(String message)
{
${parser.basename}Parser.trace(message);
}
/**
 * Switches to a new lexical state (adds some logging around the
 * generated SwitchTo method).
 *
 * @param lexState target lexical state constant
 */
public void switchTo(int lexState)
{
trace(" switch to " + lexStateNames[lexState]);
SwitchTo(lexState);
}
/**
 * @return the token manager's current lexical state constant
 */
public int getCurrentLexicalState()
{
return curLexState;
}
/**
 * pops a state off the stack, and restores paren counts
 *
 * @return boolean : success of operation; false when the stack was already
 *         empty, in which case we reset the paren count and drop to DEFAULT
 */
public boolean stateStackPop()
{
    /* check emptiness explicitly rather than catching IndexOutOfBoundsException */
    if (stateStack.isEmpty())
    {
        /* empty stack: nothing to restore, fall back to the DEFAULT state */
        lparen=0;
        switchTo(DEFAULT);
        return false;
    }

    ParserState s = (ParserState) stateStack.remove(stateStack.size() - 1); // stack.pop

    trace(" stack pop (" + stateStack.size() + ")");

    /* restore the counters and lexical state saved at push time */
    lparen = s.lparen;
    rparen = s.rparen;
    curlyLevel = s.curlyLevel;

    switchTo(s.lexstate);
    return true;
}
/**
 * pushes the current state onto the 'state stack',
 * and maintains the parens counts
 * public because we need it in PD & VM handling
 *
 * @return boolean : success. It can fail if the state machine
 * gets messed up (do don't mess it up :)
 */
public boolean stateStackPush()
{
trace(" (" + stateStack.size() + ") pushing cur state : " + lexStateNames[curLexState] );
/* snapshot all counters plus the current lexical state */
ParserState s = new ParserState();
s.lparen = lparen;
s.rparen = rparen;
s.curlyLevel = curlyLevel;
s.lexstate = curLexState;
stateStack.add(s); // stack.push
/* start the nested state with fresh counters.
   NOTE(review): rparen is saved but not reset here — confirm intended */
lparen = 0;
curlyLevel = 0;
return true;
}
/**
 * Resets every lexer-tracking variable to its start value and empties
 * the state stack. Call before each parse.
 */
public void clearStateVars()
{
    inComment = false;
    inSet = false;
    lparen = 0;
    rparen = 0;
    curlyLevel = 0;
    stateStack.clear();
}
/**
 * Marks whether we are currently lexing the body of a #set directive.
 *
 * @param value new flag value
 */
public void setInSet(boolean value)
{
inSet = value;
}
/**
 * @return true while lexing the body of a #set directive
 */
public boolean isInSet()
{
return inSet;
}
/**
 * Holds the state of the parsing process: the paren/curly counters and
 * the lexical state, as saved by stateStackPush().
 */
private static class ParserState
{
int lparen;
int rparen;
int curlyLevel;
int lexstate;
}
/**
 * handles the dropdown logic when encountering a RPAREN
 */
private void RPARENHandler()
{
/*
 * Ultimately, we want to drop down to the state below
 * the one that has an open (if we hit bottom (DEFAULT),
 * that's fine. It's just text schmoo.
 */
boolean closed = false;
/* inside a comment the ')' is inert: nothing to close */
if (inComment)
closed = true;
while( !closed )
{
/*
 * look at current state. If we haven't seen a lparen
 * in this state then we drop a state, because this
 * rparen clearly closes our state
 */
if( lparen > 0)
{
/*
 * if rparen + 1 == lparen, then this state is closed.
 * Otherwise, increment and keep parsing
 */
if( lparen == rparen + 1)
{
stateStackPop();
}
else
{
rparen++;
}
closed = true;
}
else
{
/*
 * now, drop a state; stop if the stack is already empty
 */
if(!stateStackPop())
break;
}
}
}
}
/* ------------------------------------------------------------------------
*
* Tokens
*
* ------------------------------------------------------------------------- */
/* The VelocityCharStream will send a zero-width whitespace
just before EOF to let us accept a terminal $ or #
*/
TOKEN :
{
{
stateStackPop();
}
}
/* In all other states, keep the zero-width whitespace for now */
TOKEN :
{
}
TOKEN:
{
{
stateStackPush();
switchTo(REFINDEX);
}
|
/* we need to give precedence to the logical 'or' here, it's a hack to avoid multiplying parsing modes */
{
stateStackPop();
}
|
{
if (curlyLevel == 1)
{
switchTo(ALT_VAL);
}
else
{
stateStackPop();
}
}
}
TOKEN:
{
{
stateStackPop();
}
}
TOKEN:
{
|
|
}
TOKEN:
{
}
TOKEN:
{
}
TOKEN :
{
{
++curlyLevel;
}
|
{
--curlyLevel;
if (curLexState == ALT_VAL && curlyLevel == 0)
{
stateStackPop();
}
}
}
TOKEN:
{
{
if (!inComment)
lparen++;
/*
* If in REFERENCE and we have seen the dot, then move
* to REFMOD2 -> Modifier()
*/
if (curLexState == REFMODIFIER || curLexState == OLD_REFMODIFIER )
switchTo( REFMOD2 );
}
}
/*
* we never will see a ')' in anything but DIRECTIVE and REFMOD2.
* Each have their own
*/
TOKEN:
{
{
RPARENHandler();
}
}
TOKEN:
{
/*
* in REFMOD2, we don't want to bind the whitespace and \n like we
* do when closing a directive.
*/
{
/*
* need to simply switch back to REFERENCE, not drop down the stack
* because we can (infinitely) chain, ala
* $foo.bar().blargh().woogie().doogie()
*/
switchTo( REFMOD3 );
}
}
/*----------------------------------------------
*
* escape "\\" handling for the built-in directives
*
*--------------------------------------------- */
TOKEN:
{
/*
* We have to do this, because we want these to be a Text node, and
* whatever follows to be peer to this text in the tree.
*
* We need to touch the ASTs for these, because we want an even # of \'s
* to render properly in front of the block
*
* This is really simplistic. I actually would prefer to find them in
* grammatical context, but I am neither smart nor rested, a receipe
* for disaster, another long night with Mr. Parser, or both.
*/
)* "\\${parser.char.hash}" ( | ) >
}
/*
* We added the lexical states REFERENCE, REFMODIFIER, REFMOD2 to
* address JIRA issue VELOCITY-631. With SET_DIRECTIVE only in the
* DEFAULT lexical state the following VTL fails "$a#set($b = 1)"
* because the Reference token uses LOOKAHEAD(2) combined with the
* fact that we explicity set the lex state to REFERENCE with the $
* token, which means we would never evaluate this token during the
* look ahead. This general issue is disscussed here:
*
* http://www.engr.mun.ca/~theo/JavaCC-FAQ/javacc-faq-ie.htm#tth_sEc3.12
*
*/
TOKEN:
{
{
if (! inComment)
{
trace(" #set : going to DIRECTIVE" );
stateStackPush();
setInSet(true);
switchTo(DIRECTIVE);
}
/*
* need the LPAREN action
*/
if (!inComment)
{
lparen++;
/*
* If in REFERENCE and we have seen the dot, then move
* to REFMOD2 -> Modifier()
*/
if (curLexState == REFMODIFIER || curLexState == OLD_REFMODIFIER )
switchTo( REFMOD2 );
}
}
}
<*>
MORE :
{
/*
* Note : DOLLARBANG is a duplicate of DOLLAR. They must be identical.
*/
{
if (! inComment)
{
/*
* if we find ourselves in REFERENCE or PRE_REFERENCE, we need to pop down
* to end the previous ref
*/
if (curLexState == REFERENCE || curLexState == PRE_REFERENCE || curLexState == PRE_OLD_REFERENCE)
{
stateStackPop();
}
int preReferenceState = parser.hyphenAllowedInIdentifiers ? PRE_OLD_REFERENCE : PRE_REFERENCE;
trace( " $ : going to " + lexStateNames[preReferenceState]);
/* do not push PRE states */
if (curLexState != PRE_REFERENCE && curLexState != PRE_DIRECTIVE && curLexState != PRE_OLD_REFERENCE)
{
stateStackPush();
}
switchTo(preReferenceState);
}
}
|
{
if (! inComment)
{
/*
* if we find ourselves in REFERENCE or PRE_REFERENCE, we need to pop down
* to end the previous ref
*/
if (curLexState == REFERENCE || curLexState == PRE_REFERENCE || curLexState == PRE_OLD_REFERENCE)
{
stateStackPop();
}
int preReferenceState = parser.hyphenAllowedInIdentifiers ? PRE_OLD_REFERENCE : PRE_REFERENCE;
trace( " $ : going to " + lexStateNames[preReferenceState]);
/* do not push PRE states */
if (curLexState != PRE_REFERENCE && curLexState != PRE_DIRECTIVE && curLexState != PRE_OLD_REFERENCE)
{
stateStackPush();
}
switchTo(preReferenceState);
}
}
| "${parser.char.hash}[["
{
if (!inComment)
{
inComment = true;
/* do not push PRE states */
if (curLexState != PRE_REFERENCE && curLexState != PRE_DIRECTIVE && curLexState != PRE_OLD_REFERENCE)
{
stateStackPush();
}
switchTo( IN_TEXTBLOCK );
}
}
| <"${parser.char.hash}${parser.char.asterisk}${parser.char.asterisk}" ~["${parser.char.hash}","\u001C"]>
{
if (!inComment)
{
input_stream.backup(1);
inComment = true;
/* do not push PRE states */
if (curLexState != PRE_REFERENCE && curLexState != PRE_DIRECTIVE && curLexState != PRE_OLD_REFERENCE)
{
stateStackPush();
}
switchTo( IN_FORMAL_COMMENT);
}
}
| "${parser.char.hash}${parser.char.asterisk}"
{
if (!inComment)
{
inComment=true;
/* do not push PRE states */
if (curLexState != PRE_REFERENCE && curLexState != PRE_DIRECTIVE && curLexState != PRE_OLD_REFERENCE)
{
stateStackPush();
}
switchTo( IN_MULTI_LINE_COMMENT );
}
}
|
{
if (! inComment)
{
/*
* We can have the situation where #if($foo)$foo#end.
* We need to transition out of REFERENCE before going to DIRECTIVE.
* I don't really like this, but I can't think of a legal way
* you are going into DIRECTIVE while in REFERENCE. -gmj
*/
if (curLexState == REFERENCE || curLexState == PRE_REFERENCE || curLexState == PRE_OLD_REFERENCE || curLexState == REFMODIFIER || curLexState == OLD_REFMODIFIER )
{
stateStackPop();
}
trace(" # : going to PRE_DIRECTIVE" );
/* do not push PRE states */
if (curLexState != PRE_REFERENCE && curLexState != PRE_DIRECTIVE && curLexState != PRE_OLD_REFERENCE)
{
stateStackPush();
}
switchTo(PRE_DIRECTIVE);
}
}
}
// treat the single line comment case separately
// to avoid ## errors
TOKEN :
{
{
if (!inComment)
{
if (curLexState == REFERENCE || curLexState == PRE_REFERENCE || curLexState == PRE_OLD_REFERENCE)
{
stateStackPop();
}
inComment = true;
stateStackPush();
switchTo(IN_SINGLE_LINE_COMMENT);
}
}
}
/* -----------------------------------------------------------------------
*
* *_COMMENT Lexical tokens
*
*-----------------------------------------------------------------------*/
TOKEN :
{
{
inComment = false;
stateStackPop();
if (curLexState == REFERENCE || curLexState == REFMOD3)
{
// end of reference: pop again
stateStackPop();
}
}
}
TOKEN :
{
{
inComment = false;
stateStackPop();
if (curLexState == REFERENCE || curLexState == REFMOD3)
{
// end of reference: pop again
stateStackPop();
}
}
}
TOKEN :
{
{
inComment = false;
stateStackPop();
if (curLexState == REFERENCE || curLexState == REFMOD3)
{
// end of reference: pop again
stateStackPop();
}
}
}
TOKEN :
{
{
inComment = false;
stateStackPop();
}
}
SKIP :
{
< ~[] >
}
MORE :
{
< ~["\u001C"] >
}
/* -----------------------------------------------------------------------
*
* DIRECTIVE Lexical State (some of it, anyway)
*
* ---------------------------------------------------------------------- */
TOKEN:
{
|
{
trace(" NEWLINE :");
/* if (isInSet()) */
setInSet(false);
}
}
/* needed for stuff like #foo() followed by ( '$' | '#' )* followed by ( | )
so that directive postfix doesn't eat the '$'s and '#'s
*/
TOKEN:
{
{
stateStackPop();
}
}
TOKEN :
{
//
< STRING_LITERAL:
("\""
( (~["\"","\u001C"])
| ("\\"
( ["n","t","b","r","f"]
| ["0"-"7"] ( ["0"-"7"] )?
| ["0"-"3"] ["0"-"7"] ["0"-"7"]
| "u" ["0"-"9", "a"-"f", "A"-"F"] ["0"-"9", "a"-"f", "A"-"F"] ["0"-"9", "a"-"f", "A"-"F"] ["0"-"9", "a"-"f", "A"-"F"]
)
)
| ("\"\"")
| ( "\\" (" ")* "\n")
)*
"\""
)
|
("\'"
( (~["\'","\u001C"])
| ("''")
| ( "\\" (" ")* "\n")
)*
"\'"
)
>
{
/*
* - if we are in DIRECTIVE and haven't seen ( yet, then also drop out.
* don't forget to account for the beloved yet wierd #set
* - finally, if we are in REFMOD2 (remember : $foo.bar( ) then " is ok!
*/
if( curLexState == DIRECTIVE && !isInSet() && lparen == 0)
stateStackPop();
}
}
TOKEN:
{
|
}
TOKEN :
{
|
|
|
|
|
|
|
|
| " | "gt" >
| =" | "ge" >
|
|
|
|
}
TOKEN :
{
{
stateStackPop();
}
|
{
switchTo(DIRECTIVE);
}
|
{
switchTo(DIRECTIVE);
}
|
{
stateStackPop();
}
}
TOKEN:
{
<#DIGIT: [ "0"-"9" ] >
/*
* treat FLOATING_POINT_LITERAL and INTEGER_LITERAL differently as a range can only handle integers.
*/
/**
* Note -- we also define an integer as ending with a double period,
* in order to avoid 1..3 being defined as floating point (1.) then a period, then a integer
*/
| )+ ("..")? >
{
/*
* Remove the double period if it is there
*/
if (matchedToken.image.endsWith("..")) {
input_stream.backup(2);
matchedToken.image = matchedToken.image.substring(0,matchedToken.image.length()-2);
}
/*
* check to see if we are in set
* ex. #set($foo = $foo + 3)
* because we want to handle the \n after
*/
if ( lparen == 0 && !isInSet() && curLexState != REFMOD2 && curLexState != REFINDEX && curLexState != ALT_VAL)
{
stateStackPop();
}
}
| )+ "." ()* ()?
| ("-")? "." ()+ ()?
| ("-")? ()+
>
{
/*
* check to see if we are in set
* ex. #set $foo = $foo + 3
* because we want to handle the \n after
*/
if ( lparen == 0 && !isInSet() && curLexState != REFMOD2 && curLexState != ALT_VAL)
{
stateStackPop();
}
}
|
<#EXPONENT: ["e","E"] (["+","-"])? (["0"-"9"])+ >
}
/**
* TODO, the "@" symbol for block macros to be correct really should prefix WORD
* and BRACKETED_WORD, e.g.,
TOKEN:
{
<#LETTER: [ "a"-"z", "A"-"Z" ] >
| <#DIRECTIVE_CHAR: [ "a"-"z", "A"-"Z", "0"-"9", "_" ] >
| | ["_"] | ["${parser.char.at}"]) ()* >
| | ["_"] | ["${parser.char.at}"]) ()* "}" >
}
/* -----------------------------------------------------------------------
*
* REFERENCE Lexical States
*
* This is more than a single state, because of the structure of
* the VTL references. We use three states because the set of tokens
* for each state can be different.
*
* $foo.bar( "arg" )
* ^ ^ ^ ^ ^
* | | | | |
* |_________________ > PRE_REFERENCE : state initiated by the '$' character.
* | | | | (or PRE_OLD_REFERENCE if '-' is allowed in identifiers)
* |________________> REFERENCE : state initiated by the identifier. Continues
* | | | until end of the reference, or the . character.
* |_____________ > REFMODIFIER : state switched to when the is encountered.
* | | (or OLD_REFMODIFIER if '-' is allowed in identifiers)
* | | note that this is a switch, not a push. See notes at bottom.
* |_________ > REFMOD2 : state switch to when the LPAREN is encountered.
* | again, this is a switch, not a push.
* |_ > REFMOD3 : state only checking for a possible '.' or '[' continuation.
*
* During the REFERENCE, REFMODIFIER or REFMOD3 lex states we will switch to:
* - REFINDEX if a bracket '[' is encountered: $foo[1], $foo.bar[1], $foo.bar( "arg" )[1]
* - ALT_VAL if a pipe '|' is encountered (only for formal references): ${foo|'foo'}
* ---------------------------------------------------------------------------- */
TOKEN :
{
<#ALPHA_CHAR: ["a"-"z", "A"-"Z", "_"] >
| <#IDENTIFIER_CHAR: [ "a"-"z", "A"-"Z", "0"-"9", "_" ] >
| ) ()* >
{
if (curLexState == PRE_REFERENCE)
{
switchTo(REFERENCE);
}
}
}
TOKEN :
{
<#OLD_ALPHA_CHAR: ["a"-"z", "A"-"Z", "_"] >
| <#OLD_IDENTIFIER_CHAR: [ "a"-"z", "A"-"Z", "0"-"9", "_", "-" ] >
| ) ()* >
{
if (curLexState == PRE_OLD_REFERENCE)
{
switchTo(REFERENCE);
}
}
}
TOKEN:
{
>
{
/*
* push the alpha char back into the stream so the following identifier
* is complete
*/
input_stream.backup(1);
/*
* and munge the so we just get a . when we have normal text that
* looks like a ref.ident
*/
matchedToken.image = ".";
int refModifierState = parser.hyphenAllowedInIdentifiers ? OLD_REFMODIFIER : REFMODIFIER;
trace("DOT : switching to " + lexStateNames[refModifierState]);
switchTo(refModifierState);
}
}
TOKEN :
{
{
++curlyLevel;
}
|
{
/* maybe it wasn't for our state */
while (curlyLevel == 0 && curLexState != DEFAULT)
{
stateStackPop();
}
/* At this point, here are all the possible states:
* - DEFAULT, which means the '}' is schmoo
* - DIRECTIVE or REFMOD2, which means the '}' is a closing map curly
* - one of the other REFERENCE states or ALT_VAL, which means the '}' ends the reference
* If we're in the last case, pop up state.
*/
if (curLexState != DEFAULT && curLexState != DIRECTIVE && curLexState != REFMOD2)
{
stateStackPop();
}
}
}
SPECIAL_TOKEN :
{
{
/*
* push every terminator character back into the stream
*/
input_stream.backup(1);
trace("REF_TERM :");
stateStackPop();
}
}
SPECIAL_TOKEN :
{
{
trace("DIRECTIVE_TERM :");
input_stream.backup(1);
stateStackPop();
}
}
/* TEXT must end with a newline, and contain at least one non-whitespace character in the first line,
so that the sequence is not read as a TEXT (needed for space gobbling)
*/
TOKEN :
{
|
| ((~["${parser.char.dollar}", "${parser.char.hash}", "\\", "\r", "\n","\u001C"])* )* >
}
TOKEN :
{
}
/**
* This method is what starts the whole parsing
* process. After the parsing is complete and
* the template has been turned into an AST,
* this method returns the root of AST which
* can subsequently be traversed by a visitor
* which implements the ParserVisitor interface
* which is generated automatically by JavaCC
*/
SimpleNode process() :
{
boolean afterNewline = true;
}
{
( LOOKAHEAD({ getToken(1).kind != EOF }) afterNewline = Statement(afterNewline) )*
{ return jjtThis; }
}
/**
* These are the types of statements that
* are acceptable in Velocity templates.
*/
boolean Statement(boolean afterNewline) #void :
{
}
{
LOOKAHEAD( { getToken(1).kind == IF_DIRECTIVE || afterNewline && getToken(1).kind == WHITESPACE && getToken(2).kind == IF_DIRECTIVE } ) afterNewline = IfStatement(afterNewline) { return afterNewline; }
| LOOKAHEAD(2) Reference() { return false; }
| LOOKAHEAD(2) afterNewline = Comment() { return afterNewline; }
| Textblock() { return false; }
| LOOKAHEAD( { getToken(1).kind == SET_DIRECTIVE || afterNewline && getToken(1).kind == WHITESPACE && getToken(2).kind == SET_DIRECTIVE } ) afterNewline = SetDirective(afterNewline) { return afterNewline; }
| EscapedDirective() { return false; }
| Escape() { return false; }
| LOOKAHEAD( { getToken(1).kind == WORD || getToken(1).kind == BRACKETED_WORD || afterNewline && getToken(1).kind == WHITESPACE && ( getToken(2).kind == WORD || getToken(2).kind == BRACKETED_WORD ) } ) afterNewline = Directive(afterNewline) { return afterNewline; }
| afterNewline = Text() { return afterNewline; }
| () #Text { return true; }
| ((() { afterNewline = false; } ) (() { afterNewline = true; })? ) #Text { return afterNewline; }
| () #Text { return false; }
| () #Text { return true; }
| LOOKAHEAD(2) EndingZeroWidthWhitespace() { return afterNewline; }
| () #Text { return afterNewline; } // needed here since it can be triggered in mode out of any boolean evaluation
| () #Text { afterNewline = !afterNewline; return false; }
}
void EndingZeroWidthWhitespace() #void : {}
{
{ }
}
/**
* used to separate the notion of a valid directive that has been
* escaped, versus something that looks like a directive and
* is just schmoo. This is important to do as a separate production
* that creates a node, because we want this, in either case, to stop
* the further parsing of the Directive() tree.
*/
void EscapedDirective() : {}
{
{
Token t = null;
}
t =
{
/*
* churn and burn..
*/
t.image = escapedDirective( t.image );
}
}
/**
* Used to catch and process escape sequences in grammatical constructs
* as escapes outside of VTL are just characters. Right now we have both
* this and the EscapeDirective() construction because in the EscapeDirective()
* case, we want to suck in the #<directive> and here we don't. We just want
* the escapes to render correctly
*/
void Escape() : {}
{
{
Token t = null;
int count = 0;
boolean control = false;
}
( LOOKAHEAD(2) t =
{
count++;
}
)+
{
/*
* first, check to see if we have a control directive
*/
switch(t.next.kind ) {
case IF_DIRECTIVE :
case ELSE :
case ELSEIF :
case END :
control = true;
break;
}
/*
* if that failed, lets lookahead to see if we matched a PD or a VM
*/
String nTag = t.next.image.substring(1);
if (strictEscape
|| isDirective(nTag)
|| macroNames.containsKey(nTag)
|| rsvc.isVelocimacro(nTag, currentTemplate))
{
control = true;
}
jjtThis.val = "";
for( int i = 0; i < count; i++)
jjtThis.val += ( control ? "\\" : "\\\\");
}
}
boolean Comment() : {}
{
( ) ? { return true; }
| { return false; }
| { return false; }
}
void Textblock() : {}
{
}
void FloatingPointLiteral() : {}
{
}
void IntegerLiteral() : {}
{
}
void StringLiteral() : {}
{
}
/**
* This method corresponds to variable
* references in Velocity templates.
* The following are examples of variable
* references that may be found in a
* template:
*
* $foo
* $bar
*
*/
void Identifier() : {}
{
|
}
void Word() : {}
{
}
/**
* Supports the arguments for the Pluggable Directives
*/
int DirectiveArg() #void : {}
{
Reference()
{
return ParserTreeConstants.JJTREFERENCE;
}
| Word()
{
return ParserTreeConstants.JJTWORD;
}
| StringLiteral()
{
return ParserTreeConstants.JJTSTRINGLITERAL;
}
| IntegerLiteral()
{
return ParserTreeConstants.JJTINTEGERLITERAL;
}
/*
* Need to put this before the floating point expansion
*/
| LOOKAHEAD( ( | )* ( Reference() | IntegerLiteral()) ( | )* ) IntegerRange()
{
return ParserTreeConstants.JJTINTEGERRANGE;
}
| FloatingPointLiteral()
{
return ParserTreeConstants.JJTFLOATINGPOINTLITERAL;
}
| Map()
{
return ParserTreeConstants.JJTMAP;
}
| ObjectArray()
{
return ParserTreeConstants.JJTOBJECTARRAY;
}
| True()
{
return ParserTreeConstants.JJTTRUE;
}
| False()
{
return ParserTreeConstants.JJTFALSE;
}
}
/*
 * Wraps a Reference that appears on the left-hand side of an assignment
 * inside a macro-style directive argument list (see the
 * "isMacro && isAssignment()" guard in Directive()).
 */
void DirectiveAssign() : {}
{
Reference()
}
/**
 * Supports the Pluggable Directives
 * #foo( arg+ )
 * @return true if ends with a newline
 *
 * Matches a directive header (#name or #{name}), an optional parenthesized
 * argument list, and -- for block-type directives -- a body terminated by
 * #end, with an optional #else body for #foreach.  The semantic actions also
 * track prefix/postfix whitespace for space gobbling, and register macro
 * names when the directive is #macro.
 *
 * NOTE(review): many angle-bracketed token references have been stripped
 * from this copy of the grammar (e.g. "(t = )", "((id = ) | (id = ))",
 * "( | )*", "[]", "(end = )").  The semantic actions are intact, but the
 * token vocabulary must be restored from the canonical Parser.jjt before
 * this can be regenerated.
 */
boolean Directive(boolean afterNewline) :
{
Token id = null, t = null, u = null, end = null, _else = null;
int argType;
int argPos = 0;
Directive d;
int directiveType;
boolean isVM = false;
boolean isMacro = false;
ArrayList argtypes = new ArrayList(4);
String blockPrefix = "";
ASTBlock block = null, elseBlock = null;
boolean hasParentheses = false;
// remember the incoming newline state; afterNewline itself is mutated below
boolean newlineAtStart = afterNewline;
}
{
[
(t = )
{
// only possible if not after new line
jjtThis.setPrefix(t.image);
t = null;
}
]
/*
 * note that if we were escaped, that is now handled by
 * EscapedDirective()
 */
((id = ) | (id = ))
{
String directiveName;
int p = id.image.lastIndexOf(hash);
// BRACKETED_WORD is the #{name} form: skip "#{" and drop the trailing "}"
if (id.kind == StandardParserConstants.BRACKETED_WORD)
{
directiveName = id.image.substring(p + 2, id.image.length() - 1);
}
else
{
directiveName = id.image.substring(p + 1);
}
d = getDirective(directiveName);
/*
 * Velocimacro support : if the directive is macro directive
 * then set the flag so after the block parsing, we add the VM
 * right then. (So available if used w/in the current template )
 */
if (directiveName.equals("macro"))
{
isMacro = true;
}
/*
 * set the directive name from here. No reason for the thing to know
 * about parser tokens
 */
jjtThis.setDirectiveName(directiveName);
if ( d == null)
{
if( directiveName.charAt(0) == at )
{
// block macro call of type: #@foobar($arg1 $arg2) astBody #end
directiveType = Directive.BLOCK;
}
else
{
/*
 * if null, then not a real directive, but maybe a Velocimacro
 */
isVM = rsvc.isVelocimacro(directiveName, currentTemplate);
directiveType = Directive.LINE;
}
}
else
{
directiveType = d.getType();
}
/*
 * now, switch us out of PRE_DIRECTIVE
 */
token_source.switchTo(DIRECTIVE);
argPos = 0;
}
/**
 * Look for the pattern [WHITESPACE]
 */
(
LOOKAHEAD( { isLeftParenthesis() } )
/*
 * if this is indeed a token, match the #foo ( arg, arg... ) pattern
 */
(
( | )*
(
LOOKAHEAD({ !isRightParenthesis() }) ( | )* [ ( | )*]
(
[
LOOKAHEAD( { isMacro && isAssignment() })
DirectiveAssign() ( | )* ( | )*
{
argtypes.add(ParserTreeConstants.JJTDIRECTIVEASSIGN);
}
]
LOOKAHEAD( { !isRightParenthesis() } )
(
argType = DirectiveArg()
{
argtypes.add(argType);
// a bare Word is only legal in #macro definitions, not in VM calls
if (d == null && argType == ParserTreeConstants.JJTWORD)
{
if (isVM)
{
throw new MacroParseException("Invalid argument "
+ (argPos+1) + " in macro call " + id.image, currentTemplate.getName(), id);
}
}
argPos++;
}
)
|
{
if (!isMacro)
{
// We only allow line comments in macro definitions for now
throw new MacroParseException("A Line comment is not allowed in " + id.image
+ " arguments", currentTemplate.getName(), id);
}
}
[]
)
)* ( | )*
{ hasParentheses = true; }
)
|
{
// no '(' followed: undo the lexer state pushed for the argument list
token_source.stateStackPop();
}
)
{ afterNewline = false; }
[
// Conditions where whitespace and newline postfix is eaten by space gobbling at this point:
// - block directive
// - new line before directive without backward compatibility mode
// - backward compatibility mode *with parentheses*
// - #include() or #parse()
LOOKAHEAD(2, { directiveType != Directive.LINE || newlineAtStart && rsvc.getSpaceGobbling() != SpaceGobbling.BC || rsvc.getSpaceGobbling() == SpaceGobbling.BC && hasParentheses || d != null && (d instanceof Include || d instanceof Parse) })
( [ ( t = ) ] ( u = ) )
{
afterNewline = true;
// line directives own their trailing whitespace; block directives pass
// it along as the prefix of the body block
if (directiveType == Directive.LINE)
{
jjtThis.setPostfix(t == null ? u.image : t.image + u.image);
}
else
{
blockPrefix = (t == null ? u.image : t.image + u.image);
}
t = u = null;
}
]
{
if (d != null)
{
d.checkArgs(argtypes, id, currentTemplate.getName());
}
// line directives are done here; only block directives parse a body below
if (directiveType == Directive.LINE)
{
return afterNewline;
}
}
/*
 * and the following block if the PD needs it
 */
(
(
(
LOOKAHEAD( { getToken(1).kind != END && getToken(1).kind != ELSE && ( !afterNewline || getToken(1).kind != WHITESPACE || getToken(2).kind != END && getToken(2).kind != ELSE ) }) afterNewline = Statement(afterNewline)
)*
{
block = jjtThis;
block.setPrefix(blockPrefix);
blockPrefix = "";
}
)
#Block
)
[
LOOKAHEAD( 1, { afterNewline })
(t = )
{
block.setPostfix(t.image);
t = null;
}
]
/*
 * then an optional #else for the #foreach directive
 */
(
[
LOOKAHEAD( { d != null && (d instanceof Foreach) && getToken(1).kind == ELSE } )
(
(_else = )
(
[
LOOKAHEAD(2) ( [ ( t = ) ] ( u = ) )
{
jjtThis.setPrefix(t == null ? u.image : t.image + u.image);
t = u = null;
afterNewline = true;
}
]
(
LOOKAHEAD( { getToken(1).kind != END && (!afterNewline || getToken(1).kind != WHITESPACE || getToken(2).kind != END) })
afterNewline = Statement(afterNewline)
)*
{
elseBlock = jjtThis;
}
)
#Block
{
// text between the last statement and '#else' belongs to the first block
int pos = _else.image.lastIndexOf(hash);
if (pos > 0)
{
block.setMorePostfix(_else.image.substring(0, pos));
}
// subsequent postfix handling now applies to the #else block
block = elseBlock;
}
)
]
)
[
LOOKAHEAD( 1, { afterNewline })
(t = )
{
block.setPostfix(t.image);
t = null;
afterNewline = false;
}
]
(
(end = )
{ afterNewline = false; }
[
LOOKAHEAD(2, { newlineAtStart || rsvc.getSpaceGobbling() == SpaceGobbling.BC })
( [ ( t = ) ] ( u = ) )
{
jjtThis.setPostfix(t == null ? u.image : t.image + u.image);
t = u = null;
afterNewline = true;
}
]
{
// text preceding the '#' of '#end' belongs to the block body
int pos = end.image.lastIndexOf(hash);
if (pos > 0)
{
block.setMorePostfix(end.image.substring(0, pos));
}
}
)
{
/*
 * VM : if we are processing a #macro directive, we need to
 * process the block. In truth, I can just register the name
 * and do the work later when init-ing. That would work
 * as long as things were always defined before use. This way
 * we don't have to worry about forward references and such...
 */
if (isMacro)
{
// Add the macro name so that we can perform escape processing
// on defined macros
String macroName = jjtThis.jjtGetChild(0).getFirstToken().image;
macroNames.put(macroName, macroName);
}
if (d != null)
{
d.checkArgs(argtypes, id, currentTemplate.getName());
}
/*
 * VM : end
 */
return afterNewline;
}
}
/**
 * for creating a map in a #set
 *
 * #set($foo = {$foo : $bar, $blargh : $thingy})
 *
 * NOTE(review): the token references (braces, colon, comma, whitespace)
 * have been stripped from this copy of the grammar; restore from the
 * canonical Parser.jjt.
 */
void Map() : {}
{
(
LOOKAHEAD(( | )* Parameter() ) ( Parameter() Parameter() ( Parameter() Parameter() )* )
|
( | )*
)
/** note: need both tokens as they are generated in different states **/
( | )
}
/*
 * Parses a list literal, e.g. ["a", "b"]; bracket/comma token references
 * have been stripped from this copy -- NOTE(review): restore from the
 * canonical grammar.
 */
void ObjectArray() : {}
{
[ Parameter() ( Parameter() )* ]
}
/**
 * supports the [n..m] vector generator for use in
 * the #foreach() to generate measured ranges w/o
 * needing explicit support from the app/servlet
 *
 * NOTE(review): bracket/'..'/whitespace token references have been stripped
 * from this copy of the grammar; restore from the canonical Parser.jjt.
 */
void IntegerRange() : {}
{
( | )*
( Reference() | IntegerLiteral())
(|)* (|)*
(Reference() | IntegerLiteral())
(|)*
}
/**
 * A Simplified parameter more suitable for an index position: $foo[$index]
 *
 * #void: contributes no node of its own; the nested Expression supplies
 * the tree.  Surrounding whitespace token references have been stripped
 * from this copy -- NOTE(review): restore from the canonical grammar.
 */
void IndexParameter() #void: {}
{
(|)*
(
Expression()
)
(|)*
}
/**
 * This method has yet to be fully implemented
 * but will allow arbitrarily nested method
 * calls
 *
 * Matches a single value usable as a method/map/list parameter, with
 * optional surrounding whitespace (token references stripped in this copy
 * -- NOTE(review): restore from the canonical grammar).  #void: the matched
 * alternative supplies the node directly.
 */
void Parameter() #void: {}
{
(|)*
(
StringLiteral()
| IntegerLiteral()
| LOOKAHEAD( ( | )* ( Reference() | IntegerLiteral()) ( | )* ) IntegerRange()
| Map()
| ObjectArray()
| True()
| False()
| Reference()
| FloatingPointLiteral()
)
(|)*
}
/**
 * This method has yet to be fully implemented
 * but will allow arbitrarily nested method
 * calls
 *
 * Matches a method invocation: an Identifier followed by an optional
 * comma-separated Expression argument list (parenthesis/comma token
 * references stripped in this copy -- NOTE(review): restore from the
 * canonical grammar).
 */
void Method() : {}
{
Identifier() [ Expression() ( Expression() )* ]
}
/*
 * Matches a bracketed index access on a reference, e.g. $foo[$i]
 * (bracket token references stripped in this copy -- NOTE(review):
 * restore from the canonical grammar).
 */
void Index() : {}
{
IndexParameter()
}
/*
 * Parses a Velocity reference: a plain $ref or the formal ${ref} form,
 * with chained .method()/.identifier accesses and [index] accesses.
 *
 * NOTE(review): the token references have been stripped from this copy
 * of the grammar (the "( | )" groups and the ${...} delimiters); restore
 * from the canonical Parser.jjt.
 */
void Reference() : {}
{
/*
 * A reference is either $ or ${} or ${'|')
 */
(
( | ) (Index())*
(LOOKAHEAD(2) (LOOKAHEAD(3) Method() | Identifier() ) (Index())* )*
)
|
(
( | ) (Index())*
(LOOKAHEAD(2) (LOOKAHEAD(3) Method() | Identifier() ) (Index())* )*
[ Expression() ]
( | )
)
}
/*
 * Parses the boolean literal "true"; the token reference forming the body
 * has been stripped from this copy -- NOTE(review): restore from canonical grammar.
 */
void True() : {}
{
}
/*
 * Parses the boolean literal "false"; the token reference forming the body
 * has been stripped from this copy -- NOTE(review): restore from canonical grammar.
 */
void False() : {}
{
}
/**
 * This is somewhat of a kludge, the problem is that the parser picks
 * up on '$[' , or '$![' as being a Reference, and does not dismiss it even though
 * there is no identifier-like token between $ and [. This has something to do
 * with the LOOKAHEAD in Reference, but I never found a way to resolve
 * it in a more elegant way..
 *
 * NOTE(review): the token definition inside the braces (and the token name
 * referenced by the comment above) has been stripped from this copy of the
 * grammar; restore from the canonical Parser.jjt.
 */
TOKEN :
{
}
/**
* This method is responsible for allowing
* all non-grammar text to pass through
* unscathed.
* @return true if last read token was a newline
*/
boolean Text() :
{
Token t = null;
}
{
{ return true; }
| { return false; }
| { return false; }
| { return false; }
| { return false; }
| { return false; }
| { return false; }
| { return false; }
| { return false; }
| { return false; }
|