1 /*
2  * [The "BSD license"]
3  *  Copyright (c) 2010 Terence Parr
4  *  All rights reserved.
5  *
6  *  Redistribution and use in source and binary forms, with or without
7  *  modification, are permitted provided that the following conditions
8  *  are met:
9  *  1. Redistributions of source code must retain the above copyright
10  *      notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *      notice, this list of conditions and the following disclaimer in the
13  *      documentation and/or other materials provided with the distribution.
14  *  3. The name of the author may not be used to endorse or promote products
15  *      derived from this software without specific prior written permission.
16  *
17  *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 package org.antlr;
29 
30 import org.antlr.analysis.*;
31 import org.antlr.codegen.CodeGenerator;
32 import org.antlr.misc.Graph;
33 import org.antlr.runtime.misc.Stats;
34 import org.antlr.tool.*;
35 import org.stringtemplate.v4.STGroup;
36 
37 import java.io.*;
38 import java.util.*;
39 
40 /** The main ANTLR entry point.  Read a grammar and generate a parser. */
41 public class Tool {
42 
43     public final Properties antlrSettings = new Properties();
44 
45 	public final String VERSION;
46 	{
47 		String version = Tool.class.getPackage().getImplementationVersion();
48 		VERSION = version != null ? version : "3.x";
49 	}
50 
51     public static final String UNINITIALIZED_DIR = "<unset-dir>";
52     private List<String> grammarFileNames = new ArrayList<String>();
53     private boolean generate_NFA_dot = false;
54     private boolean generate_DFA_dot = false;
55     private String outputDirectory = ".";
56     private boolean haveOutputDir = false;
57     private String inputDirectory = null;
58     private String parentGrammarDirectory;
59     private String grammarOutputDirectory;
60     private boolean haveInputDir = false;
61     private String libDirectory = ".";
62     private boolean debug = false;
63     private boolean trace = false;
64     private boolean profile = false;
65     private boolean report = false;
66     private boolean printGrammar = false;
67     private boolean depend = false;
68     private boolean forceAllFilesToOutputDir = false;
69     private boolean forceRelativeOutput = false;
70     protected boolean deleteTempLexer = true;
71     private boolean verbose = false;
72     /** Don't process grammar file if generated files are newer than grammar */
73     private boolean make = false;
74     private boolean showBanner = true;
75 	private static boolean exitNow = false;
76 	private static boolean return_dont_exit = false;
77 
78 
79 	public String forcedLanguageOption; // -language L on command line
80 
81     // The internal options are for my use on the command line during dev
82     //
83     public static boolean internalOption_PrintGrammarTree = false;
84     public static boolean internalOption_PrintDFA = false;
85     public static boolean internalOption_ShowNFAConfigsInDFA = false;
86     public static boolean internalOption_watchNFAConversion = false;
87 
88     /**
89      * A list of dependency generators that are accumulated as (and if) the
90      * tool is required to sort the provided grammars into build dependency order.
91     protected Map<String, BuildDependencyGenerator> buildDependencyGenerators;
92      */
93 
94     public static void main(String[] args) {
95         Tool antlr = new Tool(args);
96 
97         if (!exitNow) {
98             antlr.process();
99 			if ( return_dont_exit ) return;
100             if (ErrorManager.getNumErrors() > 0) {
101                 System.exit(1);
102             }
103             System.exit(0);
104         }
105     }
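    // A minimal programmatic-usage sketch (illustrative only; the directory and
    // grammar names below are hypothetical). Build tools such as Maven typically
    // construct the Tool directly instead of going through main():
    //
    //     Tool antlr = new Tool();
    //     antlr.setInputDirectory("src/main/antlr3");
    //     antlr.setOutputDirectory("target/generated-sources/antlr3");
    //     antlr.addGrammarFile("T.g");
    //     antlr.process();
    //     if (antlr.getNumErrors() > 0) { /* report failure to the caller */ }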
106 
107     /**
108      * Load the properties file org/antlr/antlr.properties and populate any
109      * variables that must be initialized from it, such as the version of ANTLR.
110      */
111     private void loadResources() {
112         InputStream in;
113         in = this.getClass().getResourceAsStream("antlr.properties");
114 
115         // If we found the resource, then load it, otherwise revert to the
116         // defaults.
117         //
118         if (in != null) {
119             try {
120                 // Load the resources into the map
121                 //
122                 antlrSettings.load(in);
123 
124                 // Set any variables that we need to populate from the resources
125                 //
126 //                VERSION = antlrSettings.getProperty("antlr.version");
127             } catch (Exception e) {
128                 // Do nothing, just leave the defaults in place
129             }
130         }
131     }
132 
133     public Tool() {
134         loadResources();
135     }
136 
137 	@SuppressWarnings("OverridableMethodCallInConstructor")
138     public Tool(String[] args) {
139         loadResources();
140 
141         // Set all the options and pick up all the named grammar files
142         processArgs(args);
143     }
144 
145     public void processArgs(String[] args) {
146 
147         if (isVerbose()) {
148             ErrorManager.info("ANTLR Parser Generator  Version " + VERSION);
149             showBanner = false;
150         }
151 
152         if (args == null || args.length == 0) {
153             help();
154             return;
155         }
156         for (int i = 0; i < args.length; i++) {
157             if (args[i].equals("-o") || args[i].equals("-fo")) {
158                 if (i + 1 >= args.length) {
159                     System.err.println("missing output directory with -fo/-o option; ignoring");
160                 }
161                 else {
162                     if (args[i].equals("-fo")) { // force output into dir
163                         setForceAllFilesToOutputDir(true);
164                     }
165                     i++;
166                     outputDirectory = args[i];
167                     if (outputDirectory.endsWith("/") ||
168                         outputDirectory.endsWith("\\")) {
169                         outputDirectory =
170                             outputDirectory.substring(0, getOutputDirectory().length() - 1);
171                     }
172                     File outDir = new File(outputDirectory);
173                     haveOutputDir = true;
174                     if (outDir.exists() && !outDir.isDirectory()) {
175                         ErrorManager.error(ErrorManager.MSG_OUTPUT_DIR_IS_FILE, outputDirectory);
176                         setLibDirectory(".");
177                     }
178                 }
179             }
180 			else if (args[i].equals("-lib")) {
181 				if (i + 1 >= args.length) {
182 					System.err.println("missing library directory with -lib option; ignoring");
183 				}
184 				else {
185 					i++;
186 					setLibDirectory(args[i]);
187 					if (getLibraryDirectory().endsWith("/") ||
188 						getLibraryDirectory().endsWith("\\")) {
189 						setLibDirectory(getLibraryDirectory().substring(0, getLibraryDirectory().length() - 1));
190 					}
191 					File outDir = new File(getLibraryDirectory());
192 					if (!outDir.exists()) {
193 						ErrorManager.error(ErrorManager.MSG_DIR_NOT_FOUND, getLibraryDirectory());
194 						setLibDirectory(".");
195 					}
196 				}
197 			}
198 			else if (args[i].equals("-language")) {
199 				if (i + 1 >= args.length) {
200 					System.err.println("missing language name; ignoring");
201 				}
202 				else {
203 					i++;
204 					forcedLanguageOption = args[i];
205 				}
206 			}
207             else if (args[i].equals("-nfa")) {
208                 setGenerate_NFA_dot(true);
209             }
210             else if (args[i].equals("-dfa")) {
211                 setGenerate_DFA_dot(true);
212             }
213             else if (args[i].equals("-debug")) {
214                 setDebug(true);
215             }
216             else if (args[i].equals("-trace")) {
217                 setTrace(true);
218             }
219             else if (args[i].equals("-report")) {
220                 setReport(true);
221             }
222             else if (args[i].equals("-profile")) {
223                 setProfile(true);
224             }
225             else if (args[i].equals("-print")) {
226                 setPrintGrammar(true);
227             }
228             else if (args[i].equals("-depend")) {
229                 setDepend(true);
230             }
231             else if (args[i].equals("-verbose")) {
232                 setVerbose(true);
233             }
234             else if (args[i].equals("-version")) {
235                 version();
236                 exitNow = true;
237             }
238             else if (args[i].equals("-make")) {
239                 setMake(true);
240             }
241             else if (args[i].equals("-message-format")) {
242                 if (i + 1 >= args.length) {
243                     System.err.println("missing output format with -message-format option; using default");
244                 }
245                 else {
246                     i++;
247                     ErrorManager.setFormat(args[i]);
248                 }
249             }
250             else if (args[i].equals("-Xgrtree")) {
251                 internalOption_PrintGrammarTree = true; // print grammar tree
252             }
253             else if (args[i].equals("-Xdfa")) {
254                 internalOption_PrintDFA = true;
255             }
256             else if (args[i].equals("-Xnoprune")) {
257                 DFAOptimizer.PRUNE_EBNF_EXIT_BRANCHES = false;
258             }
259             else if (args[i].equals("-Xnocollapse")) {
260                 DFAOptimizer.COLLAPSE_ALL_PARALLEL_EDGES = false;
261             }
262             else if (args[i].equals("-Xdbgconversion")) {
263                 NFAToDFAConverter.debug = true;
264             }
265             else if (args[i].equals("-Xmultithreaded")) {
266                 NFAToDFAConverter.SINGLE_THREADED_NFA_CONVERSION = false;
267             }
268             else if (args[i].equals("-Xnomergestopstates")) {
269                 DFAOptimizer.MERGE_STOP_STATES = false;
270             }
271             else if (args[i].equals("-Xdfaverbose")) {
272                 internalOption_ShowNFAConfigsInDFA = true;
273             }
274             else if (args[i].equals("-Xwatchconversion")) {
275                 internalOption_watchNFAConversion = true;
276             }
277             else if (args[i].equals("-XdbgST")) {
278                 CodeGenerator.LAUNCH_ST_INSPECTOR = true;
279 				STGroup.trackCreationEvents = true;
280 				return_dont_exit = true;
281             }
282             else if (args[i].equals("-Xmaxinlinedfastates")) {
283                 if (i + 1 >= args.length) {
284                     System.err.println("missing max inline dfa states -Xmaxinlinedfastates option; ignoring");
285                 }
286                 else {
287                     i++;
288                     CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE = Integer.parseInt(args[i]);
289                 }
290             }
291             else if (args[i].equals("-Xmaxswitchcaselabels")) {
292                 if (i + 1 >= args.length) {
293                     System.err.println("missing max switch case labels -Xmaxswitchcaselabels option; ignoring");
294                 }
295                 else {
296                     i++;
297                     CodeGenerator.MAX_SWITCH_CASE_LABELS = Integer.parseInt(args[i]);
298                 }
299             }
300             else if (args[i].equals("-Xminswitchalts")) {
301                 if (i + 1 >= args.length) {
302                     System.err.println("missing min switch alternatives -Xminswitchalts option; ignoring");
303                 }
304                 else {
305                     i++;
306                     CodeGenerator.MIN_SWITCH_ALTS = Integer.parseInt(args[i]);
307                 }
308             }
309             else if (args[i].equals("-Xm")) {
310                 if (i + 1 >= args.length) {
311                     System.err.println("missing max recursion with -Xm option; ignoring");
312                 }
313                 else {
314                     i++;
315                     NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK = Integer.parseInt(args[i]);
316                 }
317             }
318             else if (args[i].equals("-Xmaxdfaedges")) {
319                 if (i + 1 >= args.length) {
320                     System.err.println("missing max number of edges with -Xmaxdfaedges option; ignoring");
321                 }
322                 else {
323                     i++;
324                     DFA.MAX_STATE_TRANSITIONS_FOR_TABLE = Integer.parseInt(args[i]);
325                 }
326             }
327             else if (args[i].equals("-Xconversiontimeout")) {
328                 if (i + 1 >= args.length) {
329                     System.err.println("missing max time in ms -Xconversiontimeout option; ignoring");
330                 }
331                 else {
332                     i++;
333                     DFA.MAX_TIME_PER_DFA_CREATION = Integer.parseInt(args[i]);
334                 }
335             }
336 			else if (args[i].equals("-Xnfastates")) {
337 				DecisionProbe.verbose = true;
338 			}
339 			else if (args[i].equals("-Xsavelexer")) {
340 				deleteTempLexer = false;
341 			}
342             else if (args[i].equals("-X")) {
343                 Xhelp();
344             }
345             else {
346                 if (args[i].charAt(0) != '-') {
347                     // Must be the grammar file
348                     addGrammarFile(args[i]);
349                 }
350             }
351         }
352     }
353 
354     /*
355     protected void checkForInvalidArguments(String[] args, BitSet cmdLineArgValid) {
356     // check for invalid command line args
357     for (int a = 0; a < args.length; a++) {
358     if (!cmdLineArgValid.member(a)) {
359     System.err.println("invalid command-line argument: " + args[a] + "; ignored");
360     }
361     }
362     }
363      */
364 
365     /**
366      * Checks to see if the list of outputFiles all exist, and have
367      * last-modified timestamps which are later than the last-modified
368      *  timestamp of all the grammar files involved in building the output
369      * (imports must be checked). If these conditions hold, the method
370      * returns false, otherwise, it returns true.
371      *
372      * @param grammarFileName The grammar file we are checking
373      */
374     public boolean buildRequired(String grammarFileName)
375         throws IOException
376     {
377         BuildDependencyGenerator bd =
378             new BuildDependencyGenerator(this, grammarFileName);
379 
380         List<File> outputFiles = bd.getGeneratedFileList();
381         List<File> inputFiles = bd.getDependenciesFileList();
382         // Note that input directory must be set to use buildRequired
383         File grammarFile;
384         if (haveInputDir) {
385             grammarFile = new File(inputDirectory, grammarFileName);
386         }
387         else {
388             grammarFile = new File(grammarFileName);
389         }
390         long grammarLastModified = grammarFile.lastModified();
391         for (File outputFile : outputFiles) {
392             if (!outputFile.exists() || grammarLastModified > outputFile.lastModified()) {
393                 // One of the output files does not exist or is out of date, so we must build it
394 				if (isVerbose()) {
395 					if (!outputFile.exists()) {
396 						System.out.println("Output file " + outputFile + " does not exist: must build " + grammarFile);
397 					}
398 					else {
399 						System.out.println("Output file " + outputFile + " is not up-to-date: must build " + grammarFile);
400 					}
401 				}
402 
403                 return true;
404             }
405             // Check all of the imported grammars and see if any of these are younger
406             // than any of the output files.
407             if (inputFiles != null) {
408                 for (File inputFile : inputFiles) {
409 
410                     if (inputFile.lastModified() > outputFile.lastModified()) {
411                         // One of the imported grammar files has been updated so we must build
412 						if (isVerbose()) {
413 							System.out.println("Input file " + inputFile + " is newer than output: must rebuild " + grammarFile);
414 						}
415 
416                         return true;
417                     }
418                 }
419             }
420         }
421         if (isVerbose()) {
422             System.out.println("Grammar " + grammarFile + " is up to date - build skipped");
423         }
424         return false;
425     }
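    // Usage sketch (illustrative): a caller that only wants to regenerate stale
    // grammars enables make mode before processing; process() then consults
    // buildRequired() for each grammar and skips the up-to-date ones:
    //
    //     tool.setMake(true);
    //     tool.process();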
426 
427     public void process() {
428         boolean exceptionWhenWritingLexerFile = false;
429         String lexerGrammarFileName;		// necessary at this scope to have access in the catch below
430 
431         // Build tools such as Maven construct the Tool with new Tool() before setting
432         // options, so processArgs() never prints the banner; print it here instead.
433         if (isVerbose() && showBanner) {
434             ErrorManager.info("ANTLR Parser Generator  Version " + VERSION);
435             showBanner = false;
436         }
437 
438         try {
439             sortGrammarFiles(); // update grammarFileNames
440         }
441         catch (Exception e) {
442             ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR,e);
443         }
444         catch (Error e) {
445             ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, e);
446         }
447 
448         for (String grammarFileName : grammarFileNames) {
449             // If we are in make mode (to support build tools like Maven) and the
450             // file is already up to date, then we do not build it (and in verbose mode
451             // we will say so).
452             if (make) {
453                 try {
454                     if ( !buildRequired(grammarFileName) ) continue;
455                 }
456                 catch (Exception e) {
457                     ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR,e);
458                 }
459             }
460 
461             if (isVerbose() && !isDepend()) {
462                 System.out.println(grammarFileName);
463             }
464             try {
465                 if (isDepend()) {
466                     BuildDependencyGenerator dep =
467                         new BuildDependencyGenerator(this, grammarFileName);
468                     /*
469                     List outputFiles = dep.getGeneratedFileList();
470                     List dependents = dep.getDependenciesFileList();
471                     System.out.println("output: "+outputFiles);
472                     System.out.println("dependents: "+dependents);
473                      */
474                     System.out.println(dep.getDependencies().render());
475                     continue;
476                 }
477 
478                 Grammar rootGrammar = getRootGrammar(grammarFileName);
479                 // we now have all grammars read in as ASTs
480                 // (i.e., root and all delegates)
481 				rootGrammar.composite.assignTokenTypes();
482 				//rootGrammar.composite.translateLeftRecursiveRules();
483 				rootGrammar.addRulesForSyntacticPredicates();
484 				rootGrammar.composite.defineGrammarSymbols();
485                 rootGrammar.composite.createNFAs();
486 
487                 generateRecognizer(rootGrammar);
488 
489                 if (isPrintGrammar()) {
490                     rootGrammar.printGrammar(System.out);
491                 }
492 
493                 if (isReport()) {
494 					GrammarReport2 greport = new GrammarReport2(rootGrammar);
495 					System.out.print(greport.toString());
496 //                    GrammarReport greport = new GrammarReport(rootGrammar);
497 //                    System.out.println(greport.toString());
498 //                    // print out a backtracking report too (that is not encoded into log)
499 //                    System.out.println(greport.getBacktrackingReport());
500                 }
501                 if (isProfile()) {
502                     GrammarReport greport = new GrammarReport(rootGrammar);
503                     Stats.writeReport(GrammarReport.GRAMMAR_STATS_FILENAME,
504                                       greport.toNotifyString());
505                 }
506 
507                 // now handle the lexer if one was created for a merged spec
508                 String lexerGrammarStr = rootGrammar.getLexerGrammar();
509                 //System.out.println("lexer rootGrammar:\n"+lexerGrammarStr);
510                 if (rootGrammar.type == Grammar.COMBINED && lexerGrammarStr != null) {
511                     lexerGrammarFileName = rootGrammar.getImplicitlyGeneratedLexerFileName();
512                     try {
513                         Writer w = getOutputFile(rootGrammar, lexerGrammarFileName);
514                         w.write(lexerGrammarStr);
515                         w.close();
516                     }
517                     catch (IOException e) {
518                         // emit different error message when creating the implicit lexer fails
519                         // due to write permission error
520                         exceptionWhenWritingLexerFile = true;
521                         throw e;
522                     }
523                     try {
524                         StringReader sr = new StringReader(lexerGrammarStr);
525                         Grammar lexerGrammar = new Grammar(this);
526                         lexerGrammar.composite.watchNFAConversion = internalOption_watchNFAConversion;
527                         lexerGrammar.implicitLexer = true;
528                         //lexerGrammar.setTool(this);
529                         File lexerGrammarFullFile =
530                             new File(getFileDirectory(lexerGrammarFileName), lexerGrammarFileName);
531                         lexerGrammar.setFileName(lexerGrammarFullFile.toString());
532 
533                         lexerGrammar.importTokenVocabulary(rootGrammar);
534                         lexerGrammar.parseAndBuildAST(sr);
535 
536                         sr.close();
537 
538                         lexerGrammar.composite.assignTokenTypes();
539 						lexerGrammar.addRulesForSyntacticPredicates();
540                         lexerGrammar.composite.defineGrammarSymbols();
541                         lexerGrammar.composite.createNFAs();
542 
543                         generateRecognizer(lexerGrammar);
544                     }
545                     finally {
546                         // make sure we clean up
547                         if (deleteTempLexer) {
548                             File outputDir = getOutputDirectory(lexerGrammarFileName);
549                             File outputFile = new File(outputDir, lexerGrammarFileName);
550                             outputFile.delete();
551                         }
552                     }
553                 }
554             }
555             catch (IOException e) {
556                 if (exceptionWhenWritingLexerFile) {
557                     ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, e);
558                 }
559                 else {
560                     ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE,
561                                        grammarFileName, e);
562                 }
563             }
564             catch (Exception e) {
565                 ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, grammarFileName, e);
566             }
567             /*
568            finally {
569            System.out.println("creates="+ Interval.creates);
570            System.out.println("hits="+ Interval.hits);
571            System.out.println("misses="+ Interval.misses);
572            System.out.println("outOfRange="+ Interval.outOfRange);
573            }
574             */
575         }
576     }
577 
578     public void sortGrammarFiles() throws IOException {
579         //System.out.println("Grammar names "+getGrammarFileNames());
580         Graph<String> g = new Graph<String>();
581         List<String> missingFiles = new ArrayList<String>();
582         for (String gfile : grammarFileNames) {
583             try {
584                 GrammarSpelunker grammar = new GrammarSpelunker(inputDirectory, gfile);
585                 grammar.parse();
586                 String vocabName = grammar.getTokenVocab();
587                 String grammarName = grammar.getGrammarName();
588                 // Make all grammars depend on any tokenVocab options
589                 if ( vocabName!=null ) g.addEdge(gfile, vocabName+CodeGenerator.VOCAB_FILE_EXTENSION);
590                 // Make all generated tokens files depend on their grammars
591                 g.addEdge(grammarName+CodeGenerator.VOCAB_FILE_EXTENSION, gfile);
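                // Illustrative example: for T.g declaring options { tokenVocab=V; } these two
                // calls add T.g -> V.tokens and T.tokens -> T.g; combined with V.g's own
                // V.tokens -> V.g edge, the topological sort below emits V.g before T.g.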
592             }
593             catch (FileNotFoundException fnfe) {
594                 ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE, gfile, fnfe);
595                 missingFiles.add(gfile);
596             }
597         }
598         List<String> sorted = g.sort();
599         //System.out.println("sorted="+sorted);
600         grammarFileNames.clear(); // wipe so we can give new ordered list
601         for (int i = 0; i < sorted.size(); i++) {
602             String f = sorted.get(i);
603             if ( missingFiles.contains(f) ) continue;
604             if ( !(f.endsWith(".g") || f.endsWith(".g3")) ) continue;
605             grammarFileNames.add(f);
606         }
607         //System.out.println("new grammars="+grammarFileNames);
608     }
609 
610     /** Get a grammar mentioned on the command-line and any delegates */
611     public Grammar getRootGrammar(String grammarFileName)
612         throws IOException
613     {
614         //ST.setLintMode(true);
615         // grammars mentioned on command line are either roots or single grammars.
616         // create the necessary composite in case it's got delegates; even
617         // single grammar needs it to get token types.
618         CompositeGrammar composite = new CompositeGrammar();
619         Grammar grammar = new Grammar(this, grammarFileName, composite);
620         composite.setDelegationRoot(grammar);
621         FileReader fr;
622         File f;
623 
624         if (haveInputDir) {
625             f = new File(inputDirectory, grammarFileName);
626         }
627         else {
628             f = new File(grammarFileName);
629         }
630 
631         // Store the location of this grammar as if we import files, we can then
632         // search for imports in the same location as the original grammar as well as in
633         // the lib directory.
634         //
635         parentGrammarDirectory = f.getParent();
636 
637         if (grammarFileName.lastIndexOf(File.separatorChar) == -1) {
638             grammarOutputDirectory = ".";
639         }
640         else {
641             grammarOutputDirectory = grammarFileName.substring(0, grammarFileName.lastIndexOf(File.separatorChar));
642         }
643         fr = new FileReader(f);
644         BufferedReader br = new BufferedReader(fr);
645         grammar.parseAndBuildAST(br);
646         composite.watchNFAConversion = internalOption_watchNFAConversion;
647         br.close();
648         fr.close();
649         return grammar;
650     }
651 
652     /** Create NFA, DFA and generate code for grammar.
653      *  Create NFA for any delegates first.  Once all NFA are created,
654      *  it's ok to create DFA, which must check for left-recursion.  That check
655      *  is done by walking the full NFA, which therefore must be complete.
656      *  After all NFA, comes DFA conversion for root grammar then code gen for
657      *  root grammar.  DFA and code gen for delegates comes next.
658      */
659     protected void generateRecognizer(Grammar grammar) {
660         String language = (String) grammar.getOption("language");
661         if (language != null) {
662             CodeGenerator generator = new CodeGenerator(this, grammar, language);
663             grammar.setCodeGenerator(generator);
664             generator.setDebug(isDebug());
665             generator.setProfile(isProfile());
666             generator.setTrace(isTrace());
667 
668             // generate NFA early in case of crash later (for debugging)
669             if (isGenerate_NFA_dot()) {
670                 generateNFAs(grammar);
671             }
672 
673             // GENERATE CODE
674             generator.genRecognizer();
675 
676             if (isGenerate_DFA_dot()) {
677                 generateDFAs(grammar);
678             }
679 
680             List<Grammar> delegates = grammar.getDirectDelegates();
681             for (int i = 0; delegates != null && i < delegates.size(); i++) {
682                 Grammar delegate = delegates.get(i);
683                 if (delegate != grammar) { // already processing this one
684                     generateRecognizer(delegate);
685                 }
686             }
687         }
688     }
689 
690     public void generateDFAs(Grammar g) {
691         for (int d = 1; d <= g.getNumberOfDecisions(); d++) {
692             DFA dfa = g.getLookaheadDFA(d);
693             if (dfa == null) {
694                 continue; // not there for some reason, ignore
695             }
696             DOTGenerator dotGenerator = new DOTGenerator(g);
697             String dot = dotGenerator.getDOT(dfa.startState);
698             String dotFileName = g.name + "." + "dec-" + d;
699             if (g.implicitLexer) {
700                 dotFileName = g.name + Grammar.grammarTypeToFileNameSuffix[g.type] + "." + "dec-" + d;
701             }
702             try {
703                 writeDOTFile(g, dotFileName, dot);
704             } catch (IOException ioe) {
705                 ErrorManager.error(ErrorManager.MSG_CANNOT_GEN_DOT_FILE,
706                                    dotFileName,
707                                    ioe);
708             }
709         }
710     }
711 
712     protected void generateNFAs(Grammar g) {
713         DOTGenerator dotGenerator = new DOTGenerator(g);
714         Collection<Rule> rules = new HashSet<Rule>(g.getAllImportedRules());
715         rules.addAll(g.getRules());
716 
717         for (Rule r : rules) {
718             try {
719                 String dot = dotGenerator.getDOT(r.startState);
720                 if (dot != null) {
721                     writeDOTFile(g, r, dot);
722                 }
723             } catch (IOException ioe) {
724                 ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, ioe);
725             }
726         }
727     }
728 
729     protected void writeDOTFile(Grammar g, Rule r, String dot) throws IOException {
730         writeDOTFile(g, r.grammar.name + "." + r.name, dot);
731     }
732 
733     protected void writeDOTFile(Grammar g, String name, String dot) throws IOException {
734         Writer fw = getOutputFile(g, name + ".dot");
735         fw.write(dot);
736         fw.close();
737     }
738 
739     private static void version() {
740         ErrorManager.info("ANTLR Parser Generator  Version " + new Tool().VERSION);
741     }
742 
743     private static void help() {
744         ErrorManager.info("ANTLR Parser Generator  Version " + new Tool().VERSION);
745         System.err.println("usage: java org.antlr.Tool [args] file.g [file2.g file3.g ...]");
746         System.err.println("  -o outputDir          specify output directory where all output is generated");
747         System.err.println("  -fo outputDir         same as -o but force even files with relative paths to dir");
748         System.err.println("  -lib dir              specify location of token files");
749         System.err.println("  -depend               generate file dependencies");
750         System.err.println("  -report               print out a report about the grammar(s) processed");
751         System.err.println("  -print                print out the grammar without actions");
752         System.err.println("  -debug                generate a parser that emits debugging events");
753 		System.err.println("  -profile              generate a parser that computes profiling information");
754 		System.err.println("  -trace                generate a recognizer that traces rule entry/exit");
755         System.err.println("  -nfa                  generate an NFA for each rule");
756         System.err.println("  -dfa                  generate a DFA for each decision point");
757         System.err.println("  -message-format name  specify output style for messages");
758         System.err.println("  -verbose              generate ANTLR version and other information");
759         System.err.println("  -make                 only build if generated files older than grammar");
760 		System.err.println("  -version              print the version of ANTLR and exit.");
761 		System.err.println("  -language L           override language grammar option; generate L");
762         System.err.println("  -X                    display extended argument list");
763     }
764 
765     private static void Xhelp() {
766         ErrorManager.info("ANTLR Parser Generator  Version " + new Tool().VERSION);
767         System.err.println("  -Xgrtree                print the grammar AST");
768         System.err.println("  -Xdfa                   print DFA as text ");
769         System.err.println("  -Xnoprune               test lookahead against EBNF block exit branches");
770         System.err.println("  -Xnocollapse            collapse incident edges into DFA states");
771 		System.err.println("  -Xdbgconversion         dump lots of info during NFA conversion");
772 		System.err.println("  -Xconversiontimeout     use to restrict NFA conversion exponentiality");
773         System.err.println("  -Xmultithreaded         run the analysis in 2 threads");
774         System.err.println("  -Xnomergestopstates     do not merge stop states");
775         System.err.println("  -Xdfaverbose            generate DFA states in DOT with NFA configs");
776         System.err.println("  -Xwatchconversion       print a message for each NFA before converting");
777         System.err.println("  -XdbgST                 put tags at start/stop of all templates in output");
778         System.err.println("  -Xnfastates             for nondeterminisms, list NFA states for each path");
779         System.err.println("  -Xm m                   max number of rule invocations during conversion           [" + NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK + "]");
780         System.err.println("  -Xmaxdfaedges m         max \"comfortable\" number of edges for single DFA state     [" + DFA.MAX_STATE_TRANSITIONS_FOR_TABLE + "]");
781         System.err.println("  -Xmaxinlinedfastates m  max DFA states before table used rather than inlining      [" + CodeGenerator.MADSI_DEFAULT +"]");
782         System.err.println("  -Xmaxswitchcaselabels m don't generate switch() statements for dfas bigger  than m [" + CodeGenerator.MSCL_DEFAULT +"]");
783 		System.err.println("  -Xminswitchalts m       don't generate switch() statements for dfas smaller than m [" + CodeGenerator.MSA_DEFAULT + "]");
784 		System.err.println("  -Xsavelexer             don't delete temporary lexers generated from combined grammars");
785     }
786 
787     /**
788      * Set the threshold of case labels beyond which ANTLR will not instruct the target template
789      * to generate switch() { case xxx: ...
790      *
791      * @param maxSwitchCaseLabels Maximum number of case labels that ANTLR should allow in the target code
792      */
793     public void setMaxSwitchCaseLabels(int maxSwitchCaseLabels) {
794         CodeGenerator.MAX_SWITCH_CASE_LABELS = maxSwitchCaseLabels;
795     }
796 
797     /**
798      * Set the threshold of the number of alts, below which ANTLR will not instruct the target
799      * template to use a switch statement.
800      *
801      * @param minSwitchAlts the minimum number of alts required to use a switch statement
802      */
803     public void setMinSwitchAlts(int minSwitchAlts) {
804         CodeGenerator.MIN_SWITCH_ALTS = minSwitchAlts;
805     }
806 
807     /**
808      * Set the location (base directory) where output files should be produced
809      * by the ANTLR tool.
810      * @param outputDirectory
811      */
812     public void setOutputDirectory(String outputDirectory) {
813         haveOutputDir = true;
814         this.outputDirectory = outputDirectory;
815     }
816 
817     /**
818      * Used by build tools to force the output files to always be
819      * relative to the base output directory, even though the tool
820      * had to set the output directory to an absolute path as it
821      * cannot rely on the working directory like command line invocation
822      * can.
823      *
824      * @param forceRelativeOutput true if output files should always be relative to the base output directory
825      */
826     public void setForceRelativeOutput(boolean forceRelativeOutput) {
827         this.forceRelativeOutput = forceRelativeOutput;
828     }
829 
830     /**
831      * Set the base location of input files. Normally (when the tool is
832      * invoked from the command line), the inputDirectory is not set, but
833      * for build tools such as Maven, we need to be able to locate the input
834      * files relative to the base, as the working directory could be anywhere and
835      * changing working directories is not a valid concept for JVMs because of threading and
836      * so on. Setting the directory just means that the getFileDirectory() method will
837      * try to open files relative to this input directory.
838      *
839      * @param inputDirectory Input source base directory
840      */
841     public void setInputDirectory(String inputDirectory) {
842         this.inputDirectory = inputDirectory;
843         haveInputDir = true;
844     }
845 
846     /** This method is used by all code generators to create new output
847      *  files. If the outputDir set by -o is not present it will be created.
848      *  The final filename is sensitive to the output directory and
849      *  the directory where the grammar file was found.  If -o is /tmp
850      *  and the original grammar file was foo/t.g then output files
851      *  go in /tmp/foo.
852      *
853      *  The output dir -o spec takes precedence if it's absolute.
854      *  E.g., if the grammar file dir is absolute the output dir is given
855      *  precedence. "-o /tmp /usr/lib/t.g" results in "/tmp/T.java" as
856      *  output (assuming t.g holds T.java).
857      *
858      *  If no -o is specified, then just write to the directory where the
859      *  grammar file was found.
860      *
861      *  If outputDirectory==null then write a String.
862      */
863     public Writer getOutputFile(Grammar g, String fileName) throws IOException {
864         if (getOutputDirectory() == null) {
865             return new StringWriter();
866         }
867         // output directory is a function of where the grammar file lives
868         // for subdir/T.g, you get subdir here.  Well, depends on -o etc...
869         // But, if this is a .tokens file, then we force the output to
870         // be the base output directory (or current directory if there is not a -o)
871         //
872         File outputDir;
873         if (fileName.endsWith(CodeGenerator.VOCAB_FILE_EXTENSION)) {
874             if (haveOutputDir) {
875                 outputDir = new File(getOutputDirectory());
876             }
877             else {
878                 outputDir = new File(".");
879             }
880         }
881         else {
882             outputDir = getOutputDirectory(g.getFileName());
883         }
884         File outputFile = new File(outputDir, fileName);
885 
886         if (!outputDir.exists()) {
887             outputDir.mkdirs();
888         }
889         FileWriter fw = new FileWriter(outputFile);
890         return new BufferedWriter(fw);
891     }
892 
893     /**
894      * Return the location where ANTLR will generate output files for a given file. This is a
895      * base directory and output files will be relative to here in some cases
896      * such as when the -o option is used and input files are given relative
897      * to the input directory.
898      *
899      * @param fileNameWithPath path to input source
900      */
901     public File getOutputDirectory(String fileNameWithPath) {
902 
903         File outputDir;
904         String fileDirectory;
905 
906         // Some files are given to us without a PATH but should
907         // still be written to the output directory in the relative path of
908         // the output directory. The file directory is either the set of sub directories
909         // or just the relative path recorded for the parent grammar. This means
910         // that when we write the tokens files, or the .java files for imported grammars,
911         // we will write them in the correct place.
912         //
913         if (fileNameWithPath.lastIndexOf(File.separatorChar) == -1) {
914 
915             // No path is included in the file name, so make the file
916             // directory the same as the parent grammar (which might still be just "",
917             // but when it is not, we will write the file in the correct place).
918             //
919             fileDirectory = grammarOutputDirectory;
920 
921         }
922         else {
923             fileDirectory = fileNameWithPath.substring(0, fileNameWithPath.lastIndexOf(File.separatorChar));
924         }
925         if (haveOutputDir) {
926             // -o /tmp /var/lib/t.g => /tmp/T.java
927             // -o subdir/output /usr/lib/t.g => subdir/output/T.java
928             // -o . /usr/lib/t.g => ./T.java
929             if ((fileDirectory != null && !forceRelativeOutput) &&
930                 (new File(fileDirectory).isAbsolute() ||
931                  fileDirectory.startsWith("~")) || // isAbsolute doesn't count this :(
932                 isForceAllFilesToOutputDir()) {
933                 // somebody set the dir, it takes precedence; write new file there
934                 outputDir = new File(getOutputDirectory());
935             }
936             else {
937                 // -o /tmp subdir/t.g => /tmp/subdir/t.g
938                 if (fileDirectory != null) {
939                     outputDir = new File(getOutputDirectory(), fileDirectory);
940                 }
941                 else {
942                     outputDir = new File(getOutputDirectory());
943                 }
944             }
945         }
946         else {
947             // they didn't specify a -o dir so just write to location
948             // where grammar is, absolute or relative, this will only happen
949             // with command line invocation as build tools will always
950             // supply an output directory.
951             //
952             outputDir = new File(fileDirectory);
953         }
954         return outputDir;
955     }
956 
957     /**
958      * Name a file from the -lib dir (used for imported grammars and .tokens files).
959      *
960      * If we do not locate the file in the library directory, then we try
961      * the location of the originating grammar.
962      *
963      * @param fileName input name we are looking for
964      * @return Path to file that we think should be the import file
965      *
966      * @throws java.io.IOException
967      */
968     public String getLibraryFile(String fileName) throws IOException {
969 
970         // First, see if we can find the file in the library directory
971         //
972         File f = new File(getLibraryDirectory() + File.separator + fileName);
973 
974         if (f.exists()) {
975 
976             // Found in the library directory
977             //
978             return f.getAbsolutePath();
979         }
980 
981         // Need to assume it is in the same location as the input file. Note that
982         // this is only relevant for external build tools and when the input grammar
983         // was specified relative to the source directory (working directory if using
984         // the command line).
985         //
986         return parentGrammarDirectory + File.separator + fileName;
987     }
988 
989     /** Return the directory containing the grammar file for this grammar.
990      *  Normally this is a relative path from the current directory.  People will
991      *  often do "java org.antlr.Tool grammars/*.g3"  So the file will be
992      *  "grammars/foo.g3" etc...  This method returns "grammars".
993      *
994      *  If we have been given a specific input directory as a base, then
995      *  we must find the directory relative to this directory, unless the
996      *  file name is given to us in absolute terms.
997      */
998     public String getFileDirectory(String fileName) {
999 
1000         File f;
1001         if (haveInputDir && !fileName.startsWith(File.separator)) {
1002             f = new File(inputDirectory, fileName);
1003         }
1004         else {
1005             f = new File(fileName);
1006         }
1007         // And ask Java what the base directory of this location is
1008         //
1009         return f.getParent();
1010     }
1011 
1012     /** Return a File descriptor for vocab file.  Look in library or
1013      *  in -o output path.  antlr -o foo T.g U.g where U needs T.tokens
1014      *  won't work unless we look in foo too. If we do not find the
1015      *  file in the lib directory, then we must assume that the .tokens file
1016      *  is going to be generated as part of this build and we have defined
1017      *  .tokens files so that they ALWAYS are generated in the base output
1018      *  directory, which means the current directory for the command line tool if there
1019      *  was no output directory specified.
1020      */
1021     public File getImportedVocabFile(String vocabName) {
1022 
1023         File f = new File(getLibraryDirectory(),
1024                           File.separator +
1025                           vocabName +
1026                           CodeGenerator.VOCAB_FILE_EXTENSION);
1027         if (f.exists()) {
1028             return f;
1029         }
1030 
1031         // We did not find the vocab file in the lib directory, so we need
1032         // to look for it in the output directory which is where .tokens
1033         // files are generated (in the base, not relative to the input
1034         // location.)
1035         //
1036         if (haveOutputDir) {
1037             f = new File(getOutputDirectory(), vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
1038         }
1039         else {
1040             f = new File(vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
1041         }
1042         return f;
1043     }
1044 
1045     /** If the tool needs to panic/exit, how do we do that?
1046      */
1047     public void panic() {
1048         throw new Error("ANTLR panic");
1049     }
1050 
1051     /** Return a time stamp string accurate to sec: yyyy-mm-dd hh:mm:ss
1052      */
1053     public static String getCurrentTimeStamp() {
1054         GregorianCalendar calendar = new java.util.GregorianCalendar();
1055         int y = calendar.get(Calendar.YEAR);
1056         int m = calendar.get(Calendar.MONTH) + 1; // zero-based for months
1057         int d = calendar.get(Calendar.DAY_OF_MONTH);
1058         int h = calendar.get(Calendar.HOUR_OF_DAY);
1059         int min = calendar.get(Calendar.MINUTE);
1060         int sec = calendar.get(Calendar.SECOND);
1061         String sy = String.valueOf(y);
1062         String sm = m < 10 ? "0" + m : String.valueOf(m);
1063         String sd = d < 10 ? "0" + d : String.valueOf(d);
1064         String sh = h < 10 ? "0" + h : String.valueOf(h);
1065         String smin = min < 10 ? "0" + min : String.valueOf(min);
1066         String ssec = sec < 10 ? "0" + sec : String.valueOf(sec);
1067         return new StringBuffer().append(sy).append("-").append(sm).append("-").append(sd).append(" ").append(sh).append(":").append(smin).append(":").append(ssec).toString();
1068     }
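    // A more compact equivalent (sketch) using java.util.Formatter date conversions:
    //
    //     return String.format("%1$tF %1$tT", new GregorianCalendar());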
1069 
1070     /**
1071      * Provide the List of all grammar file names that the ANTLR tool will
1072      * process or has processed.
1073      *
1074      * @return the grammarFileNames
1075      */
1076     public List<String> getGrammarFileNames() {
1077         return grammarFileNames;
1078     }
1079 
1080     /**
1081      * Indicates whether ANTLR has generated or will generate a description of
1082      * all the NFAs in <a href="http://www.graphviz.org">Dot format</a>
1083      *
1084      * @return the generate_NFA_dot
1085      */
1086     public boolean isGenerate_NFA_dot() {
1087         return generate_NFA_dot;
1088     }
1089 
1090     /**
1091      * Indicates whether ANTLR has generated or will generate a description of
1092      * all the DFAs in <a href="http://www.graphviz.org">Dot format</a>
1093      *
1094      * @return the generate_DFA_dot
1095      */
1096     public boolean isGenerate_DFA_dot() {
1097         return generate_DFA_dot;
1098     }
1099 
1100     /**
1101      * Return the Path to the base output directory, where ANTLR
1102      * will generate all the output files for the current language target as
1103      * well as any ancillary files such as .tokens vocab files.
1104      *
1105      * @return the output Directory
1106      */
1107     public String getOutputDirectory() {
1108         return outputDirectory;
1109     }
1110 
1111     /**
1112      * Return the Path to the directory in which ANTLR will search for ancillary
1113      * files such as .tokens vocab files and imported grammar files.
1114      *
1115      * @return the lib Directory
1116      */
1117     public String getLibraryDirectory() {
1118         return libDirectory;
1119     }
1120 
1121     /**
1122      * Indicate if ANTLR has generated, or will generate a debug version of the
1123      * recognizer. Debug versions of a parser communicate with a debugger such
1124      * as that contained in ANTLRWorks and at start up will 'hang' waiting for
1125      * a connection on an IP port (49100 by default).
1126      *
1127      * @return the debug flag
1128      */
1129     public boolean isDebug() {
1130         return debug;
1131     }
1132 
1133     /**
1134      * Indicate whether ANTLR has generated, or will generate a version of the
1135      * recognizer that prints trace messages on entry and exit of each rule.
1136      *
1137      * @return the trace flag
1138      */
1139     public boolean isTrace() {
1140         return trace;
1141     }
1142 
1143     /**
1144      * Indicates whether ANTLR has generated or will generate a version of the
1145      * recognizer that gathers statistics about its execution, which it prints when
1146      * it terminates.
1147      *
1148      * @return the profile
1149      */
1150     public boolean isProfile() {
1151         return profile;
1152     }
1153 
1154     /**
1155      * Indicates whether ANTLR has generated or will generate a report of various
1156      * elements of the grammar analysis, once it has finished analyzing a grammar
1157      * file.
1158      *
1159      * @return the report flag
1160      */
1161     public boolean isReport() {
1162         return report;
1163     }
1164 
1165     /**
1166      * Indicates whether ANTLR has printed, or will print, a version of the input grammar
1167      * file(s) that is stripped of any action code embedded within.
1168      *
1169      * @return the printGrammar flag
1170      */
1171     public boolean isPrintGrammar() {
1172         return printGrammar;
1173     }
1174 
1175     /**
1176      * Indicates whether ANTLR has supplied, or will supply, a list of all the things
1177      * that the input grammar depends upon and all the things that will be generated
1178      * when that grammar is successfully analyzed.
1179      *
1180      * @return the depend flag
1181      */
1182     public boolean isDepend() {
1183         return depend;
1184     }
1185 
1186     /**
1187      * Indicates whether ANTLR will force all files to the output directory, even
1188      * if the input files have relative paths from the input directory.
1189      *
1190      * @return the forceAllFilesToOutputDir flag
1191      */
1192     public boolean isForceAllFilesToOutputDir() {
1193         return forceAllFilesToOutputDir;
1194     }
1195 
1196     /**
1197      * Indicates whether ANTLR will be verbose when analyzing grammar files, such as
1198      * displaying the names of the files it is generating and similar information.
1199      *
1200      * @return the verbose flag
1201      */
1202     public boolean isVerbose() {
1203         return verbose;
1204     }
1205 
1206     /**
1207      * Provide the current setting of the conversion timeout on DFA creation.
1208      *
1209      * @return DFA creation timeout value in milliseconds
1210      */
1211     public int getConversionTimeout() {
1212         return DFA.MAX_TIME_PER_DFA_CREATION;
1213     }
1214 
1215     /**
1216      * Returns the current setting of the message format descriptor
1217      * @return Current message format
1218      */
1219     public String getMessageFormat() {
1220         return ErrorManager.getMessageFormat().toString();
1221     }
1222 
1223     /**
1224      * Returns the number of errors that the analysis/processing threw up.
1225      * @return Error count
1226      */
1227     public int getNumErrors() {
1228         return ErrorManager.getNumErrors();
1229     }
1230 
1231     /**
1232      * Indicate whether the tool will analyze the dependencies of the provided grammar
1233      * file list and ensure that grammars with dependencies are built
1234      * after any of the other grammars in the list that they are dependent on. Setting
1235      * this option also has the side effect that any grammars that are includes for other
1236      * grammars in the list are excluded from individual analysis, which allows the caller
1237      * to invoke the tool via org.antlr.tool -make *.g and not worry about the inclusion
1238      * of grammars that are just includes for other grammars or what order the grammars
1239      * appear on the command line.
1240      *
1241      * This option was coded to make life easier for tool integration (such as Maven) but
1242      * may also be useful at the command line.
1243      *
1244      * @return true if the tool is currently configured to analyze and sort grammar files.
1245      */
1246     public boolean getMake() {
1247         return make;
1248     }
1249 
1250     /**
1251      * Set the message format to one of ANTLR, gnu, vs2005
1252      *
1253      * @param format
1254      */
1255     public void setMessageFormat(String format) {
1256         ErrorManager.setFormat(format);
1257     }
1258 
1259     /** Provide the List of all grammar file names that the ANTLR tool should process.
1260      *
1261      * @param grammarFileNames The list of grammar files to process
1262      */
1263     public void setGrammarFileNames(List<String> grammarFileNames) {
1264         this.grammarFileNames = grammarFileNames;
1265     }
1266 
1267     public void addGrammarFile(String grammarFileName) {
1268         if (!grammarFileNames.contains(grammarFileName)) {
1269             grammarFileNames.add(grammarFileName);
1270         }
1271     }
1272 
1273     /**
1274      * Indicate whether ANTLR should generate a description of
1275      * all the NFAs in <a href="http://www.graphviz.org">Dot format</a>
1276      *
1277      * @param generate_NFA_dot True to generate dot descriptions
1278      */
1279     public void setGenerate_NFA_dot(boolean generate_NFA_dot) {
1280         this.generate_NFA_dot = generate_NFA_dot;
1281     }
1282 
1283     /**
1284      * Indicates whether ANTLR should generate a description of
1285      * all the DFAs in <a href="http://www.graphviz.org">Dot format</a>
1286      *
1287      * @param generate_DFA_dot True to generate dot descriptions
1288      */
1289     public void setGenerate_DFA_dot(boolean generate_DFA_dot) {
1290         this.generate_DFA_dot = generate_DFA_dot;
1291     }
1292 
1293     /**
1294      * Set the Path to the directory in which ANTLR will search for ancillary
1295      * files such as .tokens vocab files and imported grammar files.
1296      *
1297      * @param libDirectory the libDirectory to set
1298      */
1299     public void setLibDirectory(String libDirectory) {
1300         this.libDirectory = libDirectory;
1301     }
1302 
1303     /**
1304      * Indicate whether ANTLR should generate a debug version of the
1305      * recognizer. Debug versions of a parser communicate with a debugger such
1306      * as that contained in ANTLRWorks and at start up will 'hang' waiting for
1307      * a connection on an IP port (49100 by default).
1308      *
1309      * @param debug true to generate a debug mode parser
1310      */
1311     public void setDebug(boolean debug) {
1312         this.debug = debug;
1313     }
1314 
1315     /**
1316      * Indicate whether ANTLR should generate a version of the
1317      * recognizer that prints trace messages on entry and exit of each rule
1318      *
1319      * @param trace true to generate a tracing parser
1320      */
1321     public void setTrace(boolean trace) {
1322         this.trace = trace;
1323     }
1324 
1325     /**
1326      * Indicate whether ANTLR should generate a version of the
1327      * recognizer that gathers statistics about its execution, which it prints when
1328      * it terminates.
1329      *
1330      * @param profile true to generate a profiling parser
1331      */
1332     public void setProfile(boolean profile) {
1333         this.profile = profile;
1334     }
1335 
1336     /**
1337      * Indicate whether ANTLR should generate a report of various
1338      * elements of the grammar analysis, once it has finished analyzing a grammar
1339      * file.
1340      *
1341      * @param report true to generate the analysis report
1342      */
1343     public void setReport(boolean report) {
1344         this.report = report;
1345     }
1346 
1347     /**
1348      * Indicate whether ANTLR should print a version of the input grammar
1349      * file(s) that is stripped of any action code embedded within.
1350      *
1351      * @param printGrammar true to generate a stripped file
1352      */
1353     public void setPrintGrammar(boolean printGrammar) {
1354         this.printGrammar = printGrammar;
1355     }
1356 
1357     /**
1358      * Indicate whether ANTLR should supply a list of all the things
1359      * that the input grammar depends upon and all the things that will be generated
1360      * when that grammar is successfully analyzed.
1361      *
1362      * @param depend true to get depends set rather than process the grammar
1363      */
1364     public void setDepend(boolean depend) {
1365         this.depend = depend;
1366     }
1367 
1368     /**
1369      * Indicates whether ANTLR will force all files to the output directory, even
1370      * if the input files have relative paths from the input directory.
1371      *
1372      * @param forceAllFilesToOutputDir true to force files to output directory
1373      */
1374     public void setForceAllFilesToOutputDir(boolean forceAllFilesToOutputDir) {
1375         this.forceAllFilesToOutputDir = forceAllFilesToOutputDir;
1376     }
1377 
1378     /**
1379      * Indicate whether ANTLR should be verbose when analyzing grammar files, such as
1380      * displaying the names of the files it is generating and similar information.
1381      *
1382      * @param verbose true to be verbose
1383      */
1384     public void setVerbose(boolean verbose) {
1385         this.verbose = verbose;
1386     }
1387 
1388     /**
1389      * Indicate whether the tool should analyze the dependencies of the provided grammar
1390      * file list and ensure that the grammars with dependencies are built
1391      * after any of the other grammars in the list that they are dependent on. Setting
1392      * this option also has the side effect that any grammars that are includes for other
1393      * grammars in the list are excluded from individual analysis, which allows the caller
1394      * to invoke the tool via org.antlr.tool -make *.g and not worry about the inclusion
1395      * of grammars that are just includes for other grammars or what order the grammars
1396      * appear on the command line.
1397      *
1398      * This option was coded to make life easier for tool integration (such as Maven) but
1399      * may also be useful at the command line.
1400      *
1401      * @param make
1402      */
1403     public void setMake(boolean make) {
1404         this.make = make;
1405     }
1406 
1407 }
1408