1 /*
2  * [The "BSD license"]
3  *  Copyright (c) 2010 Terence Parr
4  *  All rights reserved.
5  *
6  *  Redistribution and use in source and binary forms, with or without
7  *  modification, are permitted provided that the following conditions
8  *  are met:
9  *  1. Redistributions of source code must retain the above copyright
10  *      notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *      notice, this list of conditions and the following disclaimer in the
13  *      documentation and/or other materials provided with the distribution.
14  *  3. The name of the author may not be used to endorse or promote products
15  *      derived from this software without specific prior written permission.
16  *
17  *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 package org.antlr;
29 
30 import org.antlr.analysis.*;
31 import org.antlr.codegen.CodeGenerator;
32 import org.antlr.misc.Graph;
33 import org.antlr.runtime.misc.Stats;
34 import org.antlr.tool.*;
35 import org.stringtemplate.v4.STGroup;
36 
37 import java.io.*;
38 import java.util.*;
39 
40 /** The main ANTLR entry point.  Read a grammar and generate a parser. */
41 public class Tool {
42 
43     public final Properties antlrSettings = new Properties();
44     public String VERSION = "3.4";
45     //public static final String VERSION = "${project.version}";
46     public static final String UNINITIALIZED_DIR = "<unset-dir>";
47     private List<String> grammarFileNames = new ArrayList<String>();
48     private boolean generate_NFA_dot = false;
49     private boolean generate_DFA_dot = false;
50     private String outputDirectory = ".";
51     private boolean haveOutputDir = false;
52     private String inputDirectory = null;
53     private String parentGrammarDirectory;
54     private String grammarOutputDirectory;
55     private boolean haveInputDir = false;
56     private String libDirectory = ".";
57     private boolean debug = false;
58     private boolean trace = false;
59     private boolean profile = false;
60     private boolean report = false;
61     private boolean printGrammar = false;
62     private boolean depend = false;
63     private boolean forceAllFilesToOutputDir = false;
64     private boolean forceRelativeOutput = false;
65     protected boolean deleteTempLexer = true;
66     private boolean verbose = false;
67     /** Don't process grammar file if generated files are newer than grammar */
68     private boolean make = false;
69     private boolean showBanner = true;
70 	private static boolean exitNow = false;
71 	private static boolean return_dont_exit = false;
72 
73 
74 	public String forcedLanguageOption; // -language L on command line
75 
76     // The internal options are for my use on the command line during dev
77     //
78     public static boolean internalOption_PrintGrammarTree = false;
79     public static boolean internalOption_PrintDFA = false;
80     public static boolean internalOption_ShowNFAConfigsInDFA = false;
81     public static boolean internalOption_watchNFAConversion = false;
82 
83     /**
84      * A list of dependency generators that are accumulated as (and if) the
85      * tool is required to sort the provided grammars into build dependency order.
86     protected Map<String, BuildDependencyGenerator> buildDependencyGenerators;
87      */
88 
89     public static void main(String[] args) {
90         Tool antlr = new Tool(args);
91 
92         if (!exitNow) {
93             antlr.process();
94 			if ( return_dont_exit ) return;
95             if (ErrorManager.getNumErrors() > 0) {
96                 System.exit(1);
97             }
98             System.exit(0);
99         }
100     }
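    // Example command-line invocation (grammar and directory names are illustrative;
    // see help() below for the full option list):
    //   java org.antlr.Tool -o build/generated MyGrammar.g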
101 
102     /**
103      * Load the properties file org/antlr/antlr.properties and populate any
104      * variables that must be initialized from it, such as the version of ANTLR.
105      */
106     private void loadResources() {
107         InputStream in = null;
108         in = this.getClass().getResourceAsStream("antlr.properties");
109 
110         // If we found the resource, then load it, otherwise revert to the
111         // defaults.
112         //
113         if (in != null) {
114             try {
115                 // Load the resources into the map
116                 //
117                 antlrSettings.load(in);
118 
119                 // Set any variables that we need to populate from the resources
120                 //
121 //                VERSION = antlrSettings.getProperty("antlr.version");
122             } catch (Exception e) {
123                 // Do nothing, just leave the defaults in place
124             }
125         }
126     }
127 
128     public Tool() {
129         loadResources();
130     }
131 
132     public Tool(String[] args) {
133         loadResources();
134 
135         // Set all the options and pick up all the named grammar files
136         processArgs(args);
137     }
138 
139     public void processArgs(String[] args) {
140 
141         if (isVerbose()) {
142             ErrorManager.info("ANTLR Parser Generator  Version " + VERSION);
143             showBanner = false;
144         }
145 
146         if (args == null || args.length == 0) {
147             help();
148             return;
149         }
150         for (int i = 0; i < args.length; i++) {
151             if (args[i].equals("-o") || args[i].equals("-fo")) {
152                 if (i + 1 >= args.length) {
153                     System.err.println("missing output directory with -fo/-o option; ignoring");
154                 }
155                 else {
156                     if (args[i].equals("-fo")) { // force output into dir
157                         setForceAllFilesToOutputDir(true);
158                     }
159                     i++;
160                     outputDirectory = args[i];
161                     if (outputDirectory.endsWith("/") ||
162                         outputDirectory.endsWith("\\")) {
163                         outputDirectory =
164                             outputDirectory.substring(0, getOutputDirectory().length() - 1);
165                     }
166                     File outDir = new File(outputDirectory);
167                     haveOutputDir = true;
168                     if (outDir.exists() && !outDir.isDirectory()) {
169                         ErrorManager.error(ErrorManager.MSG_OUTPUT_DIR_IS_FILE, outputDirectory);
170                         setLibDirectory(".");
171                     }
172                 }
173             }
174 			else if (args[i].equals("-lib")) {
175 				if (i + 1 >= args.length) {
176 					System.err.println("missing library directory with -lib option; ignoring");
177 				}
178 				else {
179 					i++;
180 					setLibDirectory(args[i]);
181 					if (getLibraryDirectory().endsWith("/") ||
182 						getLibraryDirectory().endsWith("\\")) {
183 						setLibDirectory(getLibraryDirectory().substring(0, getLibraryDirectory().length() - 1));
184 					}
185 					File outDir = new File(getLibraryDirectory());
186 					if (!outDir.exists()) {
187 						ErrorManager.error(ErrorManager.MSG_DIR_NOT_FOUND, getLibraryDirectory());
188 						setLibDirectory(".");
189 					}
190 				}
191 			}
192 			else if (args[i].equals("-language")) {
193 				if (i + 1 >= args.length) {
194 					System.err.println("missing language name; ignoring");
195 				}
196 				else {
197 					i++;
198 					forcedLanguageOption = args[i];
199 				}
200 			}
201             else if (args[i].equals("-nfa")) {
202                 setGenerate_NFA_dot(true);
203             }
204             else if (args[i].equals("-dfa")) {
205                 setGenerate_DFA_dot(true);
206             }
207             else if (args[i].equals("-debug")) {
208                 setDebug(true);
209             }
210             else if (args[i].equals("-trace")) {
211                 setTrace(true);
212             }
213             else if (args[i].equals("-report")) {
214                 setReport(true);
215             }
216             else if (args[i].equals("-profile")) {
217                 setProfile(true);
218             }
219             else if (args[i].equals("-print")) {
220                 setPrintGrammar(true);
221             }
222             else if (args[i].equals("-depend")) {
223                 setDepend(true);
224             }
225             else if (args[i].equals("-verbose")) {
226                 setVerbose(true);
227             }
228             else if (args[i].equals("-version")) {
229                 version();
230                 exitNow = true;
231             }
232             else if (args[i].equals("-make")) {
233                 setMake(true);
234             }
235             else if (args[i].equals("-message-format")) {
236                 if (i + 1 >= args.length) {
237                     System.err.println("missing output format with -message-format option; using default");
238                 }
239                 else {
240                     i++;
241                     ErrorManager.setFormat(args[i]);
242                 }
243             }
244             else if (args[i].equals("-Xgrtree")) {
245                 internalOption_PrintGrammarTree = true; // print grammar tree
246             }
247             else if (args[i].equals("-Xdfa")) {
248                 internalOption_PrintDFA = true;
249             }
250             else if (args[i].equals("-Xnoprune")) {
251                 DFAOptimizer.PRUNE_EBNF_EXIT_BRANCHES = false;
252             }
253             else if (args[i].equals("-Xnocollapse")) {
254                 DFAOptimizer.COLLAPSE_ALL_PARALLEL_EDGES = false;
255             }
256             else if (args[i].equals("-Xdbgconversion")) {
257                 NFAToDFAConverter.debug = true;
258             }
259             else if (args[i].equals("-Xmultithreaded")) {
260                 NFAToDFAConverter.SINGLE_THREADED_NFA_CONVERSION = false;
261             }
262             else if (args[i].equals("-Xnomergestopstates")) {
263                 DFAOptimizer.MERGE_STOP_STATES = false;
264             }
265             else if (args[i].equals("-Xdfaverbose")) {
266                 internalOption_ShowNFAConfigsInDFA = true;
267             }
268             else if (args[i].equals("-Xwatchconversion")) {
269                 internalOption_watchNFAConversion = true;
270             }
271             else if (args[i].equals("-XdbgST")) {
272                 CodeGenerator.LAUNCH_ST_INSPECTOR = true;
273 				STGroup.trackCreationEvents = true;
274 				return_dont_exit = true;
275             }
276             else if (args[i].equals("-Xmaxinlinedfastates")) {
277                 if (i + 1 >= args.length) {
278                     System.err.println("missing max inline dfa states -Xmaxinlinedfastates option; ignoring");
279                 }
280                 else {
281                     i++;
282                     CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE = Integer.parseInt(args[i]);
283                 }
284             }
285             else if (args[i].equals("-Xmaxswitchcaselabels")) {
286                 if (i + 1 >= args.length) {
287                     System.err.println("missing max switch case labels -Xmaxswitchcaselabels option; ignoring");
288                 }
289                 else {
290                     i++;
291                     CodeGenerator.MAX_SWITCH_CASE_LABELS = Integer.parseInt(args[i]);
292                 }
293             }
294             else if (args[i].equals("-Xminswitchalts")) {
295                 if (i + 1 >= args.length) {
296                     System.err.println("missing min switch alternatives -Xminswitchalts option; ignoring");
297                 }
298                 else {
299                     i++;
300                     CodeGenerator.MIN_SWITCH_ALTS = Integer.parseInt(args[i]);
301                 }
302             }
303             else if (args[i].equals("-Xm")) {
304                 if (i + 1 >= args.length) {
305                     System.err.println("missing max recursion with -Xm option; ignoring");
306                 }
307                 else {
308                     i++;
309                     NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK = Integer.parseInt(args[i]);
310                 }
311             }
312             else if (args[i].equals("-Xmaxdfaedges")) {
313                 if (i + 1 >= args.length) {
314                     System.err.println("missing max number of edges with -Xmaxdfaedges option; ignoring");
315                 }
316                 else {
317                     i++;
318                     DFA.MAX_STATE_TRANSITIONS_FOR_TABLE = Integer.parseInt(args[i]);
319                 }
320             }
321             else if (args[i].equals("-Xconversiontimeout")) {
322                 if (i + 1 >= args.length) {
323                     System.err.println("missing max time in ms -Xconversiontimeout option; ignoring");
324                 }
325                 else {
326                     i++;
327                     DFA.MAX_TIME_PER_DFA_CREATION = Integer.parseInt(args[i]);
328                 }
329             }
330 			else if (args[i].equals("-Xnfastates")) {
331 				DecisionProbe.verbose = true;
332 			}
333 			else if (args[i].equals("-Xsavelexer")) {
334 				deleteTempLexer = false;
335 			}
336             else if (args[i].equals("-X")) {
337                 Xhelp();
338             }
339             else {
340                 if (args[i].charAt(0) != '-') {
341                     // Must be the grammar file
342                     addGrammarFile(args[i]);
343                 }
344             }
345         }
346     }
347 
348     /*
349     protected void checkForInvalidArguments(String[] args, BitSet cmdLineArgValid) {
350     // check for invalid command line args
351     for (int a = 0; a < args.length; a++) {
352     if (!cmdLineArgValid.member(a)) {
353     System.err.println("invalid command-line argument: " + args[a] + "; ignored");
354     }
355     }
356     }
357      */
358 
359     /**
360      * Checks to see if the list of outputFiles all exist, and have
361      * last-modified timestamps which are later than the last-modified
362      * timestamp of all the grammar files involved in building the output
363      * (imports must be checked). If these conditions hold, the method
364      * returns false, otherwise, it returns true.
365      *
366      * @param grammarFileName The grammar file we are checking
367      */
368     public boolean buildRequired(String grammarFileName)
369         throws IOException
370     {
371         BuildDependencyGenerator bd =
372             new BuildDependencyGenerator(this, grammarFileName);
373 
374         List<File> outputFiles = bd.getGeneratedFileList();
375         List<File> inputFiles = bd.getDependenciesFileList();
376         // Note that input directory must be set to use buildRequired
377         File grammarFile;
378         if (haveInputDir) {
379             grammarFile = new File(inputDirectory, grammarFileName);
380         }
381         else {
382             grammarFile = new File(grammarFileName);
383         }
384         long grammarLastModified = grammarFile.lastModified();
385         for (File outputFile : outputFiles) {
386             if (!outputFile.exists() || grammarLastModified > outputFile.lastModified()) {
387                 // One of the output files does not exist or is out of date, so we must build it
388                 return true;
389             }
390             // Check all of the imported grammars and see if any of these are younger
391             // than any of the output files.
392             if (inputFiles != null) {
393                 for (File inputFile : inputFiles) {
394 
395                     if (inputFile.lastModified() > outputFile.lastModified()) {
396                         // One of the imported grammar files has been updated so we must build
397                         return true;
398                     }
399                 }
400             }
401         }
402         if (isVerbose()) {
403             System.out.println("Grammar " + grammarFile + " is up to date - build skipped");
404         }
405         return false;
406     }
407 
408     public void process() {
409         boolean exceptionWhenWritingLexerFile = false;
410         String lexerGrammarFileName = null;		// necessary at this scope to have access in the catch below
411 
412         // Have to be tricky here when Maven or build tools call in and must new Tool()
413         // before setting options. The banner won't display that way!
414         if (isVerbose() && showBanner) {
415             ErrorManager.info("ANTLR Parser Generator  Version " + VERSION);
416             showBanner = false;
417         }
418 
419         try {
420             sortGrammarFiles(); // update grammarFileNames
421         }
422         catch (Exception e) {
423             ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR,e);
424         }
425         catch (Error e) {
426             ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, e);
427         }
428 
429         for (String grammarFileName : grammarFileNames) {
430             // If we are in make mode (to support build tools like Maven) and the
431             // file is already up to date, then we do not build it (and in verbose mode
432             // we will say so).
433             if (make) {
434                 try {
435                     if ( !buildRequired(grammarFileName) ) continue;
436                 }
437                 catch (Exception e) {
438                     ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR,e);
439                 }
440             }
441 
442             if (isVerbose() && !isDepend()) {
443                 System.out.println(grammarFileName);
444             }
445             try {
446                 if (isDepend()) {
447                     BuildDependencyGenerator dep =
448                         new BuildDependencyGenerator(this, grammarFileName);
449                     /*
450                     List outputFiles = dep.getGeneratedFileList();
451                     List dependents = dep.getDependenciesFileList();
452                     System.out.println("output: "+outputFiles);
453                     System.out.println("dependents: "+dependents);
454                      */
455                     System.out.println(dep.getDependencies().render());
456                     continue;
457                 }
458 
459                 Grammar rootGrammar = getRootGrammar(grammarFileName);
460                 // we now have all grammars read in as ASTs
461                 // (i.e., root and all delegates)
462 				rootGrammar.composite.assignTokenTypes();
463 				//rootGrammar.composite.translateLeftRecursiveRules();
464 				rootGrammar.addRulesForSyntacticPredicates();
465 				rootGrammar.composite.defineGrammarSymbols();
466                 rootGrammar.composite.createNFAs();
467 
468                 generateRecognizer(rootGrammar);
469 
470                 if (isPrintGrammar()) {
471                     rootGrammar.printGrammar(System.out);
472                 }
473 
474                 if (isReport()) {
475 					GrammarReport2 greport = new GrammarReport2(rootGrammar);
476 					System.out.print(greport.toString());
477 //                    GrammarReport greport = new GrammarReport(rootGrammar);
478 //                    System.out.println(greport.toString());
479 //                    // print out a backtracking report too (that is not encoded into log)
480 //                    System.out.println(greport.getBacktrackingReport());
481                 }
482                 if (isProfile()) {
483                     GrammarReport greport = new GrammarReport(rootGrammar);
484                     Stats.writeReport(GrammarReport.GRAMMAR_STATS_FILENAME,
485                                       greport.toNotifyString());
486                 }
487 
488                 // now handle the lexer if one was created for a merged spec
489                 String lexerGrammarStr = rootGrammar.getLexerGrammar();
490                 //System.out.println("lexer rootGrammar:\n"+lexerGrammarStr);
491                 if (rootGrammar.type == Grammar.COMBINED && lexerGrammarStr != null) {
492                     lexerGrammarFileName = rootGrammar.getImplicitlyGeneratedLexerFileName();
493                     try {
494                         Writer w = getOutputFile(rootGrammar, lexerGrammarFileName);
495                         w.write(lexerGrammarStr);
496                         w.close();
497                     }
498                     catch (IOException e) {
499                         // emit different error message when creating the implicit lexer fails
500                         // due to write permission error
501                         exceptionWhenWritingLexerFile = true;
502                         throw e;
503                     }
504                     try {
505                         StringReader sr = new StringReader(lexerGrammarStr);
506                         Grammar lexerGrammar = new Grammar(this);
507                         lexerGrammar.composite.watchNFAConversion = internalOption_watchNFAConversion;
508                         lexerGrammar.implicitLexer = true;
509                         //lexerGrammar.setTool(this);
510                         File lexerGrammarFullFile =
511                             new File(getFileDirectory(lexerGrammarFileName), lexerGrammarFileName);
512                         lexerGrammar.setFileName(lexerGrammarFullFile.toString());
513 
514                         lexerGrammar.importTokenVocabulary(rootGrammar);
515                         lexerGrammar.parseAndBuildAST(sr);
516 
517                         sr.close();
518 
519                         lexerGrammar.composite.assignTokenTypes();
520 						lexerGrammar.addRulesForSyntacticPredicates();
521                         lexerGrammar.composite.defineGrammarSymbols();
522                         lexerGrammar.composite.createNFAs();
523 
524                         generateRecognizer(lexerGrammar);
525                     }
526                     finally {
527                         // make sure we clean up
528                         if (deleteTempLexer) {
529                             File outputDir = getOutputDirectory(lexerGrammarFileName);
530                             File outputFile = new File(outputDir, lexerGrammarFileName);
531                             outputFile.delete();
532                         }
533                     }
534                 }
535             }
536             catch (IOException e) {
537                 if (exceptionWhenWritingLexerFile) {
538                     ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, e);
539                 }
540                 else {
541                     ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE,
542                                        grammarFileName);
543                 }
544             }
545             catch (Exception e) {
546                 ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, grammarFileName, e);
547             }
548             /*
549            finally {
550            System.out.println("creates="+ Interval.creates);
551            System.out.println("hits="+ Interval.hits);
552            System.out.println("misses="+ Interval.misses);
553            System.out.println("outOfRange="+ Interval.outOfRange);
554            }
555             */
556         }
557     }
558 
559     public void sortGrammarFiles() throws IOException {
560         //System.out.println("Grammar names "+getGrammarFileNames());
561         Graph g = new Graph();
562         List<String> missingFiles = new ArrayList<String>();
563         for (String gfile : grammarFileNames) {
564             try {
565                 GrammarSpelunker grammar = new GrammarSpelunker(inputDirectory, gfile);
566                 grammar.parse();
567                 String vocabName = grammar.getTokenVocab();
568                 String grammarName = grammar.getGrammarName();
569                 // Make all grammars depend on any tokenVocab options
570                 if ( vocabName!=null ) g.addEdge(gfile, vocabName+CodeGenerator.VOCAB_FILE_EXTENSION);
571                 // Make all generated tokens files depend on their grammars
572                 g.addEdge(grammarName+CodeGenerator.VOCAB_FILE_EXTENSION, gfile);
573             }
574             catch (FileNotFoundException fnfe) {
575                 ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE, gfile);
576                 missingFiles.add(gfile);
577             }
578         }
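        // Topologically sort the dependency graph. The sorted list mixes grammar file names with
        // .tokens vocab names, so the loop below keeps only existing .g/.g3 files, leaving
        // grammarFileNames in build-dependency order.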
579         List<Object> sorted = g.sort();
580         //System.out.println("sorted="+sorted);
581         grammarFileNames.clear(); // wipe so we can give new ordered list
582         for (int i = 0; i < sorted.size(); i++) {
583             String f = (String)sorted.get(i);
584             if ( missingFiles.contains(f) ) continue;
585             if ( !(f.endsWith(".g") || f.endsWith(".g3")) ) continue;
586             grammarFileNames.add(f);
587         }
588         //System.out.println("new grammars="+grammarFileNames);
589     }
590 
591     /** Get a grammar mentioned on the command-line and any delegates */
592     public Grammar getRootGrammar(String grammarFileName)
593         throws IOException
594     {
595         //ST.setLintMode(true);
596         // grammars mentioned on command line are either roots or single grammars.
597         // create the necessary composite in case it's got delegates; even
598         // single grammar needs it to get token types.
599         CompositeGrammar composite = new CompositeGrammar();
600         Grammar grammar = new Grammar(this, grammarFileName, composite);
601         composite.setDelegationRoot(grammar);
602         FileReader fr = null;
603         File f = null;
604 
605         if (haveInputDir) {
606             f = new File(inputDirectory, grammarFileName);
607         }
608         else {
609             f = new File(grammarFileName);
610         }
611 
612         // Store the location of this grammar as if we import files, we can then
613         // search for imports in the same location as the original grammar as well as in
614         // the lib directory.
615         //
616         parentGrammarDirectory = f.getParent();
617 
618         if (grammarFileName.lastIndexOf(File.separatorChar) == -1) {
619             grammarOutputDirectory = ".";
620         }
621         else {
622             grammarOutputDirectory = grammarFileName.substring(0, grammarFileName.lastIndexOf(File.separatorChar));
623         }
624         fr = new FileReader(f);
625         BufferedReader br = new BufferedReader(fr);
626         grammar.parseAndBuildAST(br);
627         composite.watchNFAConversion = internalOption_watchNFAConversion;
628         br.close();
629         fr.close();
630         return grammar;
631     }
632 
633     /** Create NFA, DFA and generate code for grammar.
634      *  Create NFA for any delegates first.  Once all NFA are created,
635      *  it's ok to create DFA, which must check for left-recursion.  That check
636      *  is done by walking the full NFA, which therefore must be complete.
637      *  After all NFA, comes DFA conversion for root grammar then code gen for
638      *  root grammar.  DFA and code gen for delegates comes next.
639      */
640     protected void generateRecognizer(Grammar grammar) {
641         String language = (String) grammar.getOption("language");
642         if (language != null) {
643             CodeGenerator generator = new CodeGenerator(this, grammar, language);
644             grammar.setCodeGenerator(generator);
645             generator.setDebug(isDebug());
646             generator.setProfile(isProfile());
647             generator.setTrace(isTrace());
648 
649             // generate NFA early in case of crash later (for debugging)
650             if (isGenerate_NFA_dot()) {
651                 generateNFAs(grammar);
652             }
653 
654             // GENERATE CODE
655             generator.genRecognizer();
656 
657             if (isGenerate_DFA_dot()) {
658                 generateDFAs(grammar);
659             }
660 
661             List<Grammar> delegates = grammar.getDirectDelegates();
662             for (int i = 0; delegates != null && i < delegates.size(); i++) {
663                 Grammar delegate = (Grammar) delegates.get(i);
664                 if (delegate != grammar) { // already processing this one
665                     generateRecognizer(delegate);
666                 }
667             }
668         }
669     }
670 
671     public void generateDFAs(Grammar g) {
672         for (int d = 1; d <= g.getNumberOfDecisions(); d++) {
673             DFA dfa = g.getLookaheadDFA(d);
674             if (dfa == null) {
675                 continue; // not there for some reason, ignore
676             }
677             DOTGenerator dotGenerator = new DOTGenerator(g);
678             String dot = dotGenerator.getDOT(dfa.startState);
679             String dotFileName = g.name + "." + "dec-" + d;
680             if (g.implicitLexer) {
681                 dotFileName = g.name + Grammar.grammarTypeToFileNameSuffix[g.type] + "." + "dec-" + d;
682             }
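            // writeDOTFile appends ".dot", so e.g. decision 3 of grammar T ends up as
            // T.dec-3.dot in the output directory.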
683             try {
684                 writeDOTFile(g, dotFileName, dot);
685             } catch (IOException ioe) {
686                 ErrorManager.error(ErrorManager.MSG_CANNOT_GEN_DOT_FILE,
687                                    dotFileName,
688                                    ioe);
689             }
690         }
691     }
692 
693     protected void generateNFAs(Grammar g) {
694         DOTGenerator dotGenerator = new DOTGenerator(g);
695         Collection rules = g.getAllImportedRules();
696         rules.addAll(g.getRules());
697 
698         for (Iterator itr = rules.iterator(); itr.hasNext();) {
699             Rule r = (Rule) itr.next();
700             try {
701                 String dot = dotGenerator.getDOT(r.startState);
702                 if (dot != null) {
703                     writeDOTFile(g, r, dot);
704                 }
705             } catch (IOException ioe) {
706                 ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, ioe);
707             }
708         }
709     }
710 
711     protected void writeDOTFile(Grammar g, Rule r, String dot) throws IOException {
712         writeDOTFile(g, r.grammar.name + "." + r.name, dot);
713     }
714 
715     protected void writeDOTFile(Grammar g, String name, String dot) throws IOException {
716         Writer fw = getOutputFile(g, name + ".dot");
717         fw.write(dot);
718         fw.close();
719     }
720 
721     private static void version() {
722         ErrorManager.info("ANTLR Parser Generator  Version " + new Tool().VERSION);
723     }
724 
725     private static void help() {
726         ErrorManager.info("ANTLR Parser Generator  Version " + new Tool().VERSION);
727         System.err.println("usage: java org.antlr.Tool [args] file.g [file2.g file3.g ...]");
728         System.err.println("  -o outputDir          specify output directory where all output is generated");
729         System.err.println("  -fo outputDir         same as -o but force even files with relative paths to dir");
730         System.err.println("  -lib dir              specify location of token files");
731         System.err.println("  -depend               generate file dependencies");
732         System.err.println("  -report               print out a report about the grammar(s) processed");
733         System.err.println("  -print                print out the grammar without actions");
734         System.err.println("  -debug                generate a parser that emits debugging events");
735 		System.err.println("  -profile              generate a parser that computes profiling information");
736 		System.err.println("  -trace                generate a recognizer that traces rule entry/exit");
737         System.err.println("  -nfa                  generate an NFA for each rule");
738         System.err.println("  -dfa                  generate a DFA for each decision point");
739         System.err.println("  -message-format name  specify output style for messages");
740         System.err.println("  -verbose              generate ANTLR version and other information");
741         System.err.println("  -make                 only build if generated files older than grammar");
742 		System.err.println("  -version              print the version of ANTLR and exit.");
743 		System.err.println("  -language L           override language grammar option; generate L");
744         System.err.println("  -X                    display extended argument list");
745     }
746 
747     private static void Xhelp() {
748         ErrorManager.info("ANTLR Parser Generator  Version " + new Tool().VERSION);
749         System.err.println("  -Xgrtree                print the grammar AST");
750         System.err.println("  -Xdfa                   print DFA as text ");
751         System.err.println("  -Xnoprune               test lookahead against EBNF block exit branches");
752         System.err.println("  -Xnocollapse            collapse incident edges into DFA states");
753 		System.err.println("  -Xdbgconversion         dump lots of info during NFA conversion");
754 		System.err.println("  -Xconversiontimeout     use to restrict NFA conversion exponentiality");
755         System.err.println("  -Xmultithreaded         run the analysis in 2 threads");
756         System.err.println("  -Xnomergestopstates     do not merge stop states");
757         System.err.println("  -Xdfaverbose            generate DFA states in DOT with NFA configs");
758         System.err.println("  -Xwatchconversion       print a message for each NFA before converting");
759         System.err.println("  -XdbgST                 put tags at start/stop of all templates in output");
760         System.err.println("  -Xnfastates             for nondeterminisms, list NFA states for each path");
761         System.err.println("  -Xm m                   max number of rule invocations during conversion           [" + NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK + "]");
762         System.err.println("  -Xmaxdfaedges m         max \"comfortable\" number of edges for single DFA state     [" + DFA.MAX_STATE_TRANSITIONS_FOR_TABLE + "]");
763         System.err.println("  -Xmaxinlinedfastates m  max DFA states before table used rather than inlining      [" + CodeGenerator.MADSI_DEFAULT +"]");
764         System.err.println("  -Xmaxswitchcaselabels m don't generate switch() statements for dfas bigger  than m [" + CodeGenerator.MSCL_DEFAULT +"]");
765 		System.err.println("  -Xminswitchalts m       don't generate switch() statements for dfas smaller than m [" + CodeGenerator.MSA_DEFAULT + "]");
766 		System.err.println("  -Xsavelexer             don't delete temporary lexers generated from combined grammars");
767     }
768 
769     /**
770      * Set the threshold of case labels beyond which ANTLR will not instruct the target template
771      * to generate switch() { case xxx: ...
772      *
773      * @param maxSwitchCaseLabels Maximum number of case labels that ANTLR should allow the target code
774      */
775     public void setMaxSwitchCaseLabels(int maxSwitchCaseLabels) {
776         CodeGenerator.MAX_SWITCH_CASE_LABELS = maxSwitchCaseLabels;
777     }
778 
779     /**
780      * Set the threshold of the number of alts, below which ANTLR will not instruct the target
781      * template to use a switch statement.
782      *
783      * @param minSwitchAlts the minimum number of alts required to use a switch statement
784      */
785     public void setMinSwitchAlts(int minSwitchAlts) {
786         CodeGenerator.MIN_SWITCH_ALTS = minSwitchAlts;
787     }
788 
789     /**
790      * Set the location (base directory) where output files should be produced
791      * by the ANTLR tool.
792      * @param outputDirectory
793      */
794     public void setOutputDirectory(String outputDirectory) {
795         haveOutputDir = true;
796         this.outputDirectory = outputDirectory;
797     }
798 
799     /**
800      * Used by build tools to force the output files to always be
801      * relative to the base output directory, even though the tool
802      * had to set the output directory to an absolute path as it
803      * cannot rely on the working directory like command line invocation
804      * can.
805      *
806      * @param forceRelativeOutput true if output files should always be relative to base output directory
807      */
808     public void setForceRelativeOutput(boolean forceRelativeOutput) {
809         this.forceRelativeOutput = forceRelativeOutput;
810     }
811 
812     /**
813      * Set the base location of input files. Normally (when the tool is
814      * invoked from the command line), the inputDirectory is not set, but
815      * for build tools such as Maven, we need to be able to locate the input
816      * files relative to the base, as the working directory could be anywhere and
817      * changing working directories is not a valid concept for JVMs because of threading and
818      * so on. Setting the directory just means that the getFileDirectory() method will
819      * try to open files relative to this input directory.
820      *
821      * @param inputDirectory Input source base directory
822      */
823     public void setInputDirectory(String inputDirectory) {
824         this.inputDirectory = inputDirectory;
825         haveInputDir = true;
826     }
827 
828     /** This method is used by all code generators to create new output
829      *  files. If the outputDir set by -o is not present it will be created.
830      *  The final filename is sensitive to the output directory and
831      *  the directory where the grammar file was found.  If -o is /tmp
832      *  and the original grammar file was foo/t.g then output files
833      *  go in /tmp/foo.
834      *
835      *  The output dir -o spec takes precedence if it's absolute.
836      *  E.g., if the grammar file dir is absolute the output dir is given
837      *  precedence. "-o /tmp /usr/lib/t.g" results in "/tmp/T.java" as
838      *  output (assuming t.g holds T.java).
839      *
840      *  If no -o is specified, then just write to the directory where the
841      *  grammar file was found.
842      *
843      *  If outputDirectory==null then write a String.
844      */
845     public Writer getOutputFile(Grammar g, String fileName) throws IOException {
846         if (getOutputDirectory() == null) {
847             return new StringWriter();
848         }
849         // output directory is a function of where the grammar file lives
850         // for subdir/T.g, you get subdir here.  Well, depends on -o etc...
851         // But, if this is a .tokens file, then we force the output to
852         // be the base output directory (or current directory if there is not a -o)
853         //
854         File outputDir;
855         if (fileName.endsWith(CodeGenerator.VOCAB_FILE_EXTENSION)) {
856             if (haveOutputDir) {
857                 outputDir = new File(getOutputDirectory());
858             }
859             else {
860                 outputDir = new File(".");
861             }
862         }
863         else {
864             outputDir = getOutputDirectory(g.getFileName());
865         }
866         File outputFile = new File(outputDir, fileName);
867 
868         if (!outputDir.exists()) {
869             outputDir.mkdirs();
870         }
871         FileWriter fw = new FileWriter(outputFile);
872         return new BufferedWriter(fw);
873     }
874 
875     /**
876      * Return the location where ANTLR will generate output files for a given file. This is a
877      * base directory and output files will be relative to here in some cases
878      * such as when -o option is used and input files are given relative
879      * to the input directory.
880      *
881      * @param fileNameWithPath path to input source
882      * @return
883      */
884     public File getOutputDirectory(String fileNameWithPath) {
885 
886         File outputDir = new File(getOutputDirectory());
887         String fileDirectory;
888 
889         // Some files are given to us without a PATH but should
890         // still be written to the output directory in the relative path of
891         // the output directory. The file directory is either the set of sub directories
892         // or just the relative path recorded for the parent grammar. This means
893         // that when we write the tokens files, or the .java files for imported grammars
894         // that we will write them in the correct place.
895         //
896         if (fileNameWithPath.lastIndexOf(File.separatorChar) == -1) {
897 
898             // No path is included in the file name, so make the file
899             // directory the same as the parent grammar (which might still be just ""
900             // but when it is not, we will write the file in the correct place.
901             //
902             fileDirectory = grammarOutputDirectory;
903 
904         }
905         else {
906             fileDirectory = fileNameWithPath.substring(0, fileNameWithPath.lastIndexOf(File.separatorChar));
907         }
908         if (haveOutputDir) {
909             // -o /tmp /var/lib/t.g => /tmp/T.java
910             // -o subdir/output /usr/lib/t.g => subdir/output/T.java
911             // -o . /usr/lib/t.g => ./T.java
912             if ((fileDirectory != null && !forceRelativeOutput) &&
913                 (new File(fileDirectory).isAbsolute() ||
914                  fileDirectory.startsWith("~")) || // isAbsolute doesn't count this :(
915                 isForceAllFilesToOutputDir()) {
916                 // somebody set the dir, it takes precedence; write new file there
917                 outputDir = new File(getOutputDirectory());
918             }
919             else {
920                 // -o /tmp subdir/t.g => /tmp/subdir/t.g
921                 if (fileDirectory != null) {
922                     outputDir = new File(getOutputDirectory(), fileDirectory);
923                 }
924                 else {
925                     outputDir = new File(getOutputDirectory());
926                 }
927             }
928         }
929         else {
930             // they didn't specify a -o dir so just write to location
931             // where grammar is, absolute or relative, this will only happen
932             // with command line invocation as build tools will always
933             // supply an output directory.
934             //
935             outputDir = new File(fileDirectory);
936         }
937         return outputDir;
938     }
939 
940     /**
941      * Name a file from the -lib dir.  Imported grammars and .tokens files
942      *
943      * If we do not locate the file in the library directory, then we try
944      * the location of the originating grammar.
945      *
946      * @param fileName input name we are looking for
947      * @return Path to file that we think should be the import file
948      *
949      * @throws java.io.IOException
950      */
951     public String getLibraryFile(String fileName) throws IOException {
952 
953         // First, see if we can find the file in the library directory
954         //
955         File f = new File(getLibraryDirectory() + File.separator + fileName);
956 
957         if (f.exists()) {
958 
959             // Found in the library directory
960             //
961             return f.getAbsolutePath();
962         }
963 
964         // Need to assume it is in the same location as the input file. Note that
965         // this is only relevant for external build tools and when the input grammar
966         // was specified relative to the source directory (working directory if using
967         // the command line).
968         //
969         return parentGrammarDirectory + File.separator + fileName;
970     }
971 
972     /** Return the directory containing the grammar file for this grammar.
973      *  Normally this is a relative path from current directory.  People will
974      *  often do "java org.antlr.Tool grammars/*.g3"  So the file will be
975      *  "grammars/foo.g3" etc...  This method returns "grammars".
976      *
977      *  If we have been given a specific input directory as a base, then
978      *  we must find the directory relative to this directory, unless the
979      *  file name is given to us in absolute terms.
980      */
981     public String getFileDirectory(String fileName) {
982 
983         File f;
984         if (haveInputDir && !fileName.startsWith(File.separator)) {
985             f = new File(inputDirectory, fileName);
986         }
987         else {
988             f = new File(fileName);
989         }
990         // And ask Java what the base directory of this location is
991         //
992         return f.getParent();
993     }
994 
995     /** Return a File descriptor for vocab file.  Look in library or
996      *  in -o output path.  antlr -o foo T.g U.g where U needs T.tokens
997      *  won't work unless we look in foo too. If we do not find the
998      *  file in the lib directory then we must assume that the .tokens file
999      *  is going to be generated as part of this build and we have defined
1000      *  .tokens files so that they ALWAYS are generated in the base output
1001      *  directory, which means the current directory for the command line tool if there
1002      *  was no output directory specified.
1003      */
1004     public File getImportedVocabFile(String vocabName) {
1005 
1006         File f = new File(getLibraryDirectory(),
1007                           File.separator +
1008                           vocabName +
1009                           CodeGenerator.VOCAB_FILE_EXTENSION);
1010         if (f.exists()) {
1011             return f;
1012         }
1013 
1014         // We did not find the vocab file in the lib directory, so we need
1015         // to look for it in the output directory which is where .tokens
1016         // files are generated (in the base, not relative to the input
1017         // location.)
1018         //
1019         if (haveOutputDir) {
1020             f = new File(getOutputDirectory(), vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
1021         }
1022         else {
1023             f = new File(vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
1024         }
1025         return f;
1026     }
1027 
1028     /** If the tool needs to panic/exit, how do we do that?
1029      */
1030     public void panic() {
1031         throw new Error("ANTLR panic");
1032     }
1033 
1034     /** Return a time stamp string accurate to sec: yyyy-mm-dd hh:mm:ss
1035      */
1036     public static String getCurrentTimeStamp() {
1037         GregorianCalendar calendar = new java.util.GregorianCalendar();
1038         int y = calendar.get(Calendar.YEAR);
1039         int m = calendar.get(Calendar.MONTH) + 1; // zero-based for months
1040         int d = calendar.get(Calendar.DAY_OF_MONTH);
1041         int h = calendar.get(Calendar.HOUR_OF_DAY);
1042         int min = calendar.get(Calendar.MINUTE);
1043         int sec = calendar.get(Calendar.SECOND);
1044         String sy = String.valueOf(y);
1045         String sm = m < 10 ? "0" + m : String.valueOf(m);
1046         String sd = d < 10 ? "0" + d : String.valueOf(d);
1047         String sh = h < 10 ? "0" + h : String.valueOf(h);
1048         String smin = min < 10 ? "0" + min : String.valueOf(min);
1049         String ssec = sec < 10 ? "0" + sec : String.valueOf(sec);
1050         return new StringBuffer().append(sy).append("-").append(sm).append("-").append(sd).append(" ").append(sh).append(":").append(smin).append(":").append(ssec).toString();
1051     }
1052 
1053     /**
1054      * Provide the List of all grammar file names that the ANTLR tool will
1055      * process or has processed.
1056      *
1057      * @return the grammarFileNames
1058      */
1059     public List<String> getGrammarFileNames() {
1060         return grammarFileNames;
1061     }
1062 
1063     /**
1064      * Indicates whether ANTLR has generated or will generate a description of
1065      * all the NFAs in <a href="http://www.graphviz.org">Dot format</a>
1066      *
1067      * @return the generate_NFA_dot
1068      */
1069     public boolean isGenerate_NFA_dot() {
1070         return generate_NFA_dot;
1071     }
1072 
1073     /**
1074      * Indicates whether ANTLR has generated or will generate a description of
1075      * all the DFAs in <a href="http://www.graphviz.org">Dot format</a>
1076      *
1077      * @return the generate_DFA_dot
1078      */
1079     public boolean isGenerate_DFA_dot() {
1080         return generate_DFA_dot;
1081     }
1082 
1083     /**
1084      * Return the Path to the base output directory, where ANTLR
1085      * will generate all the output files for the current language target as
1086      * well as any ancillary files such as .tokens vocab files.
1087      *
1088      * @return the output Directory
1089      */
1090     public String getOutputDirectory() {
1091         return outputDirectory;
1092     }
1093 
1094     /**
1095      * Return the Path to the directory in which ANTLR will search for ancillary
1096      * files such as .tokens vocab files and imported grammar files.
1097      *
1098      * @return the lib Directory
1099      */
1100     public String getLibraryDirectory() {
1101         return libDirectory;
1102     }
1103 
1104     /**
1105      * Indicate if ANTLR has generated, or will generate a debug version of the
1106      * recognizer. Debug versions of a parser communicate with a debugger such
1107      * as that contained in ANTLRWorks and at start up will 'hang' waiting for
1108      * a connection on an IP port (49100 by default).
1109      *
1110      * @return the debug flag
1111      */
1112     public boolean isDebug() {
1113         return debug;
1114     }
1115 
1116     /**
1117      * Indicate whether ANTLR has generated, or will generate a version of the
1118      * recognizer that prints trace messages on entry and exit of each rule.
1119      *
1120      * @return the trace flag
1121      */
1122     public boolean isTrace() {
1123         return trace;
1124     }
1125 
1126     /**
1127      * Indicates whether ANTLR has generated or will generate a version of the
1128      * recognizer that gathers statistics about its execution, which it prints when
1129      * it terminates.
1130      *
1131      * @return the profile
1132      */
1133     public boolean isProfile() {
1134         return profile;
1135     }
1136 
1137     /**
1138      * Indicates whether ANTLR has generated or will generate a report of various
1139      * elements of the grammar analysis, once it has finished analyzing a grammar
1140      * file.
1141      *
1142      * @return the report flag
1143      */
1144     public boolean isReport() {
1145         return report;
1146     }
1147 
1148     /**
1149      * Indicates whether ANTLR has printed, or will print, a version of the input grammar
1150      * file(s) that is stripped of any action code embedded within.
1151      *
1152      * @return the printGrammar flag
1153      */
1154     public boolean isPrintGrammar() {
1155         return printGrammar;
1156     }
1157 
1158     /**
1159      * Indicates whether ANTLR has supplied, or will supply, a list of all the things
1160      * that the input grammar depends upon and all the things that will be generated
1161      * when that grammar is successfully analyzed.
1162      *
1163      * @return the depend flag
1164      */
1165     public boolean isDepend() {
1166         return depend;
1167     }
1168 
1169     /**
1170      * Indicates whether ANTLR will force all files to the output directory, even
1171      * if the input files have relative paths from the input directory.
1172      *
1173      * @return the forceAllFilesToOutputDir flag
1174      */
1175     public boolean isForceAllFilesToOutputDir() {
1176         return forceAllFilesToOutputDir;
1177     }
1178 
1179     /**
1180      * Indicates whether ANTLR will be verbose when analyzing grammar files, such as
1181      * displaying the names of the files it is generating and similar information.
1182      *
1183      * @return the verbose flag
1184      */
1185     public boolean isVerbose() {
1186         return verbose;
1187     }
1188 
1189     /**
1190      * Provide the current setting of the conversion timeout on DFA creation.
1191      *
1192      * @return DFA creation timeout value in milliseconds
1193      */
1194     public int getConversionTimeout() {
1195         return DFA.MAX_TIME_PER_DFA_CREATION;
1196     }
1197 
1198     /**
1199      * Returns the current setting of the message format descriptor
1200      * @return Current message format
1201      */
1202     public String getMessageFormat() {
1203         return ErrorManager.getMessageFormat().toString();
1204     }
1205 
1206     /**
1207      * Returns the number of errors that the analysis/processing threw up.
1208      * @return Error count
1209      */
1210     public int getNumErrors() {
1211         return ErrorManager.getNumErrors();
1212     }
1213 
1214     /**
1215      * Indicate whether the tool will analyze the dependencies of the provided grammar
1216      * file list and ensure that grammars with dependencies are built
1217      * after any of the other grammars in the list that they are dependent on. Setting
1218      * this option also has the side effect that any grammars that are includes for other
1219      * grammars in the list are excluded from individual analysis, which allows the caller
1220      * to invoke the tool via org.antlr.Tool -make *.g and not worry about the inclusion
1221      * of grammars that are just includes for other grammars or what order the grammars
1222      * appear on the command line.
1223      *
1224      * This option was coded to make life easier for tool integration (such as Maven) but
1225      * may also be useful at the command line.
1226      *
1227      * @return true if the tool is currently configured to analyze and sort grammar files.
1228      */
1229     public boolean getMake() {
1230         return make;
1231     }
1232 
1233     /**
1234      * Set the message format to one of ANTLR, gnu, vs2005
1235      *
1236      * @param format
1237      */
1238     public void setMessageFormat(String format) {
1239         ErrorManager.setFormat(format);
1240     }
1241 
1242     /** Provide the List of all grammar file names that the ANTLR tool should process.
1243      *
1244      * @param grammarFileNames The list of grammar files to process
1245      */
1246     public void setGrammarFileNames(List<String> grammarFileNames) {
1247         this.grammarFileNames = grammarFileNames;
1248     }
1249 
1250     public void addGrammarFile(String grammarFileName) {
1251         if (!grammarFileNames.contains(grammarFileName)) {
1252             grammarFileNames.add(grammarFileName);
1253         }
1254     }
1255 
1256     /**
1257      * Indicate whether ANTLR should generate a description of
1258      * all the NFAs in <a href="http://www.graphviz.org">Dot format</a>
1259      *
1260      * @param generate_NFA_dot True to generate dot descriptions
1261      */
1262     public void setGenerate_NFA_dot(boolean generate_NFA_dot) {
1263         this.generate_NFA_dot = generate_NFA_dot;
1264     }
1265 
1266     /**
1267      * Indicate whether ANTLR should generate a description of
1268      * all the DFAs in <a href="http://www.graphviz.org">Dot format</a>
1269      *
1270      * @param generate_DFA_dot True to generate dot descriptions
1271      */
1272     public void setGenerate_DFA_dot(boolean generate_DFA_dot) {
1273         this.generate_DFA_dot = generate_DFA_dot;
1274     }
1275 
1276     /**
1277      * Set the Path to the directory in which ANTLR will search for ancillary
1278      * files such as .tokens vocab files and imported grammar files.
1279      *
1280      * @param libDirectory the libDirectory to set
1281      */
1282     public void setLibDirectory(String libDirectory) {
1283         this.libDirectory = libDirectory;
1284     }
1285 
1286     /**
1287      * Indicate whether ANTLR should generate a debug version of the
1288      * recognizer. Debug versions of a parser communicate with a debugger such
1289      * as that contained in ANTLRWorks and at start up will 'hang' waiting for
1290      * a connection on an IP port (49100 by default).
1291      *
1292      * @param debug true to generate a debug mode parser
1293      */
1294     public void setDebug(boolean debug) {
1295         this.debug = debug;
1296     }
1297 
1298     /**
1299      * Indicate whether ANTLR should generate a version of the
1300      * recognizer that prints trace messages on entry and exit of each rule
1301      *
1302      * @param trace true to generate a tracing parser
1303      */
1304     public void setTrace(boolean trace) {
1305         this.trace = trace;
1306     }
1307 
1308     /**
1309      * Indicate whether ANTLR should generate a version of the
1310      * recognizer that gathers statistics about its execution, which it prints when
1311      * it terminates.
1312      *
1313      * @param profile true to generate a profiling parser
1314      */
1315     public void setProfile(boolean profile) {
1316         this.profile = profile;
1317     }
1318 
1319     /**
1320      * Indicate whether ANTLR should generate a report of various
1321      * elements of the grammar analysis, once it has finished analyzing a grammar
1322      * file.
1323      *
1324      * @param report true to generate the analysis report
1325      */
1326     public void setReport(boolean report) {
1327         this.report = report;
1328     }
1329 
1330     /**
1331      * Indicate whether ANTLR should print a version of the input grammar
1332      * file(s) that is stripped of any action code embedded within.
1333      *
1334      * @param printGrammar true to generate a stripped file
1335      */
1336     public void setPrintGrammar(boolean printGrammar) {
1337         this.printGrammar = printGrammar;
1338     }
1339 
1340     /**
1341      * Indicate whether ANTLR should supply a list of all the things
1342      * that the input grammar depends upon and all the things that will be generated
1343      * when that grammar is successfully analyzed.
1344      *
1345      * @param depend true to get depends set rather than process the grammar
1346      */
1347     public void setDepend(boolean depend) {
1348         this.depend = depend;
1349     }
1350 
1351     /**
1352      * Indicates whether ANTLR will force all files to the output directory, even
1353      * if the input files have relative paths from the input directory.
1354      *
1355      * @param forceAllFilesToOutputDir true to force files to output directory
1356      */
1357     public void setForceAllFilesToOutputDir(boolean forceAllFilesToOutputDir) {
1358         this.forceAllFilesToOutputDir = forceAllFilesToOutputDir;
1359     }
1360 
1361     /**
1362      * Indicate whether ANTLR should be verbose when analyzing grammar files, such as
1363      * displaying the names of the files it is generating and similar information.
1364      *
1365      * @param verbose true to be verbose
1366      */
1367     public void setVerbose(boolean verbose) {
1368         this.verbose = verbose;
1369     }
1370 
1371     /**
1372      * Indicate whether the tool should analyze the dependencies of the provided grammar
1373      * file list and ensure that the grammars with dependencies are built
1374      * after any of the other grammars in the list that they are dependent on. Setting
1375      * this option also has the side effect that any grammars that are includes for other
1376      * grammars in the list are excluded from individual analysis, which allows the caller
1377      * to invoke the tool via org.antlr.Tool -make *.g and not worry about the inclusion
1378      * of grammars that are just includes for other grammars or what order the grammars
1379      * appear on the command line.
1380      *
1381      * This option was coded to make life easier for tool integration (such as Maven) but
1382      * may also be useful at the command line.
1383      *
1384      * @param make
1385      */
1386     public void setMake(boolean make) {
1387         this.make = make;
1388     }
1389 
1390 }
1391
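The class above is normally driven either from the command line (via main) or in-process by a build tool through the public setters, as the Javadoc for setInputDirectory and setOutputDirectory describes. The sketch below shows one way such an embedding might look; it is illustrative only, and the class name, grammar file, and directory paths are placeholders rather than anything taken from the ANTLR sources.

// Hypothetical embedding sketch -- RunAntlr, the grammar file, and the directories are placeholders.
import org.antlr.Tool;

public class RunAntlr {
    public static void main(String[] args) {
        Tool antlr = new Tool();                      // no-arg constructor only loads antlr.properties
        antlr.setOutputDirectory("build/generated");  // base directory for generated code and .tokens files
        antlr.setLibDirectory("grammars/imports");    // where imported grammars and vocab files are looked up
        antlr.addGrammarFile("grammars/Expr.g");      // queue a grammar; duplicates are silently ignored
        antlr.setMake(true);                          // skip grammars whose generated files are already newer
        antlr.process();                              // sort, analyze, and generate recognizers
        if (antlr.getNumErrors() > 0) {
            System.err.println("ANTLR reported " + antlr.getNumErrors() + " error(s)");
        }
    }
}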