//===- Parsing, selection, and construction of pass pipelines --*- C++ -*--===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Interfaces for registering analysis passes, producing common pass manager
/// configurations, and parsing of pass pipelines.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_PASSES_PASSBUILDER_H
#define LLVM_PASSES_PASSBUILDER_H

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class StringRef;
class AAManager;
class TargetMachine;

/// \brief This class provides access to building LLVM's passes.
///
/// Its members provide the baseline state available to passes during their
/// construction. The \c PassRegistry.def file specifies how to construct all
/// of the built-in passes, and those may reference these members during
/// construction.
34 class PassBuilder {
35   TargetMachine *TM;
36 
37 public:
38   /// \brief LLVM-provided high-level optimization levels.
39   ///
40   /// This enumerates the LLVM-provided high-level optimization levels. Each
41   /// level has a specific goal and rationale.
42   enum OptimizationLevel {
43     /// Disable as many optimizations as possible. This doesn't completely
44     /// disable the optimizer in many cases as there are correctness issues
45     /// such as always_inline functions.
46     O0,
47 
48     /// Optimize quickly without destroying debuggability.
49     ///
50     /// FIXME: The current and historical behavior of this level does *not*
51     /// agree with this goal, but we would like to move toward this goal in the
52     /// future.
53     ///
54     /// This level is tuned to produce a result from the optimizer as quickly
55     /// as possible and to avoid destroying debuggability. This tends to result
56     /// in a very good development mode where the compiled code will be
57     /// immediately executed as part of testing. As a consequence, where
58     /// possible, we would like to produce efficient-to-execute code, but not
59     /// if it significantly slows down compilation or would prevent even basic
60     /// debugging of the resulting binary.
61     ///
62     /// As an example, complex loop transformations such as versioning,
63     /// vectorization, or fusion might not make sense here due to the degree to
64     /// which the executed code would differ from the source code, and the
65     /// potential compile time cost.
66     O1,
67 
68     /// Optimize for fast execution as much as possible without triggering
69     /// significant incremental compile time or code size growth.
70     ///
71     /// The key idea is that optimizations at this level should "pay for
72     /// themselves". So if an optimization increases compile time by 5% or
73     /// increases code size by 5% for a particular benchmark, that benchmark
74     /// should also be one which sees a 5% runtime improvement. If the compile
75     /// time or code size penalties happen on average across a diverse range of
76     /// LLVM users' benchmarks, then the improvements should as well.
77     ///
78     /// And no matter what, the compile time needs to not grow superlinearly
79     /// with the size of input to LLVM so that users can control the runtime of
80     /// the optimizer in this mode.
81     ///
82     /// This is expected to be a good default optimization level for the vast
83     /// majority of users.
84     O2,
85 
86     /// Optimize for fast execution as much as possible.
87     ///
88     /// This mode is significantly more aggressive in trading off compile time
89     /// and code size to get execution time improvements. The core idea is that
90     /// this mode should include any optimization that helps execution time on
91     /// balance across a diverse collection of benchmarks, even if it increases
92     /// code size or compile time for some benchmarks without corresponding
93     /// improvements to execution time.
94     ///
95     /// Despite being willing to trade more compile time off to get improved
96     /// execution time, this mode still tries to avoid superlinear growth in
97     /// order to make even significantly slower compile times at least scale
98     /// reasonably. This does not preclude very substantial constant factor
99     /// costs though.
100     O3,
101 
102     /// Similar to \c O2 but tries to optimize for small code size instead of
103     /// fast execution without triggering significant incremental execution
104     /// time slowdowns.
105     ///
106     /// The logic here is exactly the same as \c O2, but with code size and
107     /// execution time metrics swapped.
108     ///
109     /// A consequence of the different core goal is that this should in general
110     /// produce substantially smaller executables that still run in
111     /// a reasonable amount of time.
112     Os,
113 
114     /// A very specialized mode that will optimize for code size at any and all
115     /// costs.
116     ///
117     /// This is useful primarily when there are absolute size limitations and
118     /// any effort taken to reduce the size is worth it regardless of the
119     /// execution time impact. You should expect this level to produce rather
120     /// slow, but very small, code.
121     Oz
122   };
123 
TM(TM)124   explicit PassBuilder(TargetMachine *TM = nullptr) : TM(TM) {}
125 
126   /// \brief Cross register the analysis managers through their proxies.
127   ///
128   /// This is an interface that can be used to cross register each
129   // AnalysisManager with all the others analysis managers.
130   void crossRegisterProxies(LoopAnalysisManager &LAM,
131                             FunctionAnalysisManager &FAM,
132                             CGSCCAnalysisManager &CGAM,
133                             ModuleAnalysisManager &MAM);
134 
135   /// \brief Registers all available module analysis passes.
136   ///
137   /// This is an interface that can be used to populate a \c
138   /// ModuleAnalysisManager with all registered module analyses. Callers can
139   /// still manually register any additional analyses. Callers can also
140   /// pre-register analyses and this will not override those.
141   void registerModuleAnalyses(ModuleAnalysisManager &MAM);
142 
143   /// \brief Registers all available CGSCC analysis passes.
144   ///
145   /// This is an interface that can be used to populate a \c CGSCCAnalysisManager
146   /// with all registered CGSCC analyses. Callers can still manually register any
147   /// additional analyses. Callers can also pre-register analyses and this will
148   /// not override those.
149   void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);
150 
151   /// \brief Registers all available function analysis passes.
152   ///
153   /// This is an interface that can be used to populate a \c
154   /// FunctionAnalysisManager with all registered function analyses. Callers can
155   /// still manually register any additional analyses. Callers can also
156   /// pre-register analyses and this will not override those.
157   void registerFunctionAnalyses(FunctionAnalysisManager &FAM);
158 
159   /// \brief Registers all available loop analysis passes.
160   ///
161   /// This is an interface that can be used to populate a \c LoopAnalysisManager
162   /// with all registered loop analyses. Callers can still manually register any
163   /// additional analyses.
164   void registerLoopAnalyses(LoopAnalysisManager &LAM);
165 
166   /// \brief Add a per-module default optimization pipeline to a pass manager.
167   ///
168   /// This provides a good default optimization pipeline for per-module
169   /// optimization and code generation without any link-time optimization. It
170   /// typically correspond to frontend "-O[123]" options for optimization
171   /// levels \c O1, \c O2 and \c O3 resp.
172   void addPerModuleDefaultPipeline(ModulePassManager &MPM,
173                                    OptimizationLevel Level,
174                                    bool DebugLogging = false);
175 
176   /// \brief Add a pre-link, LTO-targeting default optimization pipeline to
177   /// a pass manager.
178   ///
179   /// This adds the pre-link optimizations tuned to work well with a later LTO
180   /// run. It works to minimize the IR which needs to be analyzed without
181   /// making irreversible decisions which could be made better during the LTO
182   /// run.
183   void addLTOPreLinkDefaultPipeline(ModulePassManager &MPM,
184                                     OptimizationLevel Level,
185                                     bool DebugLogging = false);
186 
187   /// \brief Add an LTO default optimization pipeline to a pass manager.
188   ///
189   /// This provides a good default optimization pipeline for link-time
190   /// optimization and code generation. It is particularly tuned to fit well
191   /// when IR coming into the LTO phase was first run through \c
192   /// addPreLinkLTODefaultPipeline, and the two coordinate closely.
193   void addLTODefaultPipeline(ModulePassManager &MPM, OptimizationLevel Level,
194                              bool DebugLogging = false);
195 
196   /// \brief Parse a textual pass pipeline description into a \c ModulePassManager.
197   ///
198   /// The format of the textual pass pipeline description looks something like:
199   ///
200   ///   module(function(instcombine,sroa),dce,cgscc(inliner,function(...)),...)
201   ///
202   /// Pass managers have ()s describing the nest structure of passes. All passes
203   /// are comma separated. As a special shortcut, if the very first pass is not
204   /// a module pass (as a module pass manager is), this will automatically form
205   /// the shortest stack of pass managers that allow inserting that first pass.
206   /// So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and loop passes
207   /// 'lpassN', all of these are valid:
208   ///
209   ///   fpass1,fpass2,fpass3
210   ///   cgpass1,cgpass2,cgpass3
211   ///   lpass1,lpass2,lpass3
212   ///
213   /// And they are equivalent to the following (resp.):
214   ///
215   ///   module(function(fpass1,fpass2,fpass3))
216   ///   module(cgscc(cgpass1,cgpass2,cgpass3))
217   ///   module(function(loop(lpass1,lpass2,lpass3)))
218   ///
219   /// This shortcut is especially useful for debugging and testing small pass
220   /// combinations. Note that these shortcuts don't introduce any other magic. If
221   /// the sequence of passes aren't all the exact same kind of pass, it will be
222   /// an error. You cannot mix different levels implicitly, you must explicitly
223   /// form a pass manager in which to nest passes.
224   bool parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
225                          bool VerifyEachPass = true, bool DebugLogging = false);
226 
227   /// Parse a textual alias analysis pipeline into the provided AA manager.
228   ///
229   /// The format of the textual AA pipeline is a comma separated list of AA
230   /// pass names:
231   ///
232   ///   basic-aa,globals-aa,...
233   ///
234   /// The AA manager is set up such that the provided alias analyses are tried
235   /// in the order specified. See the \c AAManaager documentation for details
236   /// about the logic used. This routine just provides the textual mapping
237   /// between AA names and the analyses to register with the manager.
238   ///
239   /// Returns false if the text cannot be parsed cleanly. The specific state of
240   /// the \p AA manager is unspecified if such an error is encountered and this
241   /// returns false.
242   bool parseAAPipeline(AAManager &AA, StringRef PipelineText);
243 
244 private:
245   bool parseModulePassName(ModulePassManager &MPM, StringRef Name,
246                            bool DebugLogging);
247   bool parseCGSCCPassName(CGSCCPassManager &CGPM, StringRef Name);
248   bool parseFunctionPassName(FunctionPassManager &FPM, StringRef Name);
249   bool parseLoopPassName(LoopPassManager &LPM, StringRef Name);
250   bool parseAAPassName(AAManager &AA, StringRef Name);
251   bool parseLoopPassPipeline(LoopPassManager &LPM, StringRef &PipelineText,
252                              bool VerifyEachPass, bool DebugLogging);
253   bool parseFunctionPassPipeline(FunctionPassManager &FPM,
254                                  StringRef &PipelineText, bool VerifyEachPass,
255                                  bool DebugLogging);
256   bool parseCGSCCPassPipeline(CGSCCPassManager &CGPM, StringRef &PipelineText,
257                               bool VerifyEachPass, bool DebugLogging);
258   bool parseModulePassPipeline(ModulePassManager &MPM, StringRef &PipelineText,
259                                bool VerifyEachPass, bool DebugLogging);
260 };
}

#endif