1 //===- Parsing, selection, and construction of pass pipelines --*- C++ -*--===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 ///
11 /// Interfaces for registering analysis passes, producing common pass manager
12 /// configurations, and parsing of pass pipelines.
13 ///
14 //===----------------------------------------------------------------------===//
15
16 #ifndef LLVM_PASSES_PASSBUILDER_H
17 #define LLVM_PASSES_PASSBUILDER_H
18
#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include <cassert>
#include <string>
#include <utility>
#include <vector>
25
26 namespace llvm {
27 class StringRef;
28 class AAManager;
29 class TargetMachine;
30 class ModuleSummaryIndex;
31
/// A struct capturing PGO tunables.
///
/// The three file names are sink parameters: they are taken by value and
/// moved into the corresponding members to avoid redundant string copies.
struct PGOOptions {
  PGOOptions(std::string ProfileGenFile = "", std::string ProfileUseFile = "",
             std::string SampleProfileFile = "", bool RunProfileGen = false,
             bool SamplePGOSupport = false)
      : ProfileGenFile(std::move(ProfileGenFile)),
        ProfileUseFile(std::move(ProfileUseFile)),
        SampleProfileFile(std::move(SampleProfileFile)),
        RunProfileGen(RunProfileGen),
        // Read the already-initialized member (via this->), not the
        // moved-from parameter: a SampleProfileFile implies SamplePGO.
        SamplePGOSupport(SamplePGOSupport ||
                         !this->SampleProfileFile.empty()) {
    // At least one PGO mode must be requested; a default-constructed
    // PGOOptions is invalid by design (callers use Optional<PGOOptions>).
    // Members (not the moved-from parameters) are consulted here.
    assert((this->RunProfileGen || !this->SampleProfileFile.empty() ||
            !this->ProfileUseFile.empty() || this->SamplePGOSupport) &&
           "Illegal PGOOptions.");
  }
  std::string ProfileGenFile;    // Output path for instrumentation profiles.
  std::string ProfileUseFile;    // Input path for instrumentation profiles.
  std::string SampleProfileFile; // Input path for sample-based profiles.
  bool RunProfileGen;            // Whether to run profile instrumentation.
  bool SamplePGOSupport;         // Whether SamplePGO machinery is needed.
};
51
/// This class provides access to building LLVM's passes.
///
/// Its members provide the baseline state available to passes during their
/// construction. The \c PassRegistry.def file specifies how to construct all
/// of the built-in passes, and those may reference these members during
/// construction.
class PassBuilder {
  /// Target machine used when constructing target-aware passes; may be null
  /// (the constructor defaults it to nullptr).
  TargetMachine *TM;
  /// PGO tunables, present only when PGO is enabled for this compilation.
  Optional<PGOOptions> PGOOpt;

public:
  /// A struct to capture parsed pass pipeline names.
  ///
  /// A pipeline is defined as a series of names, each of which may in itself
  /// recursively contain a nested pipeline. A name is either the name of a pass
  /// (e.g. "instcombine") or the name of a pipeline type (e.g. "cgscc"). If the
  /// name is the name of a pass, the InnerPipeline is empty, since passes
  /// cannot contain inner pipelines. See parsePassPipeline() for a more
  /// detailed description of the textual pipeline format.
  struct PipelineElement {
    StringRef Name;
    std::vector<PipelineElement> InnerPipeline;
  };

  /// ThinLTO phase.
  ///
  /// This enumerates the LLVM ThinLTO optimization phases.
  enum class ThinLTOPhase {
    /// No ThinLTO behavior needed.
    None,
    /// ThinLTO prelink (summary) phase.
    PreLink,
    /// ThinLTO postlink (backend compile) phase.
    PostLink
  };

  /// LLVM-provided high-level optimization levels.
  ///
  /// This enumerates the LLVM-provided high-level optimization levels. Each
  /// level has a specific goal and rationale.
  enum OptimizationLevel {
    /// Disable as many optimizations as possible. This doesn't completely
    /// disable the optimizer in all cases, for example always_inline functions
    /// can be required to be inlined for correctness.
    O0,

    /// Optimize quickly without destroying debuggability.
    ///
    /// FIXME: The current and historical behavior of this level does *not*
    /// agree with this goal, but we would like to move toward this goal in the
    /// future.
    ///
    /// This level is tuned to produce a result from the optimizer as quickly
    /// as possible and to avoid destroying debuggability. This tends to result
    /// in a very good development mode where the compiled code will be
    /// immediately executed as part of testing. As a consequence, where
    /// possible, we would like to produce efficient-to-execute code, but not
    /// if it significantly slows down compilation or would prevent even basic
    /// debugging of the resulting binary.
    ///
    /// As an example, complex loop transformations such as versioning,
    /// vectorization, or fusion might not make sense here due to the degree to
    /// which the executed code would differ from the source code, and the
    /// potential compile time cost.
    O1,

    /// Optimize for fast execution as much as possible without triggering
    /// significant incremental compile time or code size growth.
    ///
    /// The key idea is that optimizations at this level should "pay for
    /// themselves". So if an optimization increases compile time by 5% or
    /// increases code size by 5% for a particular benchmark, that benchmark
    /// should also be one which sees a 5% runtime improvement. If the compile
    /// time or code size penalties happen on average across a diverse range of
    /// LLVM users' benchmarks, then the improvements should as well.
    ///
    /// And no matter what, the compile time needs to not grow superlinearly
    /// with the size of input to LLVM so that users can control the runtime of
    /// the optimizer in this mode.
    ///
    /// This is expected to be a good default optimization level for the vast
    /// majority of users.
    O2,

    /// Optimize for fast execution as much as possible.
    ///
    /// This mode is significantly more aggressive in trading off compile time
    /// and code size to get execution time improvements. The core idea is that
    /// this mode should include any optimization that helps execution time on
    /// balance across a diverse collection of benchmarks, even if it increases
    /// code size or compile time for some benchmarks without corresponding
    /// improvements to execution time.
    ///
    /// Despite being willing to trade more compile time off to get improved
    /// execution time, this mode still tries to avoid superlinear growth in
    /// order to make even significantly slower compile times at least scale
    /// reasonably. This does not preclude very substantial constant factor
    /// costs though.
    O3,

    /// Similar to \c O2 but tries to optimize for small code size instead of
    /// fast execution without triggering significant incremental execution
    /// time slowdowns.
    ///
    /// The logic here is exactly the same as \c O2, but with code size and
    /// execution time metrics swapped.
    ///
    /// A consequence of the different core goal is that this should in general
    /// produce substantially smaller executables that still run in
    /// a reasonable amount of time.
    Os,

    /// A very specialized mode that will optimize for code size at any and all
    /// costs.
    ///
    /// This is useful primarily when there are absolute size limitations and
    /// any effort taken to reduce the size is worth it regardless of the
    /// execution time impact. You should expect this level to produce rather
    /// slow, but very small, code.
    Oz
  };

  explicit PassBuilder(TargetMachine *TM = nullptr,
                       Optional<PGOOptions> PGOOpt = None)
      : TM(TM), PGOOpt(PGOOpt) {}

  /// Cross register the analysis managers through their proxies.
  ///
  /// This is an interface that can be used to cross register each
  /// AnalysisManager with all the others analysis managers.
  void crossRegisterProxies(LoopAnalysisManager &LAM,
                            FunctionAnalysisManager &FAM,
                            CGSCCAnalysisManager &CGAM,
                            ModuleAnalysisManager &MAM);

  /// Registers all available module analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// ModuleAnalysisManager with all registered module analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerModuleAnalyses(ModuleAnalysisManager &MAM);

  /// Registers all available CGSCC analysis passes.
  ///
  /// This is an interface that can be used to populate a \c CGSCCAnalysisManager
  /// with all registered CGSCC analyses. Callers can still manually register any
  /// additional analyses. Callers can also pre-register analyses and this will
  /// not override those.
  void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);

  /// Registers all available function analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// FunctionAnalysisManager with all registered function analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerFunctionAnalyses(FunctionAnalysisManager &FAM);

  /// Registers all available loop analysis passes.
  ///
  /// This is an interface that can be used to populate a \c LoopAnalysisManager
  /// with all registered loop analyses. Callers can still manually register any
  /// additional analyses.
  void registerLoopAnalyses(LoopAnalysisManager &LAM);

  /// Construct the core LLVM function canonicalization and simplification
  /// pipeline.
  ///
  /// This is a long pipeline and uses most of the per-function optimization
  /// passes in LLVM to canonicalize and simplify the IR. It is suitable to run
  /// repeatedly over the IR and is not expected to destroy important
  /// information about the semantics of the IR.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ///
  /// \p Phase indicates the current ThinLTO phase.
  FunctionPassManager
  buildFunctionSimplificationPipeline(OptimizationLevel Level,
                                      ThinLTOPhase Phase,
                                      bool DebugLogging = false);

  /// Construct the core LLVM module canonicalization and simplification
  /// pipeline.
  ///
  /// This pipeline focuses on canonicalizing and simplifying the entire module
  /// of IR. Much like the function simplification pipeline above, it is
  /// suitable to run repeatedly over the IR and is not expected to destroy
  /// important information. It does, however, perform inlining and other
  /// heuristic based simplifications that are not strictly reversible.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ///
  /// \p Phase indicates the current ThinLTO phase.
  ModulePassManager
  buildModuleSimplificationPipeline(OptimizationLevel Level,
                                    ThinLTOPhase Phase,
                                    bool DebugLogging = false);

  /// Construct the core LLVM module optimization pipeline.
  ///
  /// This pipeline focuses on optimizing the execution speed of the IR. It
  /// uses cost modeling and thresholds to balance code growth against runtime
  /// improvements. It includes vectorization and other information destroying
  /// transformations. It also cannot generally be run repeatedly on a module
  /// without potentially seriously regressing either runtime performance of
  /// the code or serious code size growth.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildModuleOptimizationPipeline(OptimizationLevel Level,
                                                    bool DebugLogging = false);

  /// Build a per-module default optimization pipeline.
  ///
  /// This provides a good default optimization pipeline for per-module
  /// optimization and code generation without any link-time optimization. It
  /// typically correspond to frontend "-O[123]" options for optimization
  /// levels \c O1, \c O2 and \c O3 resp.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildPerModuleDefaultPipeline(OptimizationLevel Level,
                                                  bool DebugLogging = false);

  /// Build a pre-link, ThinLTO-targeting default optimization pipeline to
  /// a pass manager.
  ///
  /// This adds the pre-link optimizations tuned to prepare a module for
  /// a ThinLTO run. It works to minimize the IR which needs to be analyzed
  /// without making irreversible decisions which could be made better during
  /// the LTO run.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager
  buildThinLTOPreLinkDefaultPipeline(OptimizationLevel Level,
                                     bool DebugLogging = false);

  /// Build a ThinLTO default optimization pipeline to a pass manager.
  ///
  /// This provides a good default optimization pipeline for link-time
  /// optimization and code generation. It is particularly tuned to fit well
  /// when IR coming into the LTO phase was first run through \c
  /// addPreLinkLTODefaultPipeline, and the two coordinate closely.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager
  buildThinLTODefaultPipeline(OptimizationLevel Level, bool DebugLogging,
                              const ModuleSummaryIndex *ImportSummary);

  /// Build a pre-link, LTO-targeting default optimization pipeline to a pass
  /// manager.
  ///
  /// This adds the pre-link optimizations tuned to work well with a later LTO
  /// run. It works to minimize the IR which needs to be analyzed without
  /// making irreversible decisions which could be made better during the LTO
  /// run.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildLTOPreLinkDefaultPipeline(OptimizationLevel Level,
                                                   bool DebugLogging = false);

  /// Build an LTO default optimization pipeline to a pass manager.
  ///
  /// This provides a good default optimization pipeline for link-time
  /// optimization and code generation. It is particularly tuned to fit well
  /// when IR coming into the LTO phase was first run through \c
  /// addPreLinkLTODefaultPipeline, and the two coordinate closely.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildLTODefaultPipeline(OptimizationLevel Level,
                                            bool DebugLogging,
                                            ModuleSummaryIndex *ExportSummary);

  /// Build the default `AAManager` with the default alias analysis pipeline
  /// registered.
  AAManager buildDefaultAAPipeline();

  /// Parse a textual pass pipeline description into a \c
  /// ModulePassManager.
  ///
  /// The format of the textual pass pipeline description looks something like:
  ///
  ///   module(function(instcombine,sroa),dce,cgscc(inliner,function(...)),...)
  ///
  /// Pass managers have ()s describing the nest structure of passes. All passes
  /// are comma separated. As a special shortcut, if the very first pass is not
  /// a module pass (as a module pass manager is), this will automatically form
  /// the shortest stack of pass managers that allow inserting that first pass.
  /// So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and loop
  /// passes 'lpassN', all of these are valid:
  ///
  ///   fpass1,fpass2,fpass3
  ///   cgpass1,cgpass2,cgpass3
  ///   lpass1,lpass2,lpass3
  ///
  /// And they are equivalent to the following (resp.):
  ///
  ///   module(function(fpass1,fpass2,fpass3))
  ///   module(cgscc(cgpass1,cgpass2,cgpass3))
  ///   module(function(loop(lpass1,lpass2,lpass3)))
  ///
  /// This shortcut is especially useful for debugging and testing small pass
  /// combinations. Note that these shortcuts don't introduce any other magic.
  /// If the sequence of passes aren't all the exact same kind of pass, it will
  /// be an error. You cannot mix different levels implicitly, you must
  /// explicitly form a pass manager in which to nest passes.
  bool parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
                         bool VerifyEachPass = true, bool DebugLogging = false);

  /// @{ Parse a textual pass pipeline description into a specific PassManager
  ///
  /// Automatic deduction of an appropriate pass manager stack is not supported.
  /// For example, to insert a loop pass 'lpass' into a FunctionPassManager,
  /// this is the valid pipeline text:
  ///
  ///   function(lpass)
  bool parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText,
                         bool VerifyEachPass = true, bool DebugLogging = false);
  bool parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText,
                         bool VerifyEachPass = true, bool DebugLogging = false);
  bool parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText,
                         bool VerifyEachPass = true, bool DebugLogging = false);
  /// @}

  /// Parse a textual alias analysis pipeline into the provided AA manager.
  ///
  /// The format of the textual AA pipeline is a comma separated list of AA
  /// pass names:
  ///
  ///   basic-aa,globals-aa,...
  ///
  /// The AA manager is set up such that the provided alias analyses are tried
  /// in the order specified. See the \c AAManager documentation for details
  /// about the logic used. This routine just provides the textual mapping
  /// between AA names and the analyses to register with the manager.
  ///
  /// Returns false if the text cannot be parsed cleanly. The specific state of
  /// the \p AA manager is unspecified if such an error is encountered and this
  /// returns false.
  bool parseAAPipeline(AAManager &AA, StringRef PipelineText);

  /// Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding passes that perform peephole
  /// optimizations similar to the instruction combiner. These passes will be
  /// inserted after each instance of the instruction combiner pass.
  void registerPeepholeEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    PeepholeEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding late loop canonicalization and
  /// simplification passes. This is the last point in the loop optimization
  /// pipeline before loop deletion. Each pass added
  /// here must be an instance of LoopPass.
  /// This is the place to add passes that can remove loops, such as target-
  /// specific loop idiom recognition.
  void registerLateLoopOptimizationsEPCallback(
      const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
    LateLoopOptimizationsEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding loop passes to the end of the loop
  /// optimizer.
  void registerLoopOptimizerEndEPCallback(
      const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
    LoopOptimizerEndEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding optimization passes after most of the
  /// main optimizations, but before the last cleanup-ish optimizations.
  void registerScalarOptimizerLateEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    ScalarOptimizerLateEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding CallGraphSCC passes at the end of the
  /// main CallGraphSCC passes and before any function simplification passes run
  /// by CGPassManager.
  void registerCGSCCOptimizerLateEPCallback(
      const std::function<void(CGSCCPassManager &, OptimizationLevel)> &C) {
    CGSCCOptimizerLateEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding optimization passes before the
  /// vectorizer and other highly target specific optimization passes are
  /// executed.
  void registerVectorizerStartEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    VectorizerStartEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimization once at the start of the
  /// pipeline. This does not apply to 'backend' compiles (LTO and ThinLTO
  /// link-time pipelines).
  void registerPipelineStartEPCallback(
      const std::function<void(ModulePassManager &)> &C) {
    PipelineStartEPCallbacks.push_back(C);
  }

  /// Register a callback for parsing an AliasAnalysis Name to populate
  /// the given AAManager \p AA
  void registerParseAACallback(
      const std::function<bool(StringRef Name, AAManager &AA)> &C) {
    AAParsingCallbacks.push_back(C);
  }

  /// @{ Register callbacks for analysis registration with this PassBuilder
  /// instance.
  /// Callees register their analyses with the given AnalysisManager objects.
  void registerAnalysisRegistrationCallback(
      const std::function<void(CGSCCAnalysisManager &)> &C) {
    CGSCCAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(FunctionAnalysisManager &)> &C) {
    FunctionAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(LoopAnalysisManager &)> &C) {
    LoopAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(ModuleAnalysisManager &)> &C) {
    ModuleAnalysisRegistrationCallbacks.push_back(C);
  }
  /// @}

  /// @{ Register pipeline parsing callbacks with this pass builder instance.
  /// Using these callbacks, callers can parse both a single pass name, as well
  /// as entire sub-pipelines, and populate the PassManager instance
  /// accordingly.
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, CGSCCPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    CGSCCPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, FunctionPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    FunctionPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, LoopPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    LoopPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, ModulePassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    ModulePipelineParsingCallbacks.push_back(C);
  }
  /// @}

  /// Register a callback for a top-level pipeline entry.
  ///
  /// If the PassManager type is not given at the top level of the pipeline
  /// text, this Callback should be used to determine the appropriate stack of
  /// PassManagers and populate the passed ModulePassManager.
  void registerParseTopLevelPipelineCallback(
      const std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
                               bool VerifyEachPass, bool DebugLogging)> &C) {
    TopLevelPipelineParsingCallbacks.push_back(C);
  }

private:
  // Split a pipeline string into its structured PipelineElement form; returns
  // None on a syntax error.
  static Optional<std::vector<PipelineElement>>
  parsePipelineText(StringRef Text);

  // Per-IR-unit helpers that translate a single parsed PipelineElement into a
  // pass added to the corresponding pass manager.
  bool parseModulePass(ModulePassManager &MPM, const PipelineElement &E,
                       bool VerifyEachPass, bool DebugLogging);
  bool parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E,
                      bool VerifyEachPass, bool DebugLogging);
  bool parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E,
                         bool VerifyEachPass, bool DebugLogging);
  bool parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
                     bool VerifyEachPass, bool DebugLogging);
  bool parseAAPassName(AAManager &AA, StringRef Name);

  // Per-IR-unit helpers that populate a pass manager from a whole parsed
  // sub-pipeline.
  bool parseLoopPassPipeline(LoopPassManager &LPM,
                             ArrayRef<PipelineElement> Pipeline,
                             bool VerifyEachPass, bool DebugLogging);
  bool parseFunctionPassPipeline(FunctionPassManager &FPM,
                                 ArrayRef<PipelineElement> Pipeline,
                                 bool VerifyEachPass, bool DebugLogging);
  bool parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
                              ArrayRef<PipelineElement> Pipeline,
                              bool VerifyEachPass, bool DebugLogging);
  bool parseModulePassPipeline(ModulePassManager &MPM,
                               ArrayRef<PipelineElement> Pipeline,
                               bool VerifyEachPass, bool DebugLogging);

  // Add the PGO instrumentation/use passes appropriate for the given options.
  void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
                         OptimizationLevel Level, bool RunProfileGen,
                         std::string ProfileGenFile,
                         std::string ProfileUseFile);

  // Invoke all registered peephole extension-point callbacks on \p FPM.
  void invokePeepholeEPCallbacks(FunctionPassManager &, OptimizationLevel);

  // Extension Point callbacks
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      PeepholeEPCallbacks;
  SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
      LateLoopOptimizationsEPCallbacks;
  SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
      LoopOptimizerEndEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      ScalarOptimizerLateEPCallbacks;
  SmallVector<std::function<void(CGSCCPassManager &, OptimizationLevel)>, 2>
      CGSCCOptimizerLateEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      VectorizerStartEPCallbacks;
  // Module callbacks
  SmallVector<std::function<void(ModulePassManager &)>, 2>
      PipelineStartEPCallbacks;
  SmallVector<std::function<void(ModuleAnalysisManager &)>, 2>
      ModuleAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, ModulePassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      ModulePipelineParsingCallbacks;
  SmallVector<std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
                                 bool VerifyEachPass, bool DebugLogging)>,
              2>
      TopLevelPipelineParsingCallbacks;
  // CGSCC callbacks
  SmallVector<std::function<void(CGSCCAnalysisManager &)>, 2>
      CGSCCAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, CGSCCPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      CGSCCPipelineParsingCallbacks;
  // Function callbacks
  SmallVector<std::function<void(FunctionAnalysisManager &)>, 2>
      FunctionAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, FunctionPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      FunctionPipelineParsingCallbacks;
  // Loop callbacks
  SmallVector<std::function<void(LoopAnalysisManager &)>, 2>
      LoopAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, LoopPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      LoopPipelineParsingCallbacks;
  // AA callbacks
  SmallVector<std::function<bool(StringRef Name, AAManager &AA)>, 2>
      AAParsingCallbacks;
};
643
644 /// This utility template takes care of adding require<> and invalidate<>
645 /// passes for an analysis to a given \c PassManager. It is intended to be used
646 /// during parsing of a pass pipeline when parsing a single PipelineName.
647 /// When registering a new function analysis FancyAnalysis with the pass
648 /// pipeline name "fancy-analysis", a matching ParsePipelineCallback could look
649 /// like this:
650 ///
651 /// static bool parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM,
652 /// ArrayRef<PipelineElement> P) {
653 /// if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis", Name,
654 /// FPM))
655 /// return true;
656 /// return false;
657 /// }
658 template <typename AnalysisT, typename IRUnitT, typename AnalysisManagerT,
659 typename... ExtraArgTs>
parseAnalysisUtilityPasses(StringRef AnalysisName,StringRef PipelineName,PassManager<IRUnitT,AnalysisManagerT,ExtraArgTs...> & PM)660 bool parseAnalysisUtilityPasses(
661 StringRef AnalysisName, StringRef PipelineName,
662 PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...> &PM) {
663 if (!PipelineName.endswith(">"))
664 return false;
665 // See if this is an invalidate<> pass name
666 if (PipelineName.startswith("invalidate<")) {
667 PipelineName = PipelineName.substr(11, PipelineName.size() - 12);
668 if (PipelineName != AnalysisName)
669 return false;
670 PM.addPass(InvalidateAnalysisPass<AnalysisT>());
671 return true;
672 }
673
674 // See if this is a require<> pass name
675 if (PipelineName.startswith("require<")) {
676 PipelineName = PipelineName.substr(8, PipelineName.size() - 9);
677 if (PipelineName != AnalysisName)
678 return false;
679 PM.addPass(RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,
680 ExtraArgTs...>());
681 return true;
682 }
683
684 return false;
685 }
686 }
687
688 #endif
689