//===- Parsing, selection, and construction of pass pipelines --*- C++ -*--===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Interfaces for registering analysis passes, producing common pass manager
/// configurations, and parsing of pass pipelines.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_PASSES_PASSBUILDER_H
#define LLVM_PASSES_PASSBUILDER_H

#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include <vector>

namespace llvm {
class StringRef;
class AAManager;
class TargetMachine;
/// A struct capturing PGO tunables.
struct PGOOptions {
  /// All string parameters are sink parameters: they are taken by value and
  /// moved into the corresponding members, avoiding an extra copy per field.
  /// SamplePGOSupport is implied when a sample profile file is provided.
  PGOOptions(std::string ProfileGenFile = "", std::string ProfileUseFile = "",
             std::string SampleProfileFile = "", bool RunProfileGen = false,
             bool SamplePGOSupport = false)
      : ProfileGenFile(std::move(ProfileGenFile)),
        ProfileUseFile(std::move(ProfileUseFile)),
        SampleProfileFile(std::move(SampleProfileFile)),
        RunProfileGen(RunProfileGen),
        // Members initialize in declaration order, so the SampleProfileFile
        // member is already populated here; read it rather than the moved-from
        // parameter.
        SamplePGOSupport(SamplePGOSupport ||
                         !this->SampleProfileFile.empty()) {
    // A fully-default PGOOptions requests no PGO mode at all and is a
    // programmer error; at least one of the modes must be enabled.
    assert((RunProfileGen || !this->SampleProfileFile.empty() ||
            !this->ProfileUseFile.empty() || this->SamplePGOSupport) &&
           "Illegal PGOOptions.");
  }
  std::string ProfileGenFile;    // Output file for instrumentation profiles.
  std::string ProfileUseFile;    // Input instrumentation profile to apply.
  std::string SampleProfileFile; // Input sample-based profile to apply.
  bool RunProfileGen;            // Whether to run profile instrumentation.
  bool SamplePGOSupport;         // Whether sample-PGO passes are needed.
};
50
/// \brief This class provides access to building LLVM's passes.
///
/// Its members provide the baseline state available to passes during their
/// construction. The \c PassRegistry.def file specifies how to construct all
/// of the built-in passes, and those may reference these members during
/// construction.
class PassBuilder {
  TargetMachine *TM;
  Optional<PGOOptions> PGOOpt;

public:
  /// \brief A struct to capture parsed pass pipeline names.
  ///
  /// A pipeline is defined as a series of names, each of which may in itself
  /// recursively contain a nested pipeline. A name is either the name of a pass
  /// (e.g. "instcombine") or the name of a pipeline type (e.g. "cgscc"). If the
  /// name is the name of a pass, the InnerPipeline is empty, since passes
  /// cannot contain inner pipelines. See parsePassPipeline() for a more
  /// detailed description of the textual pipeline format.
  struct PipelineElement {
    StringRef Name;
    std::vector<PipelineElement> InnerPipeline;
  };

  /// \brief ThinLTO phase.
  ///
  /// This enumerates the LLVM ThinLTO optimization phases.
  enum class ThinLTOPhase {
    /// No ThinLTO behavior needed.
    None,
    /// ThinLTO prelink (summary) phase.
    PreLink,
    /// ThinLTO postlink (backend compile) phase.
    PostLink
  };

  /// \brief LLVM-provided high-level optimization levels.
  ///
  /// This enumerates the LLVM-provided high-level optimization levels. Each
  /// level has a specific goal and rationale.
  enum OptimizationLevel {
    /// Disable as many optimizations as possible. This doesn't completely
    /// disable the optimizer in all cases, for example always_inline functions
    /// can be required to be inlined for correctness.
    O0,

    /// Optimize quickly without destroying debuggability.
    ///
    /// FIXME: The current and historical behavior of this level does *not*
    /// agree with this goal, but we would like to move toward this goal in the
    /// future.
    ///
    /// This level is tuned to produce a result from the optimizer as quickly
    /// as possible and to avoid destroying debuggability. This tends to result
    /// in a very good development mode where the compiled code will be
    /// immediately executed as part of testing. As a consequence, where
    /// possible, we would like to produce efficient-to-execute code, but not
    /// if it significantly slows down compilation or would prevent even basic
    /// debugging of the resulting binary.
    ///
    /// As an example, complex loop transformations such as versioning,
    /// vectorization, or fusion might not make sense here due to the degree to
    /// which the executed code would differ from the source code, and the
    /// potential compile time cost.
    O1,

    /// Optimize for fast execution as much as possible without triggering
    /// significant incremental compile time or code size growth.
    ///
    /// The key idea is that optimizations at this level should "pay for
    /// themselves". So if an optimization increases compile time by 5% or
    /// increases code size by 5% for a particular benchmark, that benchmark
    /// should also be one which sees a 5% runtime improvement. If the compile
    /// time or code size penalties happen on average across a diverse range of
    /// LLVM users' benchmarks, then the improvements should as well.
    ///
    /// And no matter what, the compile time needs to not grow superlinearly
    /// with the size of input to LLVM so that users can control the runtime of
    /// the optimizer in this mode.
    ///
    /// This is expected to be a good default optimization level for the vast
    /// majority of users.
    O2,

    /// Optimize for fast execution as much as possible.
    ///
    /// This mode is significantly more aggressive in trading off compile time
    /// and code size to get execution time improvements. The core idea is that
    /// this mode should include any optimization that helps execution time on
    /// balance across a diverse collection of benchmarks, even if it increases
    /// code size or compile time for some benchmarks without corresponding
    /// improvements to execution time.
    ///
    /// Despite being willing to trade more compile time off to get improved
    /// execution time, this mode still tries to avoid superlinear growth in
    /// order to make even significantly slower compile times at least scale
    /// reasonably. This does not preclude very substantial constant factor
    /// costs though.
    O3,

    /// Similar to \c O2 but tries to optimize for small code size instead of
    /// fast execution without triggering significant incremental execution
    /// time slowdowns.
    ///
    /// The logic here is exactly the same as \c O2, but with code size and
    /// execution time metrics swapped.
    ///
    /// A consequence of the different core goal is that this should in general
    /// produce substantially smaller executables that still run in
    /// a reasonable amount of time.
    Os,

    /// A very specialized mode that will optimize for code size at any and all
    /// costs.
    ///
    /// This is useful primarily when there are absolute size limitations and
    /// any effort taken to reduce the size is worth it regardless of the
    /// execution time impact. You should expect this level to produce rather
    /// slow, but very small, code.
    Oz
  };

  explicit PassBuilder(TargetMachine *TM = nullptr,
                       Optional<PGOOptions> PGOOpt = None)
      : TM(TM), PGOOpt(PGOOpt) {}

  /// \brief Cross register the analysis managers through their proxies.
  ///
  /// This is an interface that can be used to cross register each
  /// AnalysisManager with all the other analysis managers.
  void crossRegisterProxies(LoopAnalysisManager &LAM,
                            FunctionAnalysisManager &FAM,
                            CGSCCAnalysisManager &CGAM,
                            ModuleAnalysisManager &MAM);

  /// \brief Registers all available module analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// ModuleAnalysisManager with all registered module analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerModuleAnalyses(ModuleAnalysisManager &MAM);

  /// \brief Registers all available CGSCC analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// CGSCCAnalysisManager with all registered CGSCC analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);

  /// \brief Registers all available function analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// FunctionAnalysisManager with all registered function analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerFunctionAnalyses(FunctionAnalysisManager &FAM);

  /// \brief Registers all available loop analysis passes.
  ///
  /// This is an interface that can be used to populate a \c LoopAnalysisManager
  /// with all registered loop analyses. Callers can still manually register any
  /// additional analyses.
  void registerLoopAnalyses(LoopAnalysisManager &LAM);

  /// Construct the core LLVM function canonicalization and simplification
  /// pipeline.
  ///
  /// This is a long pipeline and uses most of the per-function optimization
  /// passes in LLVM to canonicalize and simplify the IR. It is suitable to run
  /// repeatedly over the IR and is not expected to destroy important
  /// information about the semantics of the IR.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ///
  /// \p Phase indicates the current ThinLTO phase.
  FunctionPassManager
  buildFunctionSimplificationPipeline(OptimizationLevel Level,
                                      ThinLTOPhase Phase,
                                      bool DebugLogging = false);

  /// Construct the core LLVM module canonicalization and simplification
  /// pipeline.
  ///
  /// This pipeline focuses on canonicalizing and simplifying the entire module
  /// of IR. Much like the function simplification pipeline above, it is
  /// suitable to run repeatedly over the IR and is not expected to destroy
  /// important information. It does, however, perform inlining and other
  /// heuristic based simplifications that are not strictly reversible.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ///
  /// \p Phase indicates the current ThinLTO phase.
  ModulePassManager
  buildModuleSimplificationPipeline(OptimizationLevel Level,
                                    ThinLTOPhase Phase,
                                    bool DebugLogging = false);

  /// Construct the core LLVM module optimization pipeline.
  ///
  /// This pipeline focuses on optimizing the execution speed of the IR. It
  /// uses cost modeling and thresholds to balance code growth against runtime
  /// improvements. It includes vectorization and other information destroying
  /// transformations. It also cannot generally be run repeatedly on a module
  /// without potentially seriously regressing either runtime performance of
  /// the code or serious code size growth.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildModuleOptimizationPipeline(OptimizationLevel Level,
                                                    bool DebugLogging = false);

  /// Build a per-module default optimization pipeline.
  ///
  /// This provides a good default optimization pipeline for per-module
  /// optimization and code generation without any link-time optimization. It
  /// typically corresponds to frontend "-O[123]" options for optimization
  /// levels \c O1, \c O2 and \c O3 resp.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildPerModuleDefaultPipeline(OptimizationLevel Level,
                                                  bool DebugLogging = false);

  /// Build a pre-link, ThinLTO-targeting default optimization pipeline to
  /// a pass manager.
  ///
  /// This adds the pre-link optimizations tuned to prepare a module for
  /// a ThinLTO run. It works to minimize the IR which needs to be analyzed
  /// without making irreversible decisions which could be made better during
  /// the LTO run.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager
  buildThinLTOPreLinkDefaultPipeline(OptimizationLevel Level,
                                     bool DebugLogging = false);

  /// Build a ThinLTO default optimization pipeline to a pass manager.
  ///
  /// This provides a good default optimization pipeline for link-time
  /// optimization and code generation. It is particularly tuned to fit well
  /// when IR coming into the LTO phase was first run through \c
  /// addPreLinkLTODefaultPipeline, and the two coordinate closely.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildThinLTODefaultPipeline(OptimizationLevel Level,
                                                bool DebugLogging = false);

  /// Build a pre-link, LTO-targeting default optimization pipeline to a pass
  /// manager.
  ///
  /// This adds the pre-link optimizations tuned to work well with a later LTO
  /// run. It works to minimize the IR which needs to be analyzed without
  /// making irreversible decisions which could be made better during the LTO
  /// run.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildLTOPreLinkDefaultPipeline(OptimizationLevel Level,
                                                   bool DebugLogging = false);

  /// Build an LTO default optimization pipeline to a pass manager.
  ///
  /// This provides a good default optimization pipeline for link-time
  /// optimization and code generation. It is particularly tuned to fit well
  /// when IR coming into the LTO phase was first run through \c
  /// addPreLinkLTODefaultPipeline, and the two coordinate closely.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildLTODefaultPipeline(OptimizationLevel Level,
                                            bool DebugLogging = false);

  /// Build the default `AAManager` with the default alias analysis pipeline
  /// registered.
  AAManager buildDefaultAAPipeline();

  /// \brief Parse a textual pass pipeline description into a \c
  /// ModulePassManager.
  ///
  /// The format of the textual pass pipeline description looks something like:
  ///
  ///   module(function(instcombine,sroa),dce,cgscc(inliner,function(...)),...)
  ///
  /// Pass managers have ()s describing the nest structure of passes. All passes
  /// are comma separated. As a special shortcut, if the very first pass is not
  /// a module pass (as a module pass manager is), this will automatically form
  /// the shortest stack of pass managers that allow inserting that first pass.
  /// So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and loop
  /// passes 'lpassN', all of these are valid:
  ///
  ///   fpass1,fpass2,fpass3
  ///   cgpass1,cgpass2,cgpass3
  ///   lpass1,lpass2,lpass3
  ///
  /// And they are equivalent to the following (resp.):
  ///
  ///   module(function(fpass1,fpass2,fpass3))
  ///   module(cgscc(cgpass1,cgpass2,cgpass3))
  ///   module(function(loop(lpass1,lpass2,lpass3)))
  ///
  /// This shortcut is especially useful for debugging and testing small pass
  /// combinations. Note that these shortcuts don't introduce any other magic.
  /// If the sequence of passes aren't all the exact same kind of pass, it will
  /// be an error. You cannot mix different levels implicitly, you must
  /// explicitly form a pass manager in which to nest passes.
  bool parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
                         bool VerifyEachPass = true, bool DebugLogging = false);

  /// @{
  /// Parse a textual pass pipeline description into a specific PassManager.
  ///
  /// Automatic deduction of an appropriate pass manager stack is not supported.
  /// For example, to insert a loop pass 'lpass' into a FunctionPassManager,
  /// this is the valid pipeline text:
  ///
  ///   function(lpass)
  bool parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText,
                         bool VerifyEachPass = true, bool DebugLogging = false);
  bool parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText,
                         bool VerifyEachPass = true, bool DebugLogging = false);
  bool parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText,
                         bool VerifyEachPass = true, bool DebugLogging = false);
  /// @}

  /// Parse a textual alias analysis pipeline into the provided AA manager.
  ///
  /// The format of the textual AA pipeline is a comma separated list of AA
  /// pass names:
  ///
  ///   basic-aa,globals-aa,...
  ///
  /// The AA manager is set up such that the provided alias analyses are tried
  /// in the order specified. See the \c AAManager documentation for details
  /// about the logic used. This routine just provides the textual mapping
  /// between AA names and the analyses to register with the manager.
  ///
  /// Returns false if the text cannot be parsed cleanly. The specific state of
  /// the \p AA manager is unspecified if such an error is encountered and this
  /// returns false.
  bool parseAAPipeline(AAManager &AA, StringRef PipelineText);

  /// \brief Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding passes that perform peephole
  /// optimizations similar to the instruction combiner. These passes will be
  /// inserted after each instance of the instruction combiner pass.
  void registerPeepholeEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    PeepholeEPCallbacks.push_back(C);
  }

  /// \brief Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding late loop canonicalization and
  /// simplification passes. This is the last point in the loop optimization
  /// pipeline before loop deletion. Each pass added
  /// here must be an instance of LoopPass.
  /// This is the place to add passes that can remove loops, such as target-
  /// specific loop idiom recognition.
  void registerLateLoopOptimizationsEPCallback(
      const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
    LateLoopOptimizationsEPCallbacks.push_back(C);
  }

  /// \brief Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding loop passes to the end of the loop
  /// optimizer.
  void registerLoopOptimizerEndEPCallback(
      const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
    LoopOptimizerEndEPCallbacks.push_back(C);
  }

  /// \brief Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding optimization passes after most of the
  /// main optimizations, but before the last cleanup-ish optimizations.
  void registerScalarOptimizerLateEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    ScalarOptimizerLateEPCallbacks.push_back(C);
  }

  /// \brief Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding CallGraphSCC passes at the end of the
  /// main CallGraphSCC passes and before any function simplification passes run
  /// by CGPassManager.
  void registerCGSCCOptimizerLateEPCallback(
      const std::function<void(CGSCCPassManager &, OptimizationLevel)> &C) {
    CGSCCOptimizerLateEPCallbacks.push_back(C);
  }

  /// \brief Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding optimization passes before the
  /// vectorizer and other highly target specific optimization passes are
  /// executed.
  void registerVectorizerStartEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    VectorizerStartEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimization passes once at the start
  /// of the pipeline. This does not apply to 'backend' compiles (LTO and
  /// ThinLTO link-time pipelines).
  void registerPipelineStartEPCallback(
      const std::function<void(ModulePassManager &)> &C) {
    PipelineStartEPCallbacks.push_back(C);
  }

  /// \brief Register a callback for parsing an AliasAnalysis Name to populate
  /// the given AAManager \p AA
  void registerParseAACallback(
      const std::function<bool(StringRef Name, AAManager &AA)> &C) {
    AAParsingCallbacks.push_back(C);
  }

  /// @{
  /// Register callbacks for analysis registration with this PassBuilder
  /// instance.
  /// Callees register their analyses with the given AnalysisManager objects.
  void registerAnalysisRegistrationCallback(
      const std::function<void(CGSCCAnalysisManager &)> &C) {
    CGSCCAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(FunctionAnalysisManager &)> &C) {
    FunctionAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(LoopAnalysisManager &)> &C) {
    LoopAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(ModuleAnalysisManager &)> &C) {
    ModuleAnalysisRegistrationCallbacks.push_back(C);
  }
  /// @}

  /// @{
  /// Register pipeline parsing callbacks with this pass builder instance.
  /// Using these callbacks, callers can parse both a single pass name, as well
  /// as entire sub-pipelines, and populate the PassManager instance
  /// accordingly.
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, CGSCCPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    CGSCCPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, FunctionPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    FunctionPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, LoopPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    LoopPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, ModulePassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    ModulePipelineParsingCallbacks.push_back(C);
  }
  /// @}

  /// \brief Register a callback for a top-level pipeline entry.
  ///
  /// If the PassManager type is not given at the top level of the pipeline
  /// text, this Callback should be used to determine the appropriate stack of
  /// PassManagers and populate the passed ModulePassManager.
  void registerParseTopLevelPipelineCallback(
      const std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
                               bool VerifyEachPass, bool DebugLogging)> &C) {
    TopLevelPipelineParsingCallbacks.push_back(C);
  }

private:
  // Split the textual pipeline description into the recursive PipelineElement
  // structure; returns None on a syntax error.
  static Optional<std::vector<PipelineElement>>
  parsePipelineText(StringRef Text);

  // Per-IR-unit helpers that translate a single parsed PipelineElement into a
  // pass added to the matching pass manager.
  bool parseModulePass(ModulePassManager &MPM, const PipelineElement &E,
                       bool VerifyEachPass, bool DebugLogging);
  bool parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E,
                      bool VerifyEachPass, bool DebugLogging);
  bool parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E,
                         bool VerifyEachPass, bool DebugLogging);
  bool parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
                     bool VerifyEachPass, bool DebugLogging);
  bool parseAAPassName(AAManager &AA, StringRef Name);

  // Per-IR-unit helpers that walk a full parsed pipeline and populate the
  // matching pass manager.
  bool parseLoopPassPipeline(LoopPassManager &LPM,
                             ArrayRef<PipelineElement> Pipeline,
                             bool VerifyEachPass, bool DebugLogging);
  bool parseFunctionPassPipeline(FunctionPassManager &FPM,
                                 ArrayRef<PipelineElement> Pipeline,
                                 bool VerifyEachPass, bool DebugLogging);
  bool parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
                              ArrayRef<PipelineElement> Pipeline,
                              bool VerifyEachPass, bool DebugLogging);
  bool parseModulePassPipeline(ModulePassManager &MPM,
                               ArrayRef<PipelineElement> Pipeline,
                               bool VerifyEachPass, bool DebugLogging);

  // Append the PGO instrumentation passes implied by the given profile file
  // names to \p MPM.
  void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
                         OptimizationLevel Level, bool RunProfileGen,
                         std::string ProfileGenFile,
                         std::string ProfileUseFile);

  // Run every registered peephole extension-point callback against \p FPM.
  void invokePeepholeEPCallbacks(FunctionPassManager &, OptimizationLevel);

  // Extension Point callbacks
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      PeepholeEPCallbacks;
  SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
      LateLoopOptimizationsEPCallbacks;
  SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
      LoopOptimizerEndEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      ScalarOptimizerLateEPCallbacks;
  SmallVector<std::function<void(CGSCCPassManager &, OptimizationLevel)>, 2>
      CGSCCOptimizerLateEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      VectorizerStartEPCallbacks;
  // Module callbacks
  SmallVector<std::function<void(ModulePassManager &)>, 2>
      PipelineStartEPCallbacks;
  SmallVector<std::function<void(ModuleAnalysisManager &)>, 2>
      ModuleAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, ModulePassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      ModulePipelineParsingCallbacks;
  SmallVector<std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
                                 bool VerifyEachPass, bool DebugLogging)>,
              2>
      TopLevelPipelineParsingCallbacks;
  // CGSCC callbacks
  SmallVector<std::function<void(CGSCCAnalysisManager &)>, 2>
      CGSCCAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, CGSCCPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      CGSCCPipelineParsingCallbacks;
  // Function callbacks
  SmallVector<std::function<void(FunctionAnalysisManager &)>, 2>
      FunctionAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, FunctionPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      FunctionPipelineParsingCallbacks;
  // Loop callbacks
  SmallVector<std::function<void(LoopAnalysisManager &)>, 2>
      LoopAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, LoopPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      LoopPipelineParsingCallbacks;
  // AA callbacks
  SmallVector<std::function<bool(StringRef Name, AAManager &AA)>, 2>
      AAParsingCallbacks;
};
640
641/// This utility template takes care of adding require<> and invalidate<>
642/// passes for an analysis to a given \c PassManager. It is intended to be used
643/// during parsing of a pass pipeline when parsing a single PipelineName.
644/// When registering a new function analysis FancyAnalysis with the pass
645/// pipeline name "fancy-analysis", a matching ParsePipelineCallback could look
646/// like this:
647///
648/// static bool parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM,
649/// ArrayRef<PipelineElement> P) {
650/// if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis", Name,
651/// FPM))
652/// return true;
653/// return false;
654/// }
655template <typename AnalysisT, typename IRUnitT, typename AnalysisManagerT,
656 typename... ExtraArgTs>
657bool parseAnalysisUtilityPasses(
658 StringRef AnalysisName, StringRef PipelineName,
659 PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...> &PM) {
660 if (!PipelineName.endswith(">"))
661 return false;
662 // See if this is an invalidate<> pass name
663 if (PipelineName.startswith("invalidate<")) {
664 PipelineName = PipelineName.substr(11, PipelineName.size() - 12);
665 if (PipelineName != AnalysisName)
666 return false;
667 PM.addPass(InvalidateAnalysisPass<AnalysisT>());
668 return true;
669 }
670
671 // See if this is a require<> pass name
672 if (PipelineName.startswith("require<")) {
673 PipelineName = PipelineName.substr(8, PipelineName.size() - 9);
674 if (PipelineName != AnalysisName)
675 return false;
676 PM.addPass(RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,
677 ExtraArgTs...>());
678 return true;
679 }
680
681 return false;
682}
} // namespace llvm

#endif