blob: 02d3dc324bce7353c6594de539357183b798de3d [file] [log] [blame]
//===- Parsing, selection, and construction of pass pipelines --*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Interfaces for registering analysis passes, producing common pass manager
/// configurations, and parsing of pass pipelines.
///
//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_PASSES_PASSBUILDER_H
17#define LLVM_PASSES_PASSBUILDER_H
18
19#include "llvm/ADT/Optional.h"
20#include "llvm/Analysis/CGSCCPassManager.h"
21#include "llvm/IR/PassManager.h"
22#include "llvm/Transforms/Instrumentation.h"
23#include "llvm/Transforms/Scalar/LoopPassManager.h"
24#include <vector>
25
26namespace llvm {
27class StringRef;
28class AAManager;
29class TargetMachine;
Andrew Scullcdfcccc2018-10-05 20:58:37 +010030class ModuleSummaryIndex;
Andrew Scull5e1ddfa2018-08-14 10:06:54 +010031
/// A struct capturing PGO tunables.
struct PGOOptions {
  /// Construct a PGO configuration.
  ///
  /// \p ProfileGenFile is the path to write an instrumentation profile to;
  /// \p ProfileUseFile is the path of an instrumentation profile to consume;
  /// \p SampleProfileFile is the path of a sample profile to consume.
  /// \p RunProfileGen requests running profile instrumentation.
  /// \p SamplePGOSupport enables sample-PGO-specific passes; it is implied
  /// whenever a sample profile file is given.
  ///
  /// At least one of the options must be engaged, otherwise the configuration
  /// is meaningless and the assert fires.
  PGOOptions(std::string ProfileGenFile = "", std::string ProfileUseFile = "",
             std::string SampleProfileFile = "", bool RunProfileGen = false,
             bool SamplePGOSupport = false)
      // Move the by-value string parameters into the members instead of
      // copying them (the parameters are dead after construction).
      : ProfileGenFile(std::move(ProfileGenFile)),
        ProfileUseFile(std::move(ProfileUseFile)),
        SampleProfileFile(std::move(SampleProfileFile)),
        RunProfileGen(RunProfileGen),
        SamplePGOSupport(SamplePGOSupport ||
                         !this->SampleProfileFile.empty()) {
    // Read the members (not the moved-from parameters) when validating.
    assert((RunProfileGen || !this->SampleProfileFile.empty() ||
            !this->ProfileUseFile.empty() || SamplePGOSupport) &&
           "Illegal PGOOptions.");
  }
  std::string ProfileGenFile;
  std::string ProfileUseFile;
  std::string SampleProfileFile;
  bool RunProfileGen;
  bool SamplePGOSupport;
};
51
Andrew Scullcdfcccc2018-10-05 20:58:37 +010052/// This class provides access to building LLVM's passes.
Andrew Scull5e1ddfa2018-08-14 10:06:54 +010053///
54/// It's members provide the baseline state available to passes during their
55/// construction. The \c PassRegistry.def file specifies how to construct all
56/// of the built-in passes, and those may reference these members during
57/// construction.
58class PassBuilder {
59 TargetMachine *TM;
60 Optional<PGOOptions> PGOOpt;
Andrew Scull0372a572018-11-16 15:47:06 +000061 PassInstrumentationCallbacks *PIC;
Andrew Scull5e1ddfa2018-08-14 10:06:54 +010062
63public:
Andrew Scullcdfcccc2018-10-05 20:58:37 +010064 /// A struct to capture parsed pass pipeline names.
Andrew Scull5e1ddfa2018-08-14 10:06:54 +010065 ///
66 /// A pipeline is defined as a series of names, each of which may in itself
67 /// recursively contain a nested pipeline. A name is either the name of a pass
68 /// (e.g. "instcombine") or the name of a pipeline type (e.g. "cgscc"). If the
69 /// name is the name of a pass, the InnerPipeline is empty, since passes
70 /// cannot contain inner pipelines. See parsePassPipeline() for a more
71 /// detailed description of the textual pipeline format.
72 struct PipelineElement {
73 StringRef Name;
74 std::vector<PipelineElement> InnerPipeline;
75 };
76
Andrew Scullcdfcccc2018-10-05 20:58:37 +010077 /// ThinLTO phase.
Andrew Scull5e1ddfa2018-08-14 10:06:54 +010078 ///
79 /// This enumerates the LLVM ThinLTO optimization phases.
80 enum class ThinLTOPhase {
81 /// No ThinLTO behavior needed.
82 None,
83 // ThinLTO prelink (summary) phase.
84 PreLink,
85 // ThinLTO postlink (backend compile) phase.
86 PostLink
87 };
88
Andrew Scullcdfcccc2018-10-05 20:58:37 +010089 /// LLVM-provided high-level optimization levels.
Andrew Scull5e1ddfa2018-08-14 10:06:54 +010090 ///
91 /// This enumerates the LLVM-provided high-level optimization levels. Each
92 /// level has a specific goal and rationale.
93 enum OptimizationLevel {
94 /// Disable as many optimizations as possible. This doesn't completely
95 /// disable the optimizer in all cases, for example always_inline functions
96 /// can be required to be inlined for correctness.
97 O0,
98
99 /// Optimize quickly without destroying debuggability.
100 ///
101 /// FIXME: The current and historical behavior of this level does *not*
102 /// agree with this goal, but we would like to move toward this goal in the
103 /// future.
104 ///
105 /// This level is tuned to produce a result from the optimizer as quickly
106 /// as possible and to avoid destroying debuggability. This tends to result
107 /// in a very good development mode where the compiled code will be
108 /// immediately executed as part of testing. As a consequence, where
109 /// possible, we would like to produce efficient-to-execute code, but not
110 /// if it significantly slows down compilation or would prevent even basic
111 /// debugging of the resulting binary.
112 ///
113 /// As an example, complex loop transformations such as versioning,
114 /// vectorization, or fusion might not make sense here due to the degree to
115 /// which the executed code would differ from the source code, and the
116 /// potential compile time cost.
117 O1,
118
119 /// Optimize for fast execution as much as possible without triggering
120 /// significant incremental compile time or code size growth.
121 ///
122 /// The key idea is that optimizations at this level should "pay for
123 /// themselves". So if an optimization increases compile time by 5% or
124 /// increases code size by 5% for a particular benchmark, that benchmark
125 /// should also be one which sees a 5% runtime improvement. If the compile
126 /// time or code size penalties happen on average across a diverse range of
127 /// LLVM users' benchmarks, then the improvements should as well.
128 ///
129 /// And no matter what, the compile time needs to not grow superlinearly
130 /// with the size of input to LLVM so that users can control the runtime of
131 /// the optimizer in this mode.
132 ///
133 /// This is expected to be a good default optimization level for the vast
134 /// majority of users.
135 O2,
136
137 /// Optimize for fast execution as much as possible.
138 ///
139 /// This mode is significantly more aggressive in trading off compile time
140 /// and code size to get execution time improvements. The core idea is that
141 /// this mode should include any optimization that helps execution time on
142 /// balance across a diverse collection of benchmarks, even if it increases
143 /// code size or compile time for some benchmarks without corresponding
144 /// improvements to execution time.
145 ///
146 /// Despite being willing to trade more compile time off to get improved
147 /// execution time, this mode still tries to avoid superlinear growth in
148 /// order to make even significantly slower compile times at least scale
149 /// reasonably. This does not preclude very substantial constant factor
150 /// costs though.
151 O3,
152
153 /// Similar to \c O2 but tries to optimize for small code size instead of
154 /// fast execution without triggering significant incremental execution
155 /// time slowdowns.
156 ///
157 /// The logic here is exactly the same as \c O2, but with code size and
158 /// execution time metrics swapped.
159 ///
160 /// A consequence of the different core goal is that this should in general
161 /// produce substantially smaller executables that still run in
162 /// a reasonable amount of time.
163 Os,
164
165 /// A very specialized mode that will optimize for code size at any and all
166 /// costs.
167 ///
168 /// This is useful primarily when there are absolute size limitations and
169 /// any effort taken to reduce the size is worth it regardless of the
170 /// execution time impact. You should expect this level to produce rather
171 /// slow, but very small, code.
172 Oz
173 };
174
175 explicit PassBuilder(TargetMachine *TM = nullptr,
Andrew Scull0372a572018-11-16 15:47:06 +0000176 Optional<PGOOptions> PGOOpt = None,
177 PassInstrumentationCallbacks *PIC = nullptr)
178 : TM(TM), PGOOpt(PGOOpt), PIC(PIC) {}
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100179
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100180 /// Cross register the analysis managers through their proxies.
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100181 ///
182 /// This is an interface that can be used to cross register each
183 // AnalysisManager with all the others analysis managers.
184 void crossRegisterProxies(LoopAnalysisManager &LAM,
185 FunctionAnalysisManager &FAM,
186 CGSCCAnalysisManager &CGAM,
187 ModuleAnalysisManager &MAM);
188
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100189 /// Registers all available module analysis passes.
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100190 ///
191 /// This is an interface that can be used to populate a \c
192 /// ModuleAnalysisManager with all registered module analyses. Callers can
193 /// still manually register any additional analyses. Callers can also
194 /// pre-register analyses and this will not override those.
195 void registerModuleAnalyses(ModuleAnalysisManager &MAM);
196
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100197 /// Registers all available CGSCC analysis passes.
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100198 ///
199 /// This is an interface that can be used to populate a \c CGSCCAnalysisManager
200 /// with all registered CGSCC analyses. Callers can still manually register any
201 /// additional analyses. Callers can also pre-register analyses and this will
202 /// not override those.
203 void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);
204
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100205 /// Registers all available function analysis passes.
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100206 ///
207 /// This is an interface that can be used to populate a \c
208 /// FunctionAnalysisManager with all registered function analyses. Callers can
209 /// still manually register any additional analyses. Callers can also
210 /// pre-register analyses and this will not override those.
211 void registerFunctionAnalyses(FunctionAnalysisManager &FAM);
212
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100213 /// Registers all available loop analysis passes.
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100214 ///
215 /// This is an interface that can be used to populate a \c LoopAnalysisManager
216 /// with all registered loop analyses. Callers can still manually register any
217 /// additional analyses.
218 void registerLoopAnalyses(LoopAnalysisManager &LAM);
219
220 /// Construct the core LLVM function canonicalization and simplification
221 /// pipeline.
222 ///
223 /// This is a long pipeline and uses most of the per-function optimization
224 /// passes in LLVM to canonicalize and simplify the IR. It is suitable to run
225 /// repeatedly over the IR and is not expected to destroy important
226 /// information about the semantics of the IR.
227 ///
228 /// Note that \p Level cannot be `O0` here. The pipelines produced are
229 /// only intended for use when attempting to optimize code. If frontends
230 /// require some transformations for semantic reasons, they should explicitly
231 /// build them.
232 ///
233 /// \p Phase indicates the current ThinLTO phase.
234 FunctionPassManager
235 buildFunctionSimplificationPipeline(OptimizationLevel Level,
236 ThinLTOPhase Phase,
237 bool DebugLogging = false);
238
239 /// Construct the core LLVM module canonicalization and simplification
240 /// pipeline.
241 ///
242 /// This pipeline focuses on canonicalizing and simplifying the entire module
243 /// of IR. Much like the function simplification pipeline above, it is
244 /// suitable to run repeatedly over the IR and is not expected to destroy
245 /// important information. It does, however, perform inlining and other
246 /// heuristic based simplifications that are not strictly reversible.
247 ///
248 /// Note that \p Level cannot be `O0` here. The pipelines produced are
249 /// only intended for use when attempting to optimize code. If frontends
250 /// require some transformations for semantic reasons, they should explicitly
251 /// build them.
252 ///
253 /// \p Phase indicates the current ThinLTO phase.
254 ModulePassManager
255 buildModuleSimplificationPipeline(OptimizationLevel Level,
256 ThinLTOPhase Phase,
257 bool DebugLogging = false);
258
259 /// Construct the core LLVM module optimization pipeline.
260 ///
261 /// This pipeline focuses on optimizing the execution speed of the IR. It
262 /// uses cost modeling and thresholds to balance code growth against runtime
263 /// improvements. It includes vectorization and other information destroying
264 /// transformations. It also cannot generally be run repeatedly on a module
265 /// without potentially seriously regressing either runtime performance of
266 /// the code or serious code size growth.
267 ///
268 /// Note that \p Level cannot be `O0` here. The pipelines produced are
269 /// only intended for use when attempting to optimize code. If frontends
270 /// require some transformations for semantic reasons, they should explicitly
271 /// build them.
272 ModulePassManager buildModuleOptimizationPipeline(OptimizationLevel Level,
273 bool DebugLogging = false);
274
275 /// Build a per-module default optimization pipeline.
276 ///
277 /// This provides a good default optimization pipeline for per-module
278 /// optimization and code generation without any link-time optimization. It
279 /// typically correspond to frontend "-O[123]" options for optimization
280 /// levels \c O1, \c O2 and \c O3 resp.
281 ///
282 /// Note that \p Level cannot be `O0` here. The pipelines produced are
283 /// only intended for use when attempting to optimize code. If frontends
284 /// require some transformations for semantic reasons, they should explicitly
285 /// build them.
286 ModulePassManager buildPerModuleDefaultPipeline(OptimizationLevel Level,
287 bool DebugLogging = false);
288
289 /// Build a pre-link, ThinLTO-targeting default optimization pipeline to
290 /// a pass manager.
291 ///
292 /// This adds the pre-link optimizations tuned to prepare a module for
293 /// a ThinLTO run. It works to minimize the IR which needs to be analyzed
294 /// without making irreversible decisions which could be made better during
295 /// the LTO run.
296 ///
297 /// Note that \p Level cannot be `O0` here. The pipelines produced are
298 /// only intended for use when attempting to optimize code. If frontends
299 /// require some transformations for semantic reasons, they should explicitly
300 /// build them.
301 ModulePassManager
302 buildThinLTOPreLinkDefaultPipeline(OptimizationLevel Level,
303 bool DebugLogging = false);
304
305 /// Build an ThinLTO default optimization pipeline to a pass manager.
306 ///
307 /// This provides a good default optimization pipeline for link-time
308 /// optimization and code generation. It is particularly tuned to fit well
309 /// when IR coming into the LTO phase was first run through \c
310 /// addPreLinkLTODefaultPipeline, and the two coordinate closely.
311 ///
312 /// Note that \p Level cannot be `O0` here. The pipelines produced are
313 /// only intended for use when attempting to optimize code. If frontends
314 /// require some transformations for semantic reasons, they should explicitly
315 /// build them.
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100316 ModulePassManager
317 buildThinLTODefaultPipeline(OptimizationLevel Level, bool DebugLogging,
318 const ModuleSummaryIndex *ImportSummary);
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100319
320 /// Build a pre-link, LTO-targeting default optimization pipeline to a pass
321 /// manager.
322 ///
323 /// This adds the pre-link optimizations tuned to work well with a later LTO
324 /// run. It works to minimize the IR which needs to be analyzed without
325 /// making irreversible decisions which could be made better during the LTO
326 /// run.
327 ///
328 /// Note that \p Level cannot be `O0` here. The pipelines produced are
329 /// only intended for use when attempting to optimize code. If frontends
330 /// require some transformations for semantic reasons, they should explicitly
331 /// build them.
332 ModulePassManager buildLTOPreLinkDefaultPipeline(OptimizationLevel Level,
333 bool DebugLogging = false);
334
335 /// Build an LTO default optimization pipeline to a pass manager.
336 ///
337 /// This provides a good default optimization pipeline for link-time
338 /// optimization and code generation. It is particularly tuned to fit well
339 /// when IR coming into the LTO phase was first run through \c
340 /// addPreLinkLTODefaultPipeline, and the two coordinate closely.
341 ///
342 /// Note that \p Level cannot be `O0` here. The pipelines produced are
343 /// only intended for use when attempting to optimize code. If frontends
344 /// require some transformations for semantic reasons, they should explicitly
345 /// build them.
346 ModulePassManager buildLTODefaultPipeline(OptimizationLevel Level,
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100347 bool DebugLogging,
348 ModuleSummaryIndex *ExportSummary);
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100349
350 /// Build the default `AAManager` with the default alias analysis pipeline
351 /// registered.
352 AAManager buildDefaultAAPipeline();
353
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100354 /// Parse a textual pass pipeline description into a \c
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100355 /// ModulePassManager.
356 ///
357 /// The format of the textual pass pipeline description looks something like:
358 ///
359 /// module(function(instcombine,sroa),dce,cgscc(inliner,function(...)),...)
360 ///
361 /// Pass managers have ()s describing the nest structure of passes. All passes
362 /// are comma separated. As a special shortcut, if the very first pass is not
363 /// a module pass (as a module pass manager is), this will automatically form
364 /// the shortest stack of pass managers that allow inserting that first pass.
365 /// So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and loop
366 /// passes 'lpassN', all of these are valid:
367 ///
368 /// fpass1,fpass2,fpass3
369 /// cgpass1,cgpass2,cgpass3
370 /// lpass1,lpass2,lpass3
371 ///
372 /// And they are equivalent to the following (resp.):
373 ///
374 /// module(function(fpass1,fpass2,fpass3))
375 /// module(cgscc(cgpass1,cgpass2,cgpass3))
376 /// module(function(loop(lpass1,lpass2,lpass3)))
377 ///
378 /// This shortcut is especially useful for debugging and testing small pass
379 /// combinations. Note that these shortcuts don't introduce any other magic.
380 /// If the sequence of passes aren't all the exact same kind of pass, it will
381 /// be an error. You cannot mix different levels implicitly, you must
382 /// explicitly form a pass manager in which to nest passes.
383 bool parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
384 bool VerifyEachPass = true, bool DebugLogging = false);
385
386 /// {{@ Parse a textual pass pipeline description into a specific PassManager
387 ///
388 /// Automatic deduction of an appropriate pass manager stack is not supported.
389 /// For example, to insert a loop pass 'lpass' into a FunctinoPassManager,
390 /// this is the valid pipeline text:
391 ///
392 /// function(lpass)
393 bool parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText,
394 bool VerifyEachPass = true, bool DebugLogging = false);
395 bool parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText,
396 bool VerifyEachPass = true, bool DebugLogging = false);
397 bool parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText,
398 bool VerifyEachPass = true, bool DebugLogging = false);
399 /// @}}
400
401 /// Parse a textual alias analysis pipeline into the provided AA manager.
402 ///
403 /// The format of the textual AA pipeline is a comma separated list of AA
404 /// pass names:
405 ///
406 /// basic-aa,globals-aa,...
407 ///
408 /// The AA manager is set up such that the provided alias analyses are tried
409 /// in the order specified. See the \c AAManaager documentation for details
410 /// about the logic used. This routine just provides the textual mapping
411 /// between AA names and the analyses to register with the manager.
412 ///
413 /// Returns false if the text cannot be parsed cleanly. The specific state of
414 /// the \p AA manager is unspecified if such an error is encountered and this
415 /// returns false.
416 bool parseAAPipeline(AAManager &AA, StringRef PipelineText);
417
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100418 /// Register a callback for a default optimizer pipeline extension
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100419 /// point
420 ///
421 /// This extension point allows adding passes that perform peephole
422 /// optimizations similar to the instruction combiner. These passes will be
423 /// inserted after each instance of the instruction combiner pass.
424 void registerPeepholeEPCallback(
425 const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
426 PeepholeEPCallbacks.push_back(C);
427 }
428
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100429 /// Register a callback for a default optimizer pipeline extension
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100430 /// point
431 ///
432 /// This extension point allows adding late loop canonicalization and
433 /// simplification passes. This is the last point in the loop optimization
434 /// pipeline before loop deletion. Each pass added
435 /// here must be an instance of LoopPass.
436 /// This is the place to add passes that can remove loops, such as target-
437 /// specific loop idiom recognition.
438 void registerLateLoopOptimizationsEPCallback(
439 const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
440 LateLoopOptimizationsEPCallbacks.push_back(C);
441 }
442
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100443 /// Register a callback for a default optimizer pipeline extension
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100444 /// point
445 ///
446 /// This extension point allows adding loop passes to the end of the loop
447 /// optimizer.
448 void registerLoopOptimizerEndEPCallback(
449 const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
450 LoopOptimizerEndEPCallbacks.push_back(C);
451 }
452
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100453 /// Register a callback for a default optimizer pipeline extension
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100454 /// point
455 ///
456 /// This extension point allows adding optimization passes after most of the
457 /// main optimizations, but before the last cleanup-ish optimizations.
458 void registerScalarOptimizerLateEPCallback(
459 const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
460 ScalarOptimizerLateEPCallbacks.push_back(C);
461 }
462
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100463 /// Register a callback for a default optimizer pipeline extension
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100464 /// point
465 ///
466 /// This extension point allows adding CallGraphSCC passes at the end of the
467 /// main CallGraphSCC passes and before any function simplification passes run
468 /// by CGPassManager.
469 void registerCGSCCOptimizerLateEPCallback(
470 const std::function<void(CGSCCPassManager &, OptimizationLevel)> &C) {
471 CGSCCOptimizerLateEPCallbacks.push_back(C);
472 }
473
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100474 /// Register a callback for a default optimizer pipeline extension
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100475 /// point
476 ///
477 /// This extension point allows adding optimization passes before the
478 /// vectorizer and other highly target specific optimization passes are
479 /// executed.
480 void registerVectorizerStartEPCallback(
481 const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
482 VectorizerStartEPCallbacks.push_back(C);
483 }
484
485 /// Register a callback for a default optimizer pipeline extension point.
486 ///
487 /// This extension point allows adding optimization once at the start of the
488 /// pipeline. This does not apply to 'backend' compiles (LTO and ThinLTO
489 /// link-time pipelines).
490 void registerPipelineStartEPCallback(
491 const std::function<void(ModulePassManager &)> &C) {
492 PipelineStartEPCallbacks.push_back(C);
493 }
494
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100495 /// Register a callback for parsing an AliasAnalysis Name to populate
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100496 /// the given AAManager \p AA
497 void registerParseAACallback(
498 const std::function<bool(StringRef Name, AAManager &AA)> &C) {
499 AAParsingCallbacks.push_back(C);
500 }
501
502 /// {{@ Register callbacks for analysis registration with this PassBuilder
503 /// instance.
504 /// Callees register their analyses with the given AnalysisManager objects.
505 void registerAnalysisRegistrationCallback(
506 const std::function<void(CGSCCAnalysisManager &)> &C) {
507 CGSCCAnalysisRegistrationCallbacks.push_back(C);
508 }
509 void registerAnalysisRegistrationCallback(
510 const std::function<void(FunctionAnalysisManager &)> &C) {
511 FunctionAnalysisRegistrationCallbacks.push_back(C);
512 }
513 void registerAnalysisRegistrationCallback(
514 const std::function<void(LoopAnalysisManager &)> &C) {
515 LoopAnalysisRegistrationCallbacks.push_back(C);
516 }
517 void registerAnalysisRegistrationCallback(
518 const std::function<void(ModuleAnalysisManager &)> &C) {
519 ModuleAnalysisRegistrationCallbacks.push_back(C);
520 }
521 /// @}}
522
523 /// {{@ Register pipeline parsing callbacks with this pass builder instance.
524 /// Using these callbacks, callers can parse both a single pass name, as well
525 /// as entire sub-pipelines, and populate the PassManager instance
526 /// accordingly.
527 void registerPipelineParsingCallback(
528 const std::function<bool(StringRef Name, CGSCCPassManager &,
529 ArrayRef<PipelineElement>)> &C) {
530 CGSCCPipelineParsingCallbacks.push_back(C);
531 }
532 void registerPipelineParsingCallback(
533 const std::function<bool(StringRef Name, FunctionPassManager &,
534 ArrayRef<PipelineElement>)> &C) {
535 FunctionPipelineParsingCallbacks.push_back(C);
536 }
537 void registerPipelineParsingCallback(
538 const std::function<bool(StringRef Name, LoopPassManager &,
539 ArrayRef<PipelineElement>)> &C) {
540 LoopPipelineParsingCallbacks.push_back(C);
541 }
542 void registerPipelineParsingCallback(
543 const std::function<bool(StringRef Name, ModulePassManager &,
544 ArrayRef<PipelineElement>)> &C) {
545 ModulePipelineParsingCallbacks.push_back(C);
546 }
547 /// @}}
548
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100549 /// Register a callback for a top-level pipeline entry.
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100550 ///
551 /// If the PassManager type is not given at the top level of the pipeline
552 /// text, this Callback should be used to determine the appropriate stack of
553 /// PassManagers and populate the passed ModulePassManager.
554 void registerParseTopLevelPipelineCallback(
555 const std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
556 bool VerifyEachPass, bool DebugLogging)> &C) {
557 TopLevelPipelineParsingCallbacks.push_back(C);
558 }
559
560private:
561 static Optional<std::vector<PipelineElement>>
562 parsePipelineText(StringRef Text);
563
564 bool parseModulePass(ModulePassManager &MPM, const PipelineElement &E,
565 bool VerifyEachPass, bool DebugLogging);
566 bool parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E,
567 bool VerifyEachPass, bool DebugLogging);
568 bool parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E,
569 bool VerifyEachPass, bool DebugLogging);
570 bool parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
571 bool VerifyEachPass, bool DebugLogging);
572 bool parseAAPassName(AAManager &AA, StringRef Name);
573
574 bool parseLoopPassPipeline(LoopPassManager &LPM,
575 ArrayRef<PipelineElement> Pipeline,
576 bool VerifyEachPass, bool DebugLogging);
577 bool parseFunctionPassPipeline(FunctionPassManager &FPM,
578 ArrayRef<PipelineElement> Pipeline,
579 bool VerifyEachPass, bool DebugLogging);
580 bool parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
581 ArrayRef<PipelineElement> Pipeline,
582 bool VerifyEachPass, bool DebugLogging);
583 bool parseModulePassPipeline(ModulePassManager &MPM,
584 ArrayRef<PipelineElement> Pipeline,
585 bool VerifyEachPass, bool DebugLogging);
586
587 void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
588 OptimizationLevel Level, bool RunProfileGen,
589 std::string ProfileGenFile,
590 std::string ProfileUseFile);
591
592 void invokePeepholeEPCallbacks(FunctionPassManager &, OptimizationLevel);
593
594 // Extension Point callbacks
595 SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
596 PeepholeEPCallbacks;
597 SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
598 LateLoopOptimizationsEPCallbacks;
599 SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
600 LoopOptimizerEndEPCallbacks;
601 SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
602 ScalarOptimizerLateEPCallbacks;
603 SmallVector<std::function<void(CGSCCPassManager &, OptimizationLevel)>, 2>
604 CGSCCOptimizerLateEPCallbacks;
605 SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
606 VectorizerStartEPCallbacks;
607 // Module callbacks
608 SmallVector<std::function<void(ModulePassManager &)>, 2>
609 PipelineStartEPCallbacks;
610 SmallVector<std::function<void(ModuleAnalysisManager &)>, 2>
611 ModuleAnalysisRegistrationCallbacks;
612 SmallVector<std::function<bool(StringRef, ModulePassManager &,
613 ArrayRef<PipelineElement>)>,
614 2>
615 ModulePipelineParsingCallbacks;
616 SmallVector<std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
617 bool VerifyEachPass, bool DebugLogging)>,
618 2>
619 TopLevelPipelineParsingCallbacks;
620 // CGSCC callbacks
621 SmallVector<std::function<void(CGSCCAnalysisManager &)>, 2>
622 CGSCCAnalysisRegistrationCallbacks;
623 SmallVector<std::function<bool(StringRef, CGSCCPassManager &,
624 ArrayRef<PipelineElement>)>,
625 2>
626 CGSCCPipelineParsingCallbacks;
627 // Function callbacks
628 SmallVector<std::function<void(FunctionAnalysisManager &)>, 2>
629 FunctionAnalysisRegistrationCallbacks;
630 SmallVector<std::function<bool(StringRef, FunctionPassManager &,
631 ArrayRef<PipelineElement>)>,
632 2>
633 FunctionPipelineParsingCallbacks;
634 // Loop callbacks
635 SmallVector<std::function<void(LoopAnalysisManager &)>, 2>
636 LoopAnalysisRegistrationCallbacks;
637 SmallVector<std::function<bool(StringRef, LoopPassManager &,
638 ArrayRef<PipelineElement>)>,
639 2>
640 LoopPipelineParsingCallbacks;
641 // AA callbacks
642 SmallVector<std::function<bool(StringRef Name, AAManager &AA)>, 2>
643 AAParsingCallbacks;
644};
645
646/// This utility template takes care of adding require<> and invalidate<>
647/// passes for an analysis to a given \c PassManager. It is intended to be used
648/// during parsing of a pass pipeline when parsing a single PipelineName.
649/// When registering a new function analysis FancyAnalysis with the pass
650/// pipeline name "fancy-analysis", a matching ParsePipelineCallback could look
651/// like this:
652///
653/// static bool parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM,
654/// ArrayRef<PipelineElement> P) {
655/// if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis", Name,
656/// FPM))
657/// return true;
658/// return false;
659/// }
660template <typename AnalysisT, typename IRUnitT, typename AnalysisManagerT,
661 typename... ExtraArgTs>
662bool parseAnalysisUtilityPasses(
663 StringRef AnalysisName, StringRef PipelineName,
664 PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...> &PM) {
665 if (!PipelineName.endswith(">"))
666 return false;
667 // See if this is an invalidate<> pass name
668 if (PipelineName.startswith("invalidate<")) {
669 PipelineName = PipelineName.substr(11, PipelineName.size() - 12);
670 if (PipelineName != AnalysisName)
671 return false;
672 PM.addPass(InvalidateAnalysisPass<AnalysisT>());
673 return true;
674 }
675
676 // See if this is a require<> pass name
677 if (PipelineName.startswith("require<")) {
678 PipelineName = PipelineName.substr(8, PipelineName.size() - 9);
679 if (PipelineName != AnalysisName)
680 return false;
681 PM.addPass(RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,
682 ExtraArgTs...>());
683 return true;
684 }
685
686 return false;
687}
688}
689
690#endif