//===------ Support/ScopHelper.h -- Some Helper Functions for Scop. -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Small functions that help with LLVM-IR.
//
//===----------------------------------------------------------------------===//

#ifndef POLLY_SUPPORT_IRHELPER_H
#define POLLY_SUPPORT_IRHELPER_H

#include "llvm/ADT/SetVector.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/ValueHandle.h"
#include "isl/isl-noexceptions.h"

namespace llvm {
class LoopInfo;
class Loop;
class ScalarEvolution;
class SCEV;
class Region;
class Pass;
class DominatorTree;
class RegionInfo;
class RegionNode;
} // namespace llvm

namespace polly {
class Scop;
class ScopStmt;

/// Enumeration of assumptions Polly can take.
enum AssumptionKind {
  ALIASING,
  INBOUNDS,
  WRAPPING,
  UNSIGNED,
  PROFITABLE,
  ERRORBLOCK,
  COMPLEXITY,
  INFINITELOOP,
  INVARIANTLOAD,
  DELINEARIZATION,
};

/// Enum to distinguish between assumptions and restrictions.
enum AssumptionSign { AS_ASSUMPTION, AS_RESTRICTION };

/// Helper struct to remember assumptions.
struct Assumption {
  /// The kind of the assumption (e.g., WRAPPING).
  AssumptionKind Kind;

  /// Flag to distinguish assumptions and restrictions.
  AssumptionSign Sign;

  /// The valid/invalid context if this is an assumption/restriction.
  isl::set Set;

  /// The location that caused this assumption.
  llvm::DebugLoc Loc;

  /// An optional block whose domain can simplify the assumption.
  llvm::BasicBlock *BB;
};

using RecordedAssumptionsTy = llvm::SmallVector<Assumption, 8>;

/// Record an assumption for later addition to the assumed context.
///
/// This function will add the assumption to the RecordedAssumptions. This
/// collection will be added (@see addAssumption) to the assumed context once
/// all parameters are known and the context is fully built.
///
/// @param RecordedAssumptions Container which keeps all recorded assumptions.
/// @param Kind The assumption kind describing the underlying cause.
/// @param Set  The relations between parameters that are assumed to hold.
/// @param Loc  The location in the source that caused this assumption.
/// @param Sign Enum to indicate if the assumptions in @p Set are positive
///             (needed/assumptions) or negative (invalid/restrictions).
/// @param BB   The block in which this assumption was taken. If it is
///             set, the domain of that block will be used to simplify the
///             actual assumption in @p Set once it is added. This is useful
///             if the assumption was created prior to the domain.
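///
/// A minimal usage sketch (RecordedAssumptions, WrapSet, DLoc and BB are
/// placeholder names for values available at the call site, not part of this
/// interface):
///
///   RecordedAssumptionsTy RecordedAssumptions;
///   // Remember that the relation in WrapSet must hold; it is added to the
///   // assumed context later, once all parameters are known.
///   recordAssumption(&RecordedAssumptions, WRAPPING, WrapSet, DLoc,
///                    AS_ASSUMPTION, BB);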
void recordAssumption(RecordedAssumptionsTy *RecordedAssumptions,
                      AssumptionKind Kind, isl::set Set, llvm::DebugLoc Loc,
                      AssumptionSign Sign, llvm::BasicBlock *BB = nullptr);

/// Type to remap values.
using ValueMapT = llvm::DenseMap<llvm::AssertingVH<llvm::Value>,
                                 llvm::AssertingVH<llvm::Value>>;

/// Type for a set of invariant loads.
using InvariantLoadsSetTy = llvm::SetVector<llvm::AssertingVH<llvm::LoadInst>>;

/// Set type for parameters.
using ParameterSetTy = llvm::SetVector<const llvm::SCEV *>;

/// Set of loops (used to remember loops in non-affine subregions).
using BoxedLoopsSetTy = llvm::SetVector<const llvm::Loop *>;

/// Utility proxy to wrap the common members of LoadInst and StoreInst.
///
/// This works like the LLVM utility class CallSite, i.e. it forwards all calls
/// to either a LoadInst, StoreInst, MemIntrinsic or MemTransferInst.
/// It is similar to LLVM's utility classes IntrinsicInst, MemIntrinsic,
/// MemTransferInst, etc. in that it offers a common interface, but does not act
/// as a fake base class.
/// It is similar to StringRef and ArrayRef in that it holds a pointer to the
/// referenced object and should be passed by-value as it is small enough.
///
/// This proxy can either represent a LoadInst instance, a StoreInst instance,
/// a MemIntrinsic instance (memset, memmove, memcpy), a CallInst instance or a
/// nullptr (only creatable using the default constructor); never an Instruction
/// that is none of the above. When representing a nullptr, only
/// the following methods are defined:
/// isNull(), isInstruction(), isLoad(), isStore(), ..., isMemTransferInst(),
/// operator bool(), operator!()
///
/// The functions isa, cast, cast_or_null and dyn_cast are modeled to resemble
/// those from llvm/Support/Casting.h. Because C++ does not support partial
/// function template specialization, those cannot be used directly
/// (llvm::isa could, but then llvm::cast etc. would not have the expected
/// behavior).
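///
/// Usage sketch (illustrative only; BB names an llvm::BasicBlock under
/// analysis and is not part of this interface):
///
///   for (llvm::Instruction &Inst : *BB) {
///     MemAccInst Acc = MemAccInst::dyn_cast(Inst);
///     if (!Acc)
///       continue; // Neither load, store, mem intrinsic nor call.
///     llvm::Value *Ptr = Acc.getPointerOperand();
///     // ... model the access through Ptr ...
///   }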
class MemAccInst {
private:
  llvm::Instruction *I;

public:
  MemAccInst() : I(nullptr) {}
  MemAccInst(const MemAccInst &Inst) : I(Inst.I) {}
  /* implicit */ MemAccInst(llvm::LoadInst &LI) : I(&LI) {}
  /* implicit */ MemAccInst(llvm::LoadInst *LI) : I(LI) {}
  /* implicit */ MemAccInst(llvm::StoreInst &SI) : I(&SI) {}
  /* implicit */ MemAccInst(llvm::StoreInst *SI) : I(SI) {}
  /* implicit */ MemAccInst(llvm::MemIntrinsic *MI) : I(MI) {}
  /* implicit */ MemAccInst(llvm::CallInst *CI) : I(CI) {}
  explicit MemAccInst(llvm::Instruction &I) : I(&I) { assert(isa(I)); }
  explicit MemAccInst(llvm::Instruction *I) : I(I) { assert(isa(I)); }

  static bool isa(const llvm::Value &V) {
    return llvm::isa<llvm::LoadInst>(V) || llvm::isa<llvm::StoreInst>(V) ||
           llvm::isa<llvm::CallInst>(V) || llvm::isa<llvm::MemIntrinsic>(V);
  }
  static bool isa(const llvm::Value *V) {
    return llvm::isa<llvm::LoadInst>(V) || llvm::isa<llvm::StoreInst>(V) ||
           llvm::isa<llvm::CallInst>(V) || llvm::isa<llvm::MemIntrinsic>(V);
  }
  static MemAccInst cast(llvm::Value &V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  }
  static MemAccInst cast(llvm::Value *V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  }
  static MemAccInst cast_or_null(llvm::Value &V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  }
  static MemAccInst cast_or_null(llvm::Value *V) {
    if (!V)
      return MemAccInst();
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  }
  static MemAccInst dyn_cast(llvm::Value &V) {
    if (isa(V))
      return MemAccInst(llvm::cast<llvm::Instruction>(V));
    return MemAccInst();
  }
  static MemAccInst dyn_cast(llvm::Value *V) {
    assert(V);
    if (isa(V))
      return MemAccInst(llvm::cast<llvm::Instruction>(V));
    return MemAccInst();
  }

  MemAccInst &operator=(const MemAccInst &Inst) {
    I = Inst.I;
    return *this;
  }
  MemAccInst &operator=(llvm::LoadInst &LI) {
    I = &LI;
    return *this;
  }
  MemAccInst &operator=(llvm::LoadInst *LI) {
    I = LI;
    return *this;
  }
  MemAccInst &operator=(llvm::StoreInst &SI) {
    I = &SI;
    return *this;
  }
  MemAccInst &operator=(llvm::StoreInst *SI) {
    I = SI;
    return *this;
  }
  MemAccInst &operator=(llvm::MemIntrinsic &MI) {
    I = &MI;
    return *this;
  }
  MemAccInst &operator=(llvm::MemIntrinsic *MI) {
    I = MI;
    return *this;
  }
  MemAccInst &operator=(llvm::CallInst &CI) {
    I = &CI;
    return *this;
  }
  MemAccInst &operator=(llvm::CallInst *CI) {
    I = CI;
    return *this;
  }

  llvm::Instruction *get() const {
    assert(I && "Unexpected nullptr!");
    return I;
  }
  operator llvm::Instruction *() const { return asInstruction(); }
  llvm::Instruction *operator->() const { return get(); }

  explicit operator bool() const { return isInstruction(); }
  bool operator!() const { return isNull(); }

  llvm::Value *getValueOperand() const {
    if (isLoad())
      return asLoad();
    if (isStore())
      return asStore()->getValueOperand();
    if (isMemIntrinsic())
      return nullptr;
    if (isCallInst())
      return nullptr;
    llvm_unreachable("Operation not supported on nullptr");
  }
  llvm::Value *getPointerOperand() const {
    if (isLoad())
      return asLoad()->getPointerOperand();
    if (isStore())
      return asStore()->getPointerOperand();
    if (isMemIntrinsic())
      return asMemIntrinsic()->getRawDest();
    if (isCallInst())
      return nullptr;
    llvm_unreachable("Operation not supported on nullptr");
  }

  unsigned getAlignment() const {
    if (isLoad())
      return asLoad()->getAlignment();
    if (isStore())
      return asStore()->getAlignment();
    if (isMemTransferInst())
      return std::min(asMemTransferInst()->getDestAlignment(),
                      asMemTransferInst()->getSourceAlignment());
    if (isMemIntrinsic())
      return asMemIntrinsic()->getDestAlignment();
    if (isCallInst())
      return 0;
    llvm_unreachable("Operation not supported on nullptr");
  }
  bool isVolatile() const {
    if (isLoad())
      return asLoad()->isVolatile();
    if (isStore())
      return asStore()->isVolatile();
    if (isMemIntrinsic())
      return asMemIntrinsic()->isVolatile();
    if (isCallInst())
      return false;
    llvm_unreachable("Operation not supported on nullptr");
  }
  bool isSimple() const {
    if (isLoad())
      return asLoad()->isSimple();
    if (isStore())
      return asStore()->isSimple();
    if (isMemIntrinsic())
      return !asMemIntrinsic()->isVolatile();
    if (isCallInst())
      return true;
    llvm_unreachable("Operation not supported on nullptr");
  }
  llvm::AtomicOrdering getOrdering() const {
    if (isLoad())
      return asLoad()->getOrdering();
    if (isStore())
      return asStore()->getOrdering();
    if (isMemIntrinsic())
      return llvm::AtomicOrdering::NotAtomic;
    if (isCallInst())
      return llvm::AtomicOrdering::NotAtomic;
    llvm_unreachable("Operation not supported on nullptr");
  }
  bool isUnordered() const {
    if (isLoad())
      return asLoad()->isUnordered();
    if (isStore())
      return asStore()->isUnordered();
    // Copied from the Load/Store implementation of isUnordered:
    if (isMemIntrinsic())
      return !asMemIntrinsic()->isVolatile();
    if (isCallInst())
      return true;
    llvm_unreachable("Operation not supported on nullptr");
  }

  bool isNull() const { return !I; }
  bool isInstruction() const { return I; }

  llvm::Instruction *asInstruction() const { return I; }

private:
  bool isLoad() const { return I && llvm::isa<llvm::LoadInst>(I); }
  bool isStore() const { return I && llvm::isa<llvm::StoreInst>(I); }
  bool isCallInst() const { return I && llvm::isa<llvm::CallInst>(I); }
  bool isMemIntrinsic() const { return I && llvm::isa<llvm::MemIntrinsic>(I); }
  bool isMemSetInst() const { return I && llvm::isa<llvm::MemSetInst>(I); }
  bool isMemTransferInst() const {
    return I && llvm::isa<llvm::MemTransferInst>(I);
  }

  llvm::LoadInst *asLoad() const { return llvm::cast<llvm::LoadInst>(I); }
  llvm::StoreInst *asStore() const { return llvm::cast<llvm::StoreInst>(I); }
  llvm::CallInst *asCallInst() const { return llvm::cast<llvm::CallInst>(I); }
  llvm::MemIntrinsic *asMemIntrinsic() const {
    return llvm::cast<llvm::MemIntrinsic>(I);
  }
  llvm::MemSetInst *asMemSetInst() const {
    return llvm::cast<llvm::MemSetInst>(I);
  }
  llvm::MemTransferInst *asMemTransferInst() const {
    return llvm::cast<llvm::MemTransferInst>(I);
  }
};
} // namespace polly

namespace llvm {
/// Specialize simplify_type for MemAccInst to enable dyn_cast and cast
/// from a MemAccInst object.
template <> struct simplify_type<polly::MemAccInst> {
  typedef Instruction *SimpleType;
  static SimpleType getSimplifiedValue(polly::MemAccInst &I) {
    return I.asInstruction();
  }
};
} // namespace llvm

namespace polly {

/// Simplify the region to have a single unconditional entry edge and a
/// single exit edge.
///
/// Although this function allows DT and RI to be null, regions only work
/// properly if the DominatorTree (for Region::contains) and RegionInfo are kept
/// up-to-date.
///
/// @param R  The region to be simplified
/// @param DT DominatorTree to be updated.
/// @param LI LoopInfo to be updated.
/// @param RI RegionInfo to be updated.
void simplifyRegion(llvm::Region *R, llvm::DominatorTree *DT,
                    llvm::LoopInfo *LI, llvm::RegionInfo *RI);

/// Split the entry block of a function to store the newly inserted
/// allocations outside of all Scops.
///
/// @param EntryBlock The entry block of the current function.
/// @param P          The pass that is currently running.
///
void splitEntryBlockForAlloca(llvm::BasicBlock *EntryBlock, llvm::Pass *P);

/// Split the entry block of a function to store the newly inserted
/// allocations outside of all Scops.
///
/// @param DT DominatorTree to be updated.
/// @param LI LoopInfo to be updated.
/// @param RI RegionInfo to be updated.
void splitEntryBlockForAlloca(llvm::BasicBlock *EntryBlock,
                              llvm::DominatorTree *DT, llvm::LoopInfo *LI,
                              llvm::RegionInfo *RI);

/// Wrapper for SCEVExpander extended to all Polly features.
///
/// This wrapper will internally call the SCEVExpander but also makes sure that
/// all additional features not represented in SCEV (e.g., SDiv/SRem are not
/// black boxes but can be part of the function) will be expanded correctly.
///
/// The parameters are the same as for the creation of a SCEVExpander as well
/// as the call to SCEVExpander::expandCodeFor:
///
/// @param S     The current Scop.
/// @param SE    The Scalar Evolution pass.
/// @param DL    The module data layout.
/// @param Name  The suffix added to the new instruction names.
/// @param E     The expression for which code is actually generated.
/// @param Ty    The type of the resulting code.
/// @param IP    The insertion point for the new code.
/// @param VMap  A remapping of values used in @p E.
/// @param RTCBB The last block of the RTC. Used to insert loop-invariant
///              instructions in rare cases.
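///
/// Illustrative call (a sketch only; S, SE, DL, Expr, InsertPt, VMap and
/// RTCBB name values available at the call site and are not part of this
/// interface):
///
///   llvm::Value *Expanded =
///       expandCodeFor(S, SE, DL, "polly", Expr, Expr->getType(), InsertPt,
///                     &VMap, RTCBB);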
llvm::Value *expandCodeFor(Scop &S, llvm::ScalarEvolution &SE,
                           const llvm::DataLayout &DL, const char *Name,
                           const llvm::SCEV *E, llvm::Type *Ty,
                           llvm::Instruction *IP, ValueMapT *VMap,
                           llvm::BasicBlock *RTCBB);

/// Check if the block is an error block.
///
/// An error block is currently any block that fulfills at least one of
/// the following conditions:
///
///  - It is terminated by an unreachable instruction
///  - It contains a call to a non-pure function that is not immediately
///    dominated by a loop header and that does not dominate the region exit.
///    This is a heuristic to pick only error blocks that are conditionally
///    executed and can be assumed not to be executed at all without the
///    domains being available.
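///
/// Illustrative C fragment (not from this header; A and n are placeholders):
/// the block holding the call to abort() would typically be classified as an
/// error block, because abort() is a non-pure call that is only conditionally
/// executed:
///
///   if (n < 0)
///     abort();
///   for (int i = 0; i < n; i += 1)
///     A[i] = i;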
///
/// @param BB The block to check.
/// @param R  The analyzed region.
/// @param LI The loop info analysis.
/// @param DT The dominator tree of the function.
///
/// @return True if the block is an error block, false otherwise.
bool isErrorBlock(llvm::BasicBlock &BB, const llvm::Region &R,
                  llvm::LoopInfo &LI, const llvm::DominatorTree &DT);

/// Return the condition for the terminator @p TI.
///
/// For unconditional branches the "i1 true" condition will be returned.
///
/// @param TI The terminator to get the condition from.
///
/// @return The condition of @p TI, or nullptr if none could be extracted.
llvm::Value *getConditionFromTerminator(llvm::Instruction *TI);

/// Get the smallest loop that contains @p S but is not in @p S.
llvm::Loop *getLoopSurroundingScop(Scop &S, llvm::LoopInfo &LI);

/// Get the number of blocks in @p L.
///
/// The number of blocks in a loop is the number of basic blocks actually
/// belonging to the loop, as well as all single basic blocks that the loop
/// exits to and which terminate in an unreachable instruction. We do not
/// allow such basic blocks in the exit of a scop, hence they belong to the
/// scop and represent run-time conditions which we want to model and
/// subsequently speculate away.
///
/// @see getRegionNodeLoop for additional details.
unsigned getNumBlocksInLoop(llvm::Loop *L);

/// Get the number of blocks in @p RN.
unsigned getNumBlocksInRegionNode(llvm::RegionNode *RN);

/// Return the smallest loop surrounding @p RN.
llvm::Loop *getRegionNodeLoop(llvm::RegionNode *RN, llvm::LoopInfo &LI);

/// Check if @p LInst can be hoisted in @p R.
///
/// @param LInst The load to check.
/// @param R The analyzed region.
/// @param LI The loop info.
/// @param SE The scalar evolution analysis.
/// @param DT The dominator tree of the function.
/// @param KnownInvariantLoads The invariant load set.
///
/// @return True if @p LInst can be hoisted in @p R.
bool isHoistableLoad(llvm::LoadInst *LInst, llvm::Region &R, llvm::LoopInfo &LI,
                     llvm::ScalarEvolution &SE, const llvm::DominatorTree &DT,
                     const InvariantLoadsSetTy &KnownInvariantLoads);

/// Return true iff @p V is an intrinsic that we ignore during code
/// generation.
bool isIgnoredIntrinsic(const llvm::Value *V);

/// Check whether a value can be synthesized by the code generator.
///
/// Some values will be recalculated only from information that is code
/// generated from the polyhedral representation. For such instructions we do
/// not need to ensure that their operands are available during code
/// generation.
///
/// @param V The value to check.
/// @param S The current SCoP.
/// @param SE The scalar evolution database.
/// @param Scope Location where the value would be synthesized.
/// @return If the instruction I can be regenerated from its
///         scalar evolution representation, return true,
///         otherwise return false.
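///
/// Typical case (illustrative; A, N and i are placeholders): an affine
/// induction variable such as i in
///
///   for (int i = 0; i < N; i += 1)
///     A[i] = i;
///
/// can be recomputed from the generated polyhedral schedule, so its original
/// definition need not be kept available during code generation.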
bool canSynthesize(const llvm::Value *V, const Scop &S,
                   llvm::ScalarEvolution *SE, llvm::Loop *Scope);

/// Return the block in which a value is used.
///
/// For normal instructions, this is the instruction's parent block. For PHI
/// nodes, this is the incoming block of that use, because this is where the
/// operand must be defined (i.e. its definition dominates this block).
/// Non-instructions do not use operands at a specific point, so in this case
/// this function returns nullptr.
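///
/// For example (illustrative IR), given
///
///   %phi = phi i32 [ %a, %entry ], [ %b, %latch ]
///
/// the use of %a is attributed to %entry rather than to the block that
/// contains the PHI node itself.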
llvm::BasicBlock *getUseBlock(const llvm::Use &U);

// If the loop is nonaffine/boxed, return the first non-boxed surrounding loop
// for Polly. If the loop is affine, return the loop itself.
//
// @param L Pointer to the Loop object to analyze.
// @param LI Reference to the LoopInfo.
// @param BoxedLoops Set of Boxed Loops we get from the SCoP.
llvm::Loop *getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
                                    const BoxedLoopsSetTy &BoxedLoops);

// If the Basic Block belongs to a loop that is nonaffine/boxed, return the
// first non-boxed surrounding loop for Polly. If the loop is affine, return
// the loop itself.
//
// @param BB Pointer to the Basic Block to analyze.
// @param LI Reference to the LoopInfo.
// @param BoxedLoops Set of Boxed Loops we get from the SCoP.
llvm::Loop *getFirstNonBoxedLoopFor(llvm::BasicBlock *BB, llvm::LoopInfo &LI,
                                    const BoxedLoopsSetTy &BoxedLoops);

/// Is the given instruction a call to a debug function?
///
/// A debug function can be used to insert output in Polly-optimized code which
/// normally does not allow function calls with side-effects. For instance, a
/// printf can be inserted to check whether a value still has the expected value
/// after Polly generated code:
///
///   int sum = 0;
///   for (int i = 0; i < 16; i += 1) {
///     sum += i;
///     printf("The value of sum at i=%d is %d\n", i, sum);
///   }
bool isDebugCall(llvm::Instruction *Inst);

/// Does the statement contain a call to a debug function?
///
/// Such a statement must not be removed, even if it has no side-effects.
bool hasDebugCall(ScopStmt *Stmt);
} // namespace polly
#endif