//===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_VALUETRACKING_H
#define LLVM_ANALYSIS_VALUETRACKING_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include <cassert>
#include <cstdint>

namespace llvm {

class AddOperator;
class AllocaInst;
class APInt;
class AssumptionCache;
class DominatorTree;
class GEPOperator;
class IntrinsicInst;
class LoadInst;
class WithOverflowInst;
struct KnownBits;
class Loop;
class LoopInfo;
class MDNode;
class OptimizationRemarkEmitter;
class StringRef;
class TargetLibraryInfo;
class Value;

constexpr unsigned MaxAnalysisRecursionDepth = 6;

/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and
/// the bit is set only if it is true for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known,
                      const DataLayout &DL, unsigned Depth = 0,
                      AssumptionCache *AC = nullptr,
                      const Instruction *CxtI = nullptr,
                      const DominatorTree *DT = nullptr,
                      OptimizationRemarkEmitter *ORE = nullptr,
                      bool UseInstrInfo = true);

/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and
/// the bit is set only if it is true for all of the demanded elements in the
/// vector.
void computeKnownBits(const Value *V, const APInt &DemandedElts,
                      KnownBits &Known, const DataLayout &DL,
                      unsigned Depth = 0, AssumptionCache *AC = nullptr,
                      const Instruction *CxtI = nullptr,
                      const DominatorTree *DT = nullptr,
                      OptimizationRemarkEmitter *ORE = nullptr,
                      bool UseInstrInfo = true);

/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
                           unsigned Depth = 0, AssumptionCache *AC = nullptr,
                           const Instruction *CxtI = nullptr,
                           const DominatorTree *DT = nullptr,
                           OptimizationRemarkEmitter *ORE = nullptr,
                           bool UseInstrInfo = true);

/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                           const DataLayout &DL, unsigned Depth = 0,
                           AssumptionCache *AC = nullptr,
                           const Instruction *CxtI = nullptr,
                           const DominatorTree *DT = nullptr,
                           OptimizationRemarkEmitter *ORE = nullptr,
                           bool UseInstrInfo = true);

/// Compute known bits from the !range metadata.
/// \p Ranges the range metadata to interpret
/// \p Known the resulting known bits (the known-zero and known-one sets)
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                       KnownBits &Known);

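// A minimal usage sketch, kept as a comment so it does not affect this
// header; it assumes a Value *V, a DataLayout DL, and the KnownBits
// definition from llvm/Support/KnownBits.h are in scope:
//
//   KnownBits Known = computeKnownBits(V, DL);
//   if (Known.countMinTrailingZeros() >= 2) {
//     // Every possible value of V has its low two bits clear, so V is
//     // provably a multiple of 4.
//   }
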
/// Return true if LHS and RHS have no common bits set.
bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                         const DataLayout &DL,
                         AssumptionCache *AC = nullptr,
                         const Instruction *CxtI = nullptr,
                         const DominatorTree *DT = nullptr,
                         bool UseInstrInfo = true);

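// Why this matters: if A and B share no set bits, no addition carries can
// occur, so A + B, A | B, and A ^ B all compute the same value and passes may
// canonicalize between them. A sketch assuming A, B, and DL are in scope:
//
//   if (haveNoCommonBitsSet(A, B, DL)) {
//     // "add A, B" may be rewritten as "or A, B" (or "xor A, B").
//   }
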
/// Return true if the given value is known to have exactly one bit set when
/// defined. For vectors return true if every element is known to be a power
/// of two when defined. Supports values with integer or pointer type and
/// vectors of integers. If 'OrZero' is set, then return true if the given
/// value is either a power of two or zero.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                            bool OrZero = false, unsigned Depth = 0,
                            AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);

bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
                    AssumptionCache *AC = nullptr,
                    const Instruction *CxtI = nullptr,
                    const DominatorTree *DT = nullptr,
                    bool UseInstrInfo = true);

/// Return true if the two given values are negations of each other.
/// Currently it can recognize these Value pairs:
/// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
/// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false);

/// Returns true if the given value is known to be non-negative.
bool isKnownNonNegative(const Value *V, const DataLayout &DL,
                        unsigned Depth = 0,
                        AssumptionCache *AC = nullptr,
                        const Instruction *CxtI = nullptr,
                        const DominatorTree *DT = nullptr,
                        bool UseInstrInfo = true);

/// Returns true if the given value is known to be positive (i.e.
/// non-negative and non-zero).
bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Returns true if the given value is known to be negative (i.e.
/// non-positive and non-zero).
bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Return true if the given values are known to be non-equal when defined.
/// Supports scalar integer types only.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the mask,
/// known zero, and known one values are the same width as the vector element,
/// and the bit is set only if it is true for all of the elements in the
/// vector.
bool MaskedValueIsZero(const Value *V, const APInt &Mask,
                       const DataLayout &DL,
                       unsigned Depth = 0, AssumptionCache *AC = nullptr,
                       const Instruction *CxtI = nullptr,
                       const DominatorTree *DT = nullptr,
                       bool UseInstrInfo = true);

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign
/// bit (itself), but other cases can give us information. For example,
/// immediately after an "ashr X, 2", we know that the top 3 bits are all
/// equal to each other, so we return 3. For vectors, return the number of
/// sign bits for the vector element with the minimum number of known sign
/// bits.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
                            unsigned Depth = 0, AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);

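// Worked example (a sketch; the exact count returned depends on what the
// analysis can prove): after "%s = ashr i32 %x, 24", the top 25 bits of %s
// all equal the sign bit, so ComputeNumSignBits would report at least 25 and
// %s round-trips through i8 via trunc + sext. Op and DL assumed in scope:
//
//   unsigned SignBits = ComputeNumSignBits(Op, DL);
//   bool FitsInI8 = SignBits > 32 - 8; // at least 25 identical top bits
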
/// This function computes the integer multiple of Base that equals V. If
/// successful, it returns true and returns the multiple in Multiple. If
/// unsuccessful, it returns false. Also, if V can be simplified to an
/// integer, then the simplified V is returned in Val. Look through sext only
/// if LookThroughSExt=true.
bool ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
                     bool LookThroughSExt = false,
                     unsigned Depth = 0);

/// Map a call instruction to an intrinsic ID. Libcalls which have equivalent
/// intrinsics are treated as if they were intrinsics.
Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
                                      const TargetLibraryInfo *TLI);

/// Return true if we can prove that the specified FP value is never equal to
/// -0.0.
bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
                          unsigned Depth = 0);

/// Return true if we can prove that the specified FP value is either NaN or
/// never less than -0.0.
///
///   NaN --> true
///   +0 --> true
///   -0 --> true
///   x > +0 --> true
///   x < -0 --> false
bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI);

/// Return true if the floating-point scalar value is not an infinity or if
/// the floating-point vector value has no infinities. Return false if a value
/// could ever be infinity.
bool isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
                          unsigned Depth = 0);

/// Return true if the floating-point scalar value is not a NaN or if the
/// floating-point vector value has no NaN elements. Return false if a value
/// could ever be NaN.
bool isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
                     unsigned Depth = 0);

/// Return true if we can prove that the specified FP value's sign bit is 0.
///
///   NaN --> true/false (depending on the NaN's sign bit)
///   +0 --> true
///   -0 --> false
///   x > +0 --> true
///   x < -0 --> false
bool SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI);

/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is true for all i8
/// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
/// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
/// i16 0x1234), return null. If the value is entirely undef and padding,
/// return undef.
Value *isBytewiseValue(Value *V, const DataLayout &DL);

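// Sketch of the classic consumer of this query, forming a memset-style store
// (V and DL assumed in scope; the actual rewrite is elided):
//
//   if (Value *ByteVal = isBytewiseValue(V, DL)) {
//     // Every byte of V equals ByteVal, e.g. i32 -1 yields i8 0xFF, so a
//     // store of V can be expressed as a repeated single-byte store.
//   }
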
/// Given an aggregate and a sequence of indices, see if the scalar value
/// indexed is already around as a register, for example if it were inserted
/// directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *FindInsertedValue(Value *V,
                         ArrayRef<unsigned> idx_range,
                         Instruction *InsertBefore = nullptr);

/// Analyze the specified pointer to see if it can be expressed as a base
/// pointer plus a constant offset. Return the base and offset to the caller.
///
/// This is a wrapper around Value::stripAndAccumulateConstantOffsets that
/// creates and later unpacks the required APInt.
inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                               const DataLayout &DL,
                                               bool AllowNonInbounds = true) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  Value *Base =
      Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds);

  Offset = OffsetAPInt.getSExtValue();
  return Base;
}
inline const Value *
GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
                                 const DataLayout &DL,
                                 bool AllowNonInbounds = true) {
  return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL,
                                          AllowNonInbounds);
}

/// Returns true if the GEP is based on a pointer to a string (array of
/// \p CharSize integers) and is indexing into this string.
bool isGEPBasedOnPointerToString(const GEPOperator *GEP,
                                 unsigned CharSize = 8);

/// Represents offset+length into a ConstantDataArray.
struct ConstantDataArraySlice {
  /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
  /// initializer, it just doesn't fit the ConstantDataArray interface).
  const ConstantDataArray *Array;

  /// Slice starts at this Offset.
  uint64_t Offset;

  /// Length of the slice.
  uint64_t Length;

  /// Moves the Offset and adjusts Length accordingly.
  void move(uint64_t Delta) {
    assert(Delta < Length);
    Offset += Delta;
    Length -= Delta;
  }

  /// Convenience accessor for elements in the slice.
  uint64_t operator[](unsigned I) const {
    return Array == nullptr ? 0 : Array->getElementAsInteger(I + Offset);
  }
};

/// Returns true if the value \p V is a pointer into a ConstantDataArray.
/// If successful \p Slice will point to a ConstantDataArray info object
/// with an appropriate offset.
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
                              unsigned ElementSize, uint64_t Offset = 0);

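// Sketch of iterating constant array data through the slice interface
// (V is assumed to point into a constant i8 array; Use() is a hypothetical
// placeholder for the caller's logic):
//
//   ConstantDataArraySlice Slice;
//   if (getConstantDataArrayInfo(V, Slice, /*ElementSize=*/8)) {
//     for (unsigned I = 0; I != Slice.Length; ++I)
//       Use(Slice[I]); // always 0 when Slice.Array is a zeroinitializer
//   }
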
/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and returns the string in Str. If
/// unsuccessful, it returns false. This does not include the trailing null
/// character by default. If TrimAtNul is set to false, then this returns any
/// trailing null characters as well as any other characters that come after
/// it.
bool getConstantStringInfo(const Value *V, StringRef &Str,
                           uint64_t Offset = 0, bool TrimAtNul = true);

/// If we can compute the length of the string pointed to by the specified
/// pointer, return 'len+1'. If we can't, return 0.
uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);

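// Sketch: the two interfaces above encode failure differently. A strlen-style
// fold (Ptr assumed in scope) might look like:
//
//   StringRef Str;
//   if (getConstantStringInfo(Ptr, Str)) {
//     uint64_t Len = Str.size(); // trailing NUL excluded by default
//   }
//   // whereas GetStringLength(Ptr) returns len + 1 on success and 0 on
//   // failure, so 0 means "unknown", not "empty string".
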
/// This function returns the call's pointer argument that is considered the
/// same as its return value by aliasing rules. You CAN'T use it to replace
/// one value with another. If \p MustPreserveNullness is true, the call must
/// preserve the nullness of the pointer.
const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call,
                                                  bool MustPreserveNullness);
inline Value *
getArgumentAliasingToReturnedPointer(CallBase *Call,
                                     bool MustPreserveNullness) {
  return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
      const_cast<const CallBase *>(Call), MustPreserveNullness));
}

/// {launder,strip}.invariant.group returns a pointer that aliases its
/// argument, and it only captures the pointer by returning it.
/// These intrinsics are not marked as nocapture, because returning is
/// considered as capture. The arguments are not marked as returned either,
/// because that would make them useless. If \p MustPreserveNullness is true,
/// the intrinsic must preserve the nullness of the pointer.
bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    const CallBase *Call, bool MustPreserveNullness);

/// This method strips off any GEP address adjustments and pointer casts from
/// the specified value, returning the original object being addressed. Note
/// that the returned value has pointer type if the specified value does. If
/// the MaxLookup value is non-zero, it limits the number of instructions to
/// be stripped off.
Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6);
inline const Value *getUnderlyingObject(const Value *V,
                                        unsigned MaxLookup = 6) {
  return getUnderlyingObject(const_cast<Value *>(V), MaxLookup);
}

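// Sketch: a common pattern is checking whether a pointer is provably derived
// from a stack slot (Ptr assumed in scope; isa<> needs llvm/Support/Casting.h
// and the AllocaInst definition):
//
//   const Value *UO = getUnderlyingObject(Ptr);
//   if (isa<AllocaInst>(UO)) {
//     // Ptr was reached from an alloca through GEPs/casts only (within the
//     // default lookup budget of 6 instructions).
//   }
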
/// This method is similar to getUnderlyingObject except that it can
/// look through phi and select instructions and return multiple objects.
///
/// If LoopInfo is passed, loop phis are further analyzed. If a pointer
/// accesses different objects in each iteration, we don't look through the
/// phi node. E.g. consider this loop nest:
///
///   int **A;
///   for (i)
///     for (j) {
///       A[i][j] = A[i-1][j] * B[j]
///     }
///
/// This is transformed by Load-PRE to stash away A[i] for the next iteration
/// of the outer loop:
///
///   Curr = A[0];          // Prev_0
///   for (i: 1..N) {
///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
///     Curr = A[i];
///     for (j: 0..N) {
///       Curr[j] = Prev[j] * B[j]
///     }
///   }
///
/// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
/// should not assume that Curr and Prev share the same underlying object;
/// thus it shouldn't look through the phi above.
void getUnderlyingObjects(const Value *V,
                          SmallVectorImpl<const Value *> &Objects,
                          LoopInfo *LI = nullptr, unsigned MaxLookup = 6);

/// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
bool getUnderlyingObjectsForCodeGen(const Value *V,
                                    SmallVectorImpl<Value *> &Objects);

/// Returns the unique alloca that the value comes from, or nullptr.
/// If OffsetZero is true, check that V points to the beginning of the alloca.
AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
inline const AllocaInst *findAllocaForValue(const Value *V,
                                            bool OffsetZero = false) {
  return findAllocaForValue(const_cast<Value *>(V), OffsetZero);
}

/// Return true if the only users of this pointer are lifetime markers.
bool onlyUsedByLifetimeMarkers(const Value *V);

/// Return true if the only users of this pointer are lifetime markers or
/// droppable instructions.
bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);

/// Return true if speculation of the given load must be suppressed to avoid
/// ordering or interfering with an active sanitizer. If not suppressed,
/// dereferenceability and alignment must be proven separately. Note: This
/// is only needed for raw reasoning; if you use the interface below
/// (isSafeToSpeculativelyExecute), this is handled internally.
bool mustSuppressSpeculation(const LoadInst &LI);

/// Return true if the instruction does not have any effects besides
/// calculating the result and does not have undefined behavior.
///
/// This method never returns true for an instruction that returns true for
/// mayHaveSideEffects; however, this method also does some other checks in
/// addition. It checks for undefined behavior, like dividing by zero or
/// loading from an invalid pointer (but not for undefined results, like a
/// shift with a shift amount larger than the width of the result). It checks
/// for malloc and alloca because speculatively executing them might cause a
/// memory leak. It also returns false for instructions related to control
/// flow, specifically terminators and PHI nodes.
///
/// If the CtxI is specified this method performs context-sensitive analysis
/// and returns true if it is safe to execute the instruction immediately
/// before the CtxI.
///
/// If the CtxI is NOT specified this method only looks at the instruction
/// itself and its operands, so if this method returns true, it is safe to
/// move the instruction as long as the correct dominance relationships for
/// the operands and users hold.
///
/// This method can return true for instructions that read memory;
/// for such instructions, moving them may change the resulting value.
bool isSafeToSpeculativelyExecute(const Value *V,
                                  const Instruction *CtxI = nullptr,
                                  const DominatorTree *DT = nullptr);

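// Sketch of a speculation-style legality check, as a hoisting transform might
// perform it (I is a candidate instruction and InsertPt the location it would
// be hoisted to; both names are assumptions of this sketch, and profitability
// is a separate question):
//
//   if (isSafeToSpeculativelyExecute(&I, /*CtxI=*/InsertPt, DT)) {
//     // Executing I unconditionally at InsertPt cannot trap or introduce
//     // UB, but if I reads memory its result may differ at the new point.
//   }
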
/// Returns true if the result or effects of the given instruction \p I
/// depend on or influence global memory.
/// Memory dependence arises for example if the instruction reads from
/// memory or may produce effects or undefined behavior. Memory dependent
/// instructions generally cannot be reordered with respect to other memory
/// dependent instructions or moved into non-dominated basic blocks.
/// Instructions which just compute a value based on the values of their
/// operands are not memory dependent.
bool mayBeMemoryDependent(const Instruction &I);

/// Return true if it is an intrinsic that cannot be speculated but also
/// cannot trap.
bool isAssumeLikeIntrinsic(const Instruction *I);

/// Return true if it is valid to use the assumptions provided by an
/// assume intrinsic, I, at the point in the control-flow identified by the
/// context instruction, CxtI.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
                             const DominatorTree *DT = nullptr);

enum class OverflowResult {
  /// Always overflows in the direction of signed/unsigned min value.
  AlwaysOverflowsLow,
  /// Always overflows in the direction of signed/unsigned max value.
  AlwaysOverflowsHigh,
  /// May or may not overflow.
  MayOverflow,
  /// Never overflows.
  NeverOverflows,
};

OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
                                             const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT,
                                             bool UseInstrInfo = true);
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC,
                                           const Instruction *CxtI,
                                           const DominatorTree *DT,
                                           bool UseInstrInfo = true);
OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
                                             const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT,
                                             bool UseInstrInfo = true);
OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC = nullptr,
                                           const Instruction *CxtI = nullptr,
                                           const DominatorTree *DT = nullptr);
/// This version also leverages the sign bit of Add if known.
OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
                                           const DataLayout &DL,
                                           AssumptionCache *AC = nullptr,
                                           const Instruction *CxtI = nullptr,
                                           const DominatorTree *DT = nullptr);
OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
                                             const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT);
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC,
                                           const Instruction *CxtI,
                                           const DominatorTree *DT);

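// Sketch: these queries commonly back flag-strengthening rewrites
// (LHS, RHS, DL, AC, CxtI, and DT are assumed to be in scope):
//
//   if (computeOverflowForUnsignedAdd(LHS, RHS, DL, AC, CxtI, DT) ==
//       OverflowResult::NeverOverflows) {
//     // An "add LHS, RHS" may be marked nuw: unsigned wrap is impossible.
//   }
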
/// Returns true if the arithmetic part of the result of \p WO is
/// used only along the paths control dependent on the computation
/// not overflowing, \p WO being an <op>.with.overflow intrinsic.
bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                               const DominatorTree &DT);

/// Determine the possible constant range of an integer or vector of integer
/// value. This is intended as a cheap, non-recursive check.
ConstantRange computeConstantRange(const Value *V, bool UseInstrInfo = true,
                                   AssumptionCache *AC = nullptr,
                                   const Instruction *CtxI = nullptr,
                                   unsigned Depth = 0);

/// Return true if this function can prove that the instruction I will
/// always transfer execution to one of its successors (including the next
/// instruction that follows within a basic block). E.g. this is not
/// guaranteed for function calls that could loop infinitely.
///
/// In other words, this function returns false for instructions that may
/// transfer execution or fail to transfer execution in a way that is not
/// captured in the CFG nor in the sequence of instructions within a basic
/// block.
///
/// Undefined behavior is assumed not to happen, so e.g. division is
/// guaranteed to transfer execution to the following instruction even
/// though division by zero might cause undefined behavior.
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);

/// Returns true if this block does not contain a potential implicit exit.
/// This is equivalent to saying that all instructions within the basic block
/// are guaranteed to transfer execution to their successor within the basic
/// block. This has the same assumptions w.r.t. undefined behavior as the
/// instruction variant of this function.
bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);

/// Return true if this function can prove that the instruction I
/// is executed for every iteration of the loop L.
///
/// Note that this currently only considers the loop header.
bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                            const Loop *L);

/// Return true if I yields poison or raises UB if any of its operands is
/// poison.
/// Formally, given I = `r = op v1 v2 .. vN`, propagatesPoison returns true
/// if, for all i, r is evaluated to poison or op raises UB if vi = poison.
/// To filter out operands that raise UB on poison, you can use
/// getGuaranteedNonPoisonOps.
bool propagatesPoison(const Operator *I);

/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand has a poison value.
void getGuaranteedNonPoisonOps(const Instruction *I,
                               SmallPtrSetImpl<const Value *> &Ops);

/// Return true if the given instruction must trigger undefined behavior
/// when I is executed with any operands which appear in KnownPoison holding
/// a poison value at the point of execution.
bool mustTriggerUB(const Instruction *I,
                   const SmallSet<const Value *, 16> &KnownPoison);

/// Return true if this function can prove that if Inst is executed
/// and yields a poison value or undef bits, then that will trigger
/// undefined behavior.
///
/// Note that this currently only considers the basic block that is
/// the parent of Inst.
bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
bool programUndefinedIfPoison(const Instruction *Inst);

/// canCreateUndefOrPoison returns true if Op can create undef or poison from
/// non-undef & non-poison operands.
/// For vectors, canCreateUndefOrPoison returns true if there is potential
/// poison or undef in any element of the result when vectors without
/// undef/poison are given as operands.
/// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns
/// true. If Op raises immediate UB but never creates poison or undef
/// (e.g. sdiv I, 0), canCreatePoison returns false.
///
/// canCreatePoison returns true if Op can create poison from non-poison
/// operands.
bool canCreateUndefOrPoison(const Operator *Op);
bool canCreatePoison(const Operator *Op);

/// Return true if V is poison given that ValAssumedPoison is already poison.
/// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
/// impliesPoison returns true.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V);

/// Return true if this function can prove that V does not have undef bits
/// and is never poison. If V is an aggregate value or vector, check whether
/// all elements (except padding) are not undef or poison.
/// Note that this is different from canCreateUndefOrPoison because the
/// function assumes Op's operands are not poison/undef.
///
/// If CtxI and DT are specified this method performs flow-sensitive analysis
/// and returns true if it is guaranteed to be never undef or poison
/// immediately before the CtxI.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
                                      AssumptionCache *AC = nullptr,
                                      const Instruction *CtxI = nullptr,
                                      const DominatorTree *DT = nullptr,
                                      unsigned Depth = 0);
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
                               const Instruction *CtxI = nullptr,
                               const DominatorTree *DT = nullptr,
                               unsigned Depth = 0);

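// Sketch tying these predicates together (I and V are assumed in scope, and
// the cast to Operator requires the Instruction definition):
//
//   if (propagatesPoison(cast<Operator>(I))) {
//     // Poison in any operand makes I's result poison (or UB), so poison
//     // cannot be masked by looking through I.
//   }
//   if (isGuaranteedNotToBeUndefOrPoison(V)) {
//     // V is a well-defined value here, so a "freeze" of V is redundant.
//   }
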
/// Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
  SPF_UNKNOWN = 0,
  SPF_SMIN,    /// Signed minimum
  SPF_UMIN,    /// Unsigned minimum
  SPF_SMAX,    /// Signed maximum
  SPF_UMAX,    /// Unsigned maximum
  SPF_FMINNUM, /// Floating point minnum
  SPF_FMAXNUM, /// Floating point maxnum
  SPF_ABS,     /// Absolute value
  SPF_NABS     /// Negated absolute value
};

/// Behavior when a floating point min/max is given one NaN and one
/// non-NaN as input.
enum SelectPatternNaNBehavior {
  SPNB_NA = 0,        /// NaN behavior not applicable.
  SPNB_RETURNS_NAN,   /// Given one NaN input, returns the NaN.
  SPNB_RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
  SPNB_RETURNS_ANY    /// Given one NaN input, can return either (or
                      /// it has been determined that no operands can
                      /// be NaN).
};

struct SelectPatternResult {
  SelectPatternFlavor Flavor;
  SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
                                        /// SPF_FMINNUM or SPF_FMAXNUM.
  bool Ordered; /// When implementing this min/max pattern as
                /// fcmp; select, does the fcmp have to be
                /// ordered?

  /// Return true if \p SPF is a min or a max pattern.
  static bool isMinOrMax(SelectPatternFlavor SPF) {
    return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS;
  }
};

/// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
/// and providing the out parameter results if we successfully match.
///
/// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
/// the negation instruction from the idiom.
///
/// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
/// not match that of the original select. If this is the case, the cast
/// operation (one of Trunc,SExt,Zext) that must be done to transform the
/// type of LHS and RHS into the type of V is returned in CastOp.
///
/// For example:
///   %1 = icmp slt i32 %a, 4
///   %2 = sext i32 %a to i64
///   %3 = select i1 %1, i64 %2, i64 4
///
/// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
///
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                       Instruction::CastOps *CastOp = nullptr,
                                       unsigned Depth = 0);

inline SelectPatternResult
matchSelectPattern(const Value *V, const Value *&LHS, const Value *&RHS) {
  Value *L = const_cast<Value *>(LHS);
  Value *R = const_cast<Value *>(RHS);
  auto Result = matchSelectPattern(const_cast<Value *>(V), L, R);
  LHS = L;
  RHS = R;
  return Result;
}

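// Sketch: recognizing a signed-max idiom (V is assumed to be a select
// instruction such as the following):
//
//   //   %c = icmp sgt i32 %a, %b
//   //   %v = select i1 %c, i32 %a, i32 %b
//   Value *LHS, *RHS;
//   SelectPatternResult SPR = matchSelectPattern(V, LHS, RHS);
//   if (SPR.Flavor == SPF_SMAX) {
//     // V computes smax(%a, %b); LHS and RHS are set to %a and %b.
//   }
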
/// Determine the pattern that a select with the given compare as its
/// predicate and given values as its true/false operands would match.
SelectPatternResult matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);

/// Return the canonical comparison predicate for the specified
/// minimum/maximum flavor.
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF,
                                 bool Ordered = false);

/// Return the inverse minimum/maximum flavor of the specified flavor.
/// For example, signed minimum is the inverse of signed maximum.
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);

/// Return the canonical inverse comparison predicate for the specified
/// minimum/maximum flavor.
CmpInst::Predicate getInverseMinMaxPred(SelectPatternFlavor SPF);

/// Check if the values in \p VL are select instructions that can be converted
/// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
/// conversion is possible, together with a bool indicating whether all select
/// conditions are only used by the selects. Otherwise return
/// Intrinsic::not_intrinsic.
std::pair<Intrinsic::ID, bool>
canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);

/// Return true if RHS is known to be implied true by LHS. Return false if
/// RHS is known to be implied false by LHS. Otherwise, return None if no
/// implication can be made.
/// A & B must be i1 (boolean) values or a vector of such values. Note that
/// the truth table for implication is the same as <=u on i1 values (but not
/// <=s!). The truth table for both is:
///      | T | F (B)
///    T | T | F
///    F | T | T
///    (A)
Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
                                  const DataLayout &DL, bool LHSIsTrue = true,
                                  unsigned Depth = 0);
Optional<bool> isImpliedCondition(const Value *LHS,
                                  CmpInst::Predicate RHSPred,
                                  const Value *RHSOp0, const Value *RHSOp1,
                                  const DataLayout &DL, bool LHSIsTrue = true,
                                  unsigned Depth = 0);

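// Sketch: implication queries let a pass fold a branch dominated by a
// stronger test, e.g. "if (x < 10) { ... if (x < 20) ... }". With i1 values
// A = (x < 10) and B = (x < 20) and DL in scope:
//
//   if (Optional<bool> Implied = isImpliedCondition(A, B, DL)) {
//     // *Implied == true:  B must be true whenever A is true.
//     // *Implied == false: B must be false whenever A is true.
//   }
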
/// Return the boolean condition value in the context of the given instruction
/// if it is known based on dominating conditions.
Optional<bool> isImpliedByDomCondition(const Value *Cond,
                                       const Instruction *ContextI,
                                       const DataLayout &DL);
Optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred,
                                       const Value *LHS, const Value *RHS,
                                       const Instruction *ContextI,
                                       const DataLayout &DL);

/// If Ptr1 is provably equal to Ptr2 plus a constant offset, return that
/// offset. For example, Ptr1 might be &A[42], and Ptr2 might be &A[40]. In
/// this case offset would be -8.
Optional<int64_t> isPointerOffset(const Value *Ptr1, const Value *Ptr2,
                                  const DataLayout &DL);

} // end namespace llvm

#endif // LLVM_ANALYSIS_VALUETRACKING_H