Update clang to r339409b.
Change-Id: Ied8a188bb072c40035320acedc86164b66d920af
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h b/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h
index dce4168..a593907 100644
--- a/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h
@@ -29,7 +29,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Analysis/DivergenceAnalysis.h"
+#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
@@ -163,6 +163,7 @@
LLOnly, // Expand the (load) instruction into just a load-linked, which has
// greater atomic guarantees than a normal load.
CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
+ MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
};
/// Enum that specifies when a multiplication should be expanded.
@@ -545,6 +546,12 @@
return false;
}
+ /// Return true if inserting a scalar into a variable element of an undef
+ /// vector is more efficiently handled by splatting the scalar instead.
+ virtual bool shouldSplatInsEltVarIndex(EVT) const {
+ return false;
+ }
+
/// Return true if target supports floating point exceptions.
bool hasFloatingPointExceptions() const {
return HasFloatingPointExceptions;
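
For an out-of-tree backend, the new shouldSplatInsEltVarIndex hook is a simple override. A minimal sketch, assuming a hypothetical MyTargetTargetLowering subclass (the target name is illustrative, not part of this patch):

    bool MyTargetTargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
      // Assume splatting the scalar is cheaper than a variable-lane insert
      // whenever the vector type is legal for this (hypothetical) target.
      return VT.isVector() && isTypeLegal(VT);
    }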
@@ -798,6 +805,7 @@
case ISD::STRICT_FSUB: EqOpc = ISD::FSUB; break;
case ISD::STRICT_FMUL: EqOpc = ISD::FMUL; break;
case ISD::STRICT_FDIV: EqOpc = ISD::FDIV; break;
+ case ISD::STRICT_FREM: EqOpc = ISD::FREM; break;
case ISD::STRICT_FSQRT: EqOpc = ISD::FSQRT; break;
case ISD::STRICT_FPOW: EqOpc = ISD::FPOW; break;
case ISD::STRICT_FPOWI: EqOpc = ISD::FPOWI; break;
@@ -1428,6 +1436,12 @@
return PrefLoopAlignment;
}
+ /// Should loops be aligned even when the function is marked OptSize (but not
+ /// MinSize).
+ virtual bool alignLoopsWithOptSize() const {
+ return false;
+ }
+
/// If the target has a standard location for the stack protector guard,
/// returns the address of that location. Otherwise, returns nullptr.
/// DEPRECATED: please override useLoadStackGuardNode and customize
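
A target whose loop-alignment padding is cheap relative to the branch-target penalty can opt in to alignLoopsWithOptSize. A sketch for the same hypothetical target:

    bool MyTargetTargetLowering::alignLoopsWithOptSize() const {
      // Keep loops aligned under -Os (OptSize); per the hook's contract,
      // functions marked MinSize (-Oz) still skip the padding.
      return true;
    }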
@@ -1549,6 +1563,26 @@
llvm_unreachable("Store conditional unimplemented on this target");
}
+ /// Perform a masked atomicrmw using a target-specific intrinsic. This
+ /// represents the core LL/SC loop which will be lowered at a late stage by
+ /// the backend.
+ virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilder<> &Builder,
+ AtomicRMWInst *AI,
+ Value *AlignedAddr, Value *Incr,
+ Value *Mask, Value *ShiftAmt,
+ AtomicOrdering Ord) const {
+ llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
+ }
+
+ /// Perform a masked cmpxchg using a target-specific intrinsic. This
+ /// represents the core LL/SC loop which will be lowered at a late stage by
+ /// the backend.
+ virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
+ IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
+ Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
+ llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
+ }
+
/// Inserts in the IR a target-specific intrinsic specifying a fence.
/// It is called by AtomicExpandPass before expanding an
/// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
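
Together with the new MaskedIntrinsic expansion kind above, these hooks let a backend handle sub-word atomics by widening them to an aligned word and emitting a single target intrinsic that represents the whole LL/SC loop. A minimal sketch, assuming a hypothetical llvm.mytarget.masked.atomicrmw intrinsic (the intrinsic ID, target name, and operand layout are illustrative, not part of this patch):

    TargetLowering::AtomicExpansionKind
    MyTargetTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
      // Route i8/i16 atomicrmw through the masked-intrinsic path; wider
      // operations keep their existing expansion.
      unsigned Size = AI->getType()->getPrimitiveSizeInBits();
      if (Size == 8 || Size == 16)
        return AtomicExpansionKind::MaskedIntrinsic;
      return AtomicExpansionKind::None;
    }

    Value *MyTargetTargetLowering::emitMaskedAtomicRMWIntrinsic(
        IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
        Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
      Module *M = AI->getModule();
      // Hypothetical intrinsic: the memory ordering is passed as an immediate
      // argument and the call is lowered to the real LL/SC loop late in the
      // backend.
      Function *Fn = Intrinsic::getDeclaration(
          M, Intrinsic::mytarget_masked_atomicrmw, Builder.getInt32Ty());
      Value *Ordering = Builder.getInt32(static_cast<uint32_t>(Ord));
      return Builder.CreateCall(Fn, {AlignedAddr, Incr, Mask, ShiftAmt, Ordering});
    }

emitMaskedAtomicCmpXchgIntrinsic would follow the same pattern, passing the compare and new values in place of Incr.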
@@ -1625,11 +1659,11 @@
return AtomicExpansionKind::None;
}
- /// Returns true if the given atomic cmpxchg should be expanded by the
- /// IR-level AtomicExpand pass into a load-linked/store-conditional sequence
- /// (through emitLoadLinked() and emitStoreConditional()).
- virtual bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
- return false;
+ /// Returns how the given atomic cmpxchg should be expanded by the IR-level
+ /// AtomicExpand pass.
+ virtual AtomicExpansionKind
+ shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
+ return AtomicExpansionKind::None;
}
/// Returns how the IR-level AtomicExpand pass should expand the given
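
For an out-of-tree target that previously returned true here, the mechanical migration is to return the expansion kind matching the old behaviour; per the removed comment, true meant an LL/SC expansion through emitLoadLinked()/emitStoreConditional(). A sketch (target name illustrative):

    TargetLowering::AtomicExpansionKind
    MyTargetTargetLowering::shouldExpandAtomicCmpXchgInIR(
        AtomicCmpXchgInst *AI) const {
      // Old interface: `return true;` -> expand into an LL/SC loop.
      return AtomicExpansionKind::LLSC;
    }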
@@ -1687,6 +1721,15 @@
return false;
}
+ /// Return true if it is profitable to transform an integer
+ /// multiplication-by-constant into simpler operations like shifts and adds.
+ /// This may be true if the target does not directly support the
+ /// multiplication operation for the specified type, or if the sequence of
+ /// simpler ops is faster than the multiply.
+ virtual bool decomposeMulByConstant(EVT VT, SDValue C) const {
+ return false;
+ }
+
//===--------------------------------------------------------------------===//
// TargetLowering Configuration Methods - These methods should be invoked by
// the derived class constructor to configure this object for the target.
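
A sketch of a plausible decomposeMulByConstant override for a hypothetical target with a slow integer multiplier, decomposing only constants adjacent to a power of two (the names and the heuristic are illustrative, not part of this patch):

    bool MyTargetTargetLowering::decomposeMulByConstant(EVT VT, SDValue C) const {
      if (VT != MVT::i32)
        return false;
      if (auto *ConstNode = dyn_cast<ConstantSDNode>(C)) {
        // c == 2^n + 1 or 2^n - 1 becomes (x << n) +/- x: two cheap ops.
        const APInt &Imm = ConstNode->getAPIntValue();
        return (Imm - 1).isPowerOf2() || (Imm + 1).isPowerOf2();
      }
      return false;
    }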
@@ -2648,7 +2691,7 @@
virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
FunctionLoweringInfo *FLI,
- DivergenceAnalysis *DA) const {
+ LegacyDivergenceAnalysis *DA) const {
return false;
}
@@ -2803,7 +2846,8 @@
unsigned Depth = 0,
bool AssumeSingleUse = false) const;
- /// Helper wrapper around SimplifyDemandedBits
+ /// Helper wrapper around SimplifyDemandedBits.
+ /// Adds Op back to the worklist upon success.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
DAGCombinerInfo &DCI) const;
@@ -2826,7 +2870,8 @@
TargetLoweringOpt &TLO, unsigned Depth = 0,
bool AssumeSingleUse = false) const;
- /// Helper wrapper around SimplifyDemandedVectorElts
+ /// Helper wrapper around SimplifyDemandedVectorElts.
+ /// Adds Op back to the worklist upon success.
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
APInt &KnownUndef, APInt &KnownZero,
DAGCombinerInfo &DCI) const;
@@ -2942,12 +2987,16 @@
///
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
- /// Return true if it is profitable to move a following shift through this
- // node, adjusting any immediate operands as necessary to preserve semantics.
- // This transformation may not be desirable if it disrupts a particularly
- // auspicious target-specific tree (e.g. bitfield extraction in AArch64).
- // By default, it returns true.
- virtual bool isDesirableToCommuteWithShift(const SDNode *N) const {
+ /// Return true if it is profitable to move this shift by a constant amount
+ /// through its operand, adjusting any immediate operands as necessary to
+ /// preserve semantics. This transformation may not be desirable if it
+ /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
+ /// extraction in AArch64). By default, it returns true.
+ ///
+ /// @param N the shift node
+ /// @param Level the current DAGCombine legalization level.
+ virtual bool isDesirableToCommuteWithShift(const SDNode *N,
+ CombineLevel Level) const {
return true;
}
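
The new CombineLevel parameter (from llvm/CodeGen/DAGCombine.h, already included above) lets a target gate the transform by legalization phase. A hedged sketch of one possible policy (names illustrative):

    bool MyTargetTargetLowering::isDesirableToCommuteWithShift(
        const SDNode *N, CombineLevel Level) const {
      // Hypothetical policy: commute freely before type legalization, but
      // leave later DAGs alone so already-matched target patterns survive.
      return Level == CombineLevel::BeforeLegalizeTypes;
    }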