Update prebuilt Clang to r365631c1 from Android.
The version we had was segfaulting.
Bug: 132420445
Change-Id: Icb45a6fe0b4e2166f7895e669df1157cec9fb4e0
diff --git a/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h
index c0dd9d1..5aab964 100644
--- a/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -368,6 +368,13 @@
bool ApproximateFuncs : 1;
bool AllowReassociation : 1;
+ // We assume instructions do not raise floating-point exceptions by default,
+ // and only those marked explicitly may do so. We could choose to represent
+ // this via a positive "FPExcept" flag like on the MI level, but having a
+ // negative "NoFPExcept" flag here (that defaults to true) makes the flag
+ // intersection logic more straightforward.
+ bool NoFPExcept : 1;
+
public:
/// Default constructor turns off all optimization flags.
SDNodeFlags()
@@ -375,7 +382,7 @@
Exact(false), NoNaNs(false), NoInfs(false),
NoSignedZeros(false), AllowReciprocal(false), VectorReduction(false),
AllowContract(false), ApproximateFuncs(false),
- AllowReassociation(false) {}
+ AllowReassociation(false), NoFPExcept(true) {}
/// Propagate the fast-math-flags from an IR FPMathOperator.
void copyFMF(const FPMathOperator &FPMO) {
@@ -438,6 +445,10 @@
setDefined();
AllowReassociation = b;
}
+ void setFPExcept(bool b) {
+ setDefined();
+ NoFPExcept = !b;
+ }
// These are accessors for each flag.
bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
@@ -451,9 +462,10 @@
bool hasAllowContract() const { return AllowContract; }
bool hasApproximateFuncs() const { return ApproximateFuncs; }
bool hasAllowReassociation() const { return AllowReassociation; }
+ bool hasFPExcept() const { return !NoFPExcept; }
bool isFast() const {
- return NoSignedZeros && AllowReciprocal && NoNaNs && NoInfs &&
+ return NoSignedZeros && AllowReciprocal && NoNaNs && NoInfs && NoFPExcept &&
AllowContract && ApproximateFuncs && AllowReassociation;
}
@@ -473,6 +485,7 @@
AllowContract &= Flags.AllowContract;
ApproximateFuncs &= Flags.ApproximateFuncs;
AllowReassociation &= Flags.AllowReassociation;
+ NoFPExcept &= Flags.NoFPExcept;
}
};
@@ -488,6 +501,17 @@
// SubclassData. These are designed to fit within a uint16_t so they pack
// with NodeType.
+#if defined(_AIX) && (!defined(__GNUC__) || defined(__ibmxl__))
+// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
+// and give the `pack` pragma push semantics.
+#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")
+#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")
+#else
+#define BEGIN_TWO_BYTE_PACK()
+#define END_TWO_BYTE_PACK()
+#endif
+
+BEGIN_TWO_BYTE_PACK()
class SDNodeBitfields {
friend class SDNode;
friend class MemIntrinsicSDNode;
@@ -560,6 +584,9 @@
LoadSDNodeBitfields LoadSDNodeBits;
StoreSDNodeBitfields StoreSDNodeBits;
};
+END_TWO_BYTE_PACK()
+#undef BEGIN_TWO_BYTE_PACK
+#undef END_TWO_BYTE_PACK
// RawSDNodeBits must cover the entirety of the union. This means that all of
// the union's members must have size <= RawSDNodeBits. We write the RHS as
@@ -677,6 +704,8 @@
case ISD::STRICT_FFLOOR:
case ISD::STRICT_FROUND:
case ISD::STRICT_FTRUNC:
+ case ISD::STRICT_FP_ROUND:
+ case ISD::STRICT_FP_EXTEND:
return true;
}
}
@@ -1385,7 +1414,10 @@
public:
AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
EVT MemVT, MachineMemOperand *MMO)
- : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {}
+ : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
+ assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||
+ MMO->isAtomic()) && "then why are we using an AtomicSDNode?");
+ }
const SDValue &getBasePtr() const { return getOperand(1); }
const SDValue &getVal() const { return getOperand(2); }
@@ -1482,14 +1514,16 @@
bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
- int getSplatIndex() const {
+ int getSplatIndex() const {
assert(isSplat() && "Cannot get splat index for non-splat!");
EVT VT = getValueType(0);
- for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
+ for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
if (Mask[i] >= 0)
return Mask[i];
- }
- llvm_unreachable("Splat with all undef indices?");
+
+ // We can choose any index value here and be correct because all elements
+ // are undefined. Return 0 for better potential for callers to simplify.
+ return 0;
}
static bool isSplatMask(const int *Mask, EVT VT);
@@ -1622,16 +1656,32 @@
/// If \p V is not a bitcasted one-use value, it is returned as-is.
SDValue peekThroughOneUseBitcasts(SDValue V);
+/// Return the non-extracted vector source operand of \p V if it exists.
+/// If \p V is not an extracted subvector, it is returned as-is.
+SDValue peekThroughExtractSubvectors(SDValue V);
+
/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
/// constant is canonicalized to be operand 1.
-bool isBitwiseNot(SDValue V);
+bool isBitwiseNot(SDValue V, bool AllowUndefs = false);
/// Returns the SDNode if it is a constant splat BuildVector or constant int.
-ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false);
+ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false,
+ bool AllowTruncation = false);
+
+/// Returns the SDNode if it is a demanded constant splat BuildVector or
+/// constant int.
+ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
+ bool AllowUndefs = false,
+ bool AllowTruncation = false);
/// Returns the SDNode if it is a constant splat BuildVector or constant float.
ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);
+/// Returns the SDNode if it is a demanded constant splat BuildVector or
+/// constant float.
+ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts,
+ bool AllowUndefs = false);
+
/// Return true if the value is a constant 0 integer or a splatted vector of
/// a constant 0 integer (with no undefs by default).
/// Build vector implicit truncation is not an issue for null values.
@@ -1692,6 +1742,38 @@
}
};
+/// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate
+/// the offset and size that are started/ended in the underlying FrameIndex.
+class LifetimeSDNode : public SDNode {
+ friend class SelectionDAG;
+ int64_t Size;
+ int64_t Offset; // -1 if offset is unknown.
+
+ LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
+ SDVTList VTs, int64_t Size, int64_t Offset)
+ : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {}
+public:
+ int64_t getFrameIndex() const {
+ return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
+ }
+
+ bool hasOffset() const { return Offset >= 0; }
+ int64_t getOffset() const {
+ assert(hasOffset() && "offset is unknown");
+ return Offset;
+ }
+ int64_t getSize() const {
+ assert(hasOffset() && "offset is unknown");
+ return Size;
+ }
+
+ // Methods to support isa and dyn_cast
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::LIFETIME_START ||
+ N->getOpcode() == ISD::LIFETIME_END;
+ }
+};
+
class JumpTableSDNode : public SDNode {
friend class SelectionDAG;
@@ -1837,12 +1919,31 @@
unsigned MinSplatBits = 0,
bool isBigEndian = false) const;
+ /// Returns the demanded splatted value or a null value if this is not a
+ /// splat.
+ ///
+ /// The DemandedElts mask indicates the elements that must be in the splat.
+ /// If passed a non-null UndefElements bitvector, it will resize it to match
+ /// the vector width and set the bits where elements are undef.
+ SDValue getSplatValue(const APInt &DemandedElts,
+ BitVector *UndefElements = nullptr) const;
+
/// Returns the splatted value or a null value if this is not a splat.
///
/// If passed a non-null UndefElements bitvector, it will resize it to match
/// the vector width and set the bits where elements are undef.
SDValue getSplatValue(BitVector *UndefElements = nullptr) const;
+ /// Returns the demanded splatted constant or null if this is not a constant
+ /// splat.
+ ///
+ /// The DemandedElts mask indicates the elements that must be in the splat.
+ /// If passed a non-null UndefElements bitvector, it will resize it to match
+ /// the vector width and set the bits where elements are undef.
+ ConstantSDNode *
+ getConstantSplatNode(const APInt &DemandedElts,
+ BitVector *UndefElements = nullptr) const;
+
/// Returns the splatted constant or null if this is not a constant
/// splat.
///
@@ -1851,6 +1952,16 @@
ConstantSDNode *
getConstantSplatNode(BitVector *UndefElements = nullptr) const;
+ /// Returns the demanded splatted constant FP or null if this is not a
+ /// constant FP splat.
+ ///
+ /// The DemandedElts mask indicates the elements that must be in the splat.
+ /// If passed a non-null UndefElements bitvector, it will resize it to match
+ /// the vector width and set the bits where elements are undef.
+ ConstantFPSDNode *
+ getConstantFPSplatNode(const APInt &DemandedElts,
+ BitVector *UndefElements = nullptr) const;
+
/// Returns the splatted constant FP or null if this is not a constant
/// FP splat.
///
@@ -1975,8 +2086,10 @@
MCSymbol *Label;
- LabelSDNode(unsigned Order, const DebugLoc &dl, MCSymbol *L)
- : SDNode(ISD::EH_LABEL, Order, dl, getSDVTList(MVT::Other)), Label(L) {}
+ LabelSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, MCSymbol *L)
+ : SDNode(Opcode, Order, dl, getSDVTList(MVT::Other)), Label(L) {
+ assert(LabelSDNode::classof(this) && "not a label opcode");
+ }
public:
MCSymbol *getLabel() const { return Label; }
@@ -2068,6 +2181,8 @@
: MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
LSBaseSDNodeBits.AddressingMode = AM;
assert(getAddressingMode() == AM && "Value truncated");
+ assert((!MMO->isAtomic() || MMO->isVolatile()) &&
+ "use an AtomicSDNode instead for non-volatile atomics");
}
const SDValue &getOffset() const {
@@ -2492,18 +2607,6 @@
cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
}
- /// Return true if the node is a math/logic binary operator. This corresponds
- /// to the IR function of the same name.
- inline bool isBinaryOp(const SDNode *N) {
- auto Op = N->getOpcode();
- return (Op == ISD::ADD || Op == ISD::SUB || Op == ISD::MUL ||
- Op == ISD::AND || Op == ISD::OR || Op == ISD::XOR ||
- Op == ISD::SHL || Op == ISD::SRL || Op == ISD::SRA ||
- Op == ISD::SDIV || Op == ISD::UDIV || Op == ISD::SREM ||
- Op == ISD::UREM || Op == ISD::FADD || Op == ISD::FSUB ||
- Op == ISD::FMUL || Op == ISD::FDIV || Op == ISD::FREM);
- }
-
/// Attempt to match a unary predicate against a scalar/splat constant or
/// every element of a constant BUILD_VECTOR.
/// If AllowUndef is true, then UNDEF elements will pass nullptr to Match.
@@ -2514,10 +2617,11 @@
/// Attempt to match a binary predicate against a pair of scalar/splat
/// constants or every element of a pair of constant BUILD_VECTORs.
/// If AllowUndef is true, then UNDEF elements will pass nullptr to Match.
+ /// If AllowTypeMismatch is true then RetType + ArgTypes don't need to match.
bool matchBinaryPredicate(
SDValue LHS, SDValue RHS,
std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
- bool AllowUndefs = false);
+ bool AllowUndefs = false, bool AllowTypeMismatch = false);
} // end namespace ISD
} // end namespace llvm