Update prebuilt Clang to r416183b from Android.
https://android.googlesource.com/platform/prebuilts/clang/host/
linux-x86/+/06a71ddac05c22edb2d10b590e1769b3f8619bef
clang 12.0.5 (based on r416183b) from build 7284624.
Change-Id: I277a316abcf47307562d8b748b84870f31a72866
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/linux-x64/clang/include/llvm/ADT/APFixedPoint.h b/linux-x64/clang/include/llvm/ADT/APFixedPoint.h
new file mode 100644
index 0000000..d6349e6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/APFixedPoint.h
@@ -0,0 +1,237 @@
+//===- APFixedPoint.h - Fixed point constant handling -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// Defines the fixed point number interface.
+/// This is a class for abstracting various operations performed on fixed point
+/// types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_APFIXEDPOINT_H
+#define LLVM_ADT_APFIXEDPOINT_H
+
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+class APFloat;
+struct fltSemantics;
+
+/// The fixed point semantics work similarly to fltSemantics. The width
+/// specifies the whole bit width of the underlying scaled integer (with padding
+/// if any). The scale represents the number of fractional bits in this type.
+/// When HasUnsignedPadding is true and this type is unsigned, the first bit
+/// in the value this represents is treated as padding.
+class FixedPointSemantics {
+public:
+ FixedPointSemantics(unsigned Width, unsigned Scale, bool IsSigned,
+ bool IsSaturated, bool HasUnsignedPadding)
+ : Width(Width), Scale(Scale), IsSigned(IsSigned),
+ IsSaturated(IsSaturated), HasUnsignedPadding(HasUnsignedPadding) {
+ assert(Width >= Scale && "Not enough room for the scale");
+ assert(!(IsSigned && HasUnsignedPadding) &&
+ "Cannot have unsigned padding on a signed type.");
+ }
+
+ unsigned getWidth() const { return Width; }
+ unsigned getScale() const { return Scale; }
+ bool isSigned() const { return IsSigned; }
+ bool isSaturated() const { return IsSaturated; }
+ bool hasUnsignedPadding() const { return HasUnsignedPadding; }
+
+ void setSaturated(bool Saturated) { IsSaturated = Saturated; }
+
+ /// Return the number of integral bits represented by these semantics. These
+ /// are separate from the fractional bits and do not include the sign or
+ /// padding bit.
+ unsigned getIntegralBits() const {
+ if (IsSigned || (!IsSigned && HasUnsignedPadding))
+ return Width - Scale - 1;
+ else
+ return Width - Scale;
+ }
+
+ /// Return the FixedPointSemantics that allows for calculating the full
+ /// precision semantic that can precisely represent the precision and ranges
+ /// of both input values. This does not compute the resulting semantics for a
+ /// given binary operation.
+ FixedPointSemantics
+ getCommonSemantics(const FixedPointSemantics &Other) const;
+
+ /// Returns true if this fixed-point semantic with its value bits interpreted
+ /// as an integer can fit in the given floating point semantic without
+ /// overflowing to infinity.
+ /// For example, a signed 8-bit fixed-point semantic has a maximum and
+ /// minimum integer representation of 127 and -128, respectively. If both of
+ /// these values can be represented (possibly inexactly) in the floating
+ /// point semantic without overflowing, this returns true.
+ bool fitsInFloatSemantics(const fltSemantics &FloatSema) const;
+
+ /// Return the FixedPointSemantics for an integer type.
+ static FixedPointSemantics GetIntegerSemantics(unsigned Width,
+ bool IsSigned) {
+ return FixedPointSemantics(Width, /*Scale=*/0, IsSigned,
+ /*IsSaturated=*/false,
+ /*HasUnsignedPadding=*/false);
+ }
+
+private:
+ unsigned Width : 16;
+ unsigned Scale : 13;
+ unsigned IsSigned : 1;
+ unsigned IsSaturated : 1;
+ unsigned HasUnsignedPadding : 1;
+};
+
+/// The APFixedPoint class works similarly to APInt/APSInt in that it is a
+/// functional replacement for a scaled integer. It is meant to replicate the
+/// fixed point types proposed in ISO/IEC JTC1 SC22 WG14 N1169. The class carries
+/// info about the fixed point type's width, sign, scale, and saturation, and
+/// provides different operations that would normally be performed on fixed point
+/// types.
+class APFixedPoint {
+public:
+ APFixedPoint(const APInt &Val, const FixedPointSemantics &Sema)
+ : Val(Val, !Sema.isSigned()), Sema(Sema) {
+ assert(Val.getBitWidth() == Sema.getWidth() &&
+ "The value should have a bit width that matches the Sema width");
+ }
+
+ APFixedPoint(uint64_t Val, const FixedPointSemantics &Sema)
+ : APFixedPoint(APInt(Sema.getWidth(), Val, Sema.isSigned()), Sema) {}
+
+ // Zero initialization.
+ APFixedPoint(const FixedPointSemantics &Sema) : APFixedPoint(0, Sema) {}
+
+ APSInt getValue() const { return APSInt(Val, !Sema.isSigned()); }
+ inline unsigned getWidth() const { return Sema.getWidth(); }
+ inline unsigned getScale() const { return Sema.getScale(); }
+ inline bool isSaturated() const { return Sema.isSaturated(); }
+ inline bool isSigned() const { return Sema.isSigned(); }
+ inline bool hasPadding() const { return Sema.hasUnsignedPadding(); }
+ FixedPointSemantics getSemantics() const { return Sema; }
+
+ bool getBoolValue() const { return Val.getBoolValue(); }
+
+ // Convert this number to match the semantics provided. If the overflow
+ // parameter is provided, set this value to true or false to indicate if this
+ // operation results in an overflow.
+ APFixedPoint convert(const FixedPointSemantics &DstSema,
+ bool *Overflow = nullptr) const;
+
+ // Perform binary operations on a fixed point type. The resulting fixed point
+ // value will be in the common, full precision semantics that can represent
+ // the precision and ranges of both input values. See convert() for an
+ // explanation of the Overflow parameter.
+ APFixedPoint add(const APFixedPoint &Other, bool *Overflow = nullptr) const;
+ APFixedPoint sub(const APFixedPoint &Other, bool *Overflow = nullptr) const;
+ APFixedPoint mul(const APFixedPoint &Other, bool *Overflow = nullptr) const;
+ APFixedPoint div(const APFixedPoint &Other, bool *Overflow = nullptr) const;
+
+ // Perform shift operations on a fixed point type. Unlike the other binary
+ // operations, the resulting fixed point value will be in the original
+ // semantic.
+ APFixedPoint shl(unsigned Amt, bool *Overflow = nullptr) const;
+ APFixedPoint shr(unsigned Amt, bool *Overflow = nullptr) const {
+ // Right shift cannot overflow.
+ if (Overflow)
+ *Overflow = false;
+ return APFixedPoint(Val >> Amt, Sema);
+ }
+
+ /// Perform a unary negation (-X) on this fixed point type, taking into
+ /// account saturation if applicable.
+ APFixedPoint negate(bool *Overflow = nullptr) const;
+
+ /// Return the integral part of this fixed point number, rounded towards
+ /// zero. (-2.5k -> -2)
+ APSInt getIntPart() const {
+ if (Val < 0 && Val != -Val) // Cover the case when we have the min val
+ return -(-Val >> getScale());
+ else
+ return Val >> getScale();
+ }
+
+ /// Return the integral part of this fixed point number, rounded towards
+ /// zero. The value is stored into an APSInt with the provided width and sign.
+ /// If the overflow parameter is provided, and the integral value is not able
+ /// to be fully stored in the provided width and sign, the overflow parameter
+ /// is set to true.
+ APSInt convertToInt(unsigned DstWidth, bool DstSign,
+ bool *Overflow = nullptr) const;
+
+ /// Convert this fixed point number to a floating point value with the
+ /// provided semantics.
+ APFloat convertToFloat(const fltSemantics &FloatSema) const;
+
+ void toString(SmallVectorImpl<char> &Str) const;
+ std::string toString() const {
+ SmallString<40> S;
+ toString(S);
+ return std::string(S.str());
+ }
+
+ // If LHS > RHS, return 1. If LHS == RHS, return 0. If LHS < RHS, return -1.
+ int compare(const APFixedPoint &Other) const;
+ bool operator==(const APFixedPoint &Other) const {
+ return compare(Other) == 0;
+ }
+ bool operator!=(const APFixedPoint &Other) const {
+ return compare(Other) != 0;
+ }
+ bool operator>(const APFixedPoint &Other) const { return compare(Other) > 0; }
+ bool operator<(const APFixedPoint &Other) const { return compare(Other) < 0; }
+ bool operator>=(const APFixedPoint &Other) const {
+ return compare(Other) >= 0;
+ }
+ bool operator<=(const APFixedPoint &Other) const {
+ return compare(Other) <= 0;
+ }
+
+ static APFixedPoint getMax(const FixedPointSemantics &Sema);
+ static APFixedPoint getMin(const FixedPointSemantics &Sema);
+
+ /// Given a floating point semantic, return the next floating point semantic
+ /// with a larger exponent and larger or equal mantissa.
+ static const fltSemantics *promoteFloatSemantics(const fltSemantics *S);
+
+ /// Create an APFixedPoint with a value equal to that of the provided integer,
+ /// and in the same semantics as the provided target semantics. If the value
+ /// is not able to fit in the specified fixed point semantics, and the
+ /// overflow parameter is provided, it is set to true.
+ static APFixedPoint getFromIntValue(const APSInt &Value,
+ const FixedPointSemantics &DstFXSema,
+ bool *Overflow = nullptr);
+
+ /// Create an APFixedPoint with a value equal to that of the provided
+ /// floating point value, in the provided target semantics. If the value is
+ /// not able to fit in the specified fixed point semantics and the overflow
+ /// parameter is specified, it is set to true.
+ /// For NaN, the Overflow flag is always set. For +inf and -inf, if the
+ /// semantic is saturating, the value saturates. Otherwise, the Overflow flag
+ /// is set.
+ static APFixedPoint getFromFloatValue(const APFloat &Value,
+ const FixedPointSemantics &DstFXSema,
+ bool *Overflow = nullptr);
+
+private:
+ APSInt Val;
+ FixedPointSemantics Sema;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const APFixedPoint &FX) {
+ OS << FX.toString();
+ return OS;
+}
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/APFloat.h b/linux-x64/clang/include/llvm/ADT/APFloat.h
index a9648d3..1f9ac22 100644
--- a/linux-x64/clang/include/llvm/ADT/APFloat.h
+++ b/linux-x64/clang/include/llvm/ADT/APFloat.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/Support/ErrorHandling.h"
#include <memory>
@@ -38,6 +39,7 @@
class APFloat;
class raw_ostream;
+template <typename T> class Expected;
template <typename T> class SmallVectorImpl;
/// Enum that represents what fraction of the LSB truncated bits of an fp number
@@ -140,15 +142,16 @@
// members.
struct APFloatBase {
typedef APInt::WordType integerPart;
- static const unsigned integerPartWidth = APInt::APINT_BITS_PER_WORD;
+ static constexpr unsigned integerPartWidth = APInt::APINT_BITS_PER_WORD;
/// A signed type to represent a floating point numbers unbiased exponent.
- typedef signed short ExponentType;
+ typedef int32_t ExponentType;
/// \name Floating Point Semantics.
/// @{
enum Semantics {
S_IEEEhalf,
+ S_BFloat,
S_IEEEsingle,
S_IEEEdouble,
S_x87DoubleExtended,
@@ -160,6 +163,7 @@
static Semantics SemanticsToEnum(const llvm::fltSemantics &Sem);
static const fltSemantics &IEEEhalf() LLVM_READNONE;
+ static const fltSemantics &BFloat() LLVM_READNONE;
static const fltSemantics &IEEEsingle() LLVM_READNONE;
static const fltSemantics &IEEEdouble() LLVM_READNONE;
static const fltSemantics &IEEEquad() LLVM_READNONE;
@@ -181,17 +185,24 @@
};
/// IEEE-754R 4.3: Rounding-direction attributes.
- enum roundingMode {
- rmNearestTiesToEven,
- rmTowardPositive,
- rmTowardNegative,
- rmTowardZero,
- rmNearestTiesToAway
- };
+ using roundingMode = llvm::RoundingMode;
+
+ static constexpr roundingMode rmNearestTiesToEven =
+ RoundingMode::NearestTiesToEven;
+ static constexpr roundingMode rmTowardPositive = RoundingMode::TowardPositive;
+ static constexpr roundingMode rmTowardNegative = RoundingMode::TowardNegative;
+ static constexpr roundingMode rmTowardZero = RoundingMode::TowardZero;
+ static constexpr roundingMode rmNearestTiesToAway =
+ RoundingMode::NearestTiesToAway;
/// IEEE-754R 7: Default exception handling.
///
/// opUnderflow or opOverflow are always returned or-ed with opInexact.
+ ///
+ /// APFloat models this behavior specified by IEEE-754:
+ /// "For operations producing results in floating-point format, the default
+ /// result of an operation that signals the invalid operation exception
+ /// shall be a quiet NaN."
enum opStatus {
opOK = 0x00,
opInvalidOp = 0x01,
@@ -238,7 +249,7 @@
/// \name Constructors
/// @{
- IEEEFloat(const fltSemantics &); // Default construct to 0.0
+ IEEEFloat(const fltSemantics &); // Default construct to +0.0
IEEEFloat(const fltSemantics &, integerPart);
IEEEFloat(const fltSemantics &, uninitializedTag);
IEEEFloat(const fltSemantics &, const APInt &);
@@ -294,7 +305,7 @@
bool, roundingMode);
opStatus convertFromZeroExtendedInteger(const integerPart *, unsigned int,
bool, roundingMode);
- opStatus convertFromString(StringRef, roundingMode);
+ Expected<opStatus> convertFromString(StringRef, roundingMode);
APInt bitcastToAPInt() const;
double convertToDouble() const;
float convertToFloat() const;
@@ -481,7 +492,8 @@
integerPart addSignificand(const IEEEFloat &);
integerPart subtractSignificand(const IEEEFloat &, integerPart);
lostFraction addOrSubtractSignificand(const IEEEFloat &, bool subtract);
- lostFraction multiplySignificand(const IEEEFloat &, const IEEEFloat *);
+ lostFraction multiplySignificand(const IEEEFloat &, IEEEFloat);
+ lostFraction multiplySignificand(const IEEEFloat&);
lostFraction divideSignificand(const IEEEFloat &);
void incrementSignificand();
void initialize(const fltSemantics *);
@@ -504,6 +516,7 @@
opStatus divideSpecials(const IEEEFloat &);
opStatus multiplySpecials(const IEEEFloat &);
opStatus modSpecials(const IEEEFloat &);
+ opStatus remainderSpecials(const IEEEFloat&);
/// @}
@@ -520,16 +533,20 @@
bool *) const;
opStatus convertFromUnsignedParts(const integerPart *, unsigned int,
roundingMode);
- opStatus convertFromHexadecimalString(StringRef, roundingMode);
- opStatus convertFromDecimalString(StringRef, roundingMode);
+ Expected<opStatus> convertFromHexadecimalString(StringRef, roundingMode);
+ Expected<opStatus> convertFromDecimalString(StringRef, roundingMode);
char *convertNormalToHexString(char *, unsigned int, bool,
roundingMode) const;
opStatus roundSignificandWithExponent(const integerPart *, unsigned int, int,
roundingMode);
+ ExponentType exponentNaN() const;
+ ExponentType exponentInf() const;
+ ExponentType exponentZero() const;
/// @}
APInt convertHalfAPFloatToAPInt() const;
+ APInt convertBFloatAPFloatToAPInt() const;
APInt convertFloatAPFloatToAPInt() const;
APInt convertDoubleAPFloatToAPInt() const;
APInt convertQuadrupleAPFloatToAPInt() const;
@@ -537,6 +554,7 @@
APInt convertPPCDoubleDoubleAPFloatToAPInt() const;
void initFromAPInt(const fltSemantics *Sem, const APInt &api);
void initFromHalfAPInt(const APInt &api);
+ void initFromBFloatAPInt(const APInt &api);
void initFromFloatAPInt(const APInt &api);
void initFromDoubleAPInt(const APInt &api);
void initFromQuadrupleAPInt(const APInt &api);
@@ -578,7 +596,7 @@
IEEEFloat frexp(const IEEEFloat &Val, int &Exp, IEEEFloat::roundingMode RM);
// This mode implements more precise float in terms of two APFloats.
-// The interface and layout is designed for arbitray underlying semantics,
+// The interface and layout is designed for arbitrary underlying semantics,
// though currently only PPCDoubleDouble semantics are supported, whose
// corresponding underlying semantics are IEEEdouble.
class DoubleAPFloat final : public APFloatBase {
@@ -643,7 +661,7 @@
cmpResult compare(const DoubleAPFloat &RHS) const;
bool bitwiseIsEqual(const DoubleAPFloat &RHS) const;
APInt bitcastToAPInt() const;
- opStatus convertFromString(StringRef, roundingMode);
+ Expected<opStatus> convertFromString(StringRef, roundingMode);
opStatus next(bool nextDown);
opStatus convertToInteger(MutableArrayRef<integerPart> Input,
@@ -846,6 +864,9 @@
APFloat(const fltSemantics &Semantics) : U(Semantics) {}
APFloat(const fltSemantics &Semantics, StringRef S);
APFloat(const fltSemantics &Semantics, integerPart I) : U(Semantics, I) {}
+ template <typename T,
+ typename = std::enable_if_t<std::is_floating_point<T>::value>>
+ APFloat(const fltSemantics &Semantics, T V) = delete;
// TODO: Remove this constructor. This isn't faster than the first one.
APFloat(const fltSemantics &Semantics, uninitializedTag)
: U(Semantics, uninitialized) {}
@@ -940,9 +961,10 @@
/// Returns a float which is bitcasted from an all one value int.
///
+ /// \param Semantics - type float semantics
/// \param BitWidth - Select float type
- /// \param isIEEE - If 128 bit number, select between PPC and IEEE
- static APFloat getAllOnesValue(unsigned BitWidth, bool isIEEE = false);
+ static APFloat getAllOnesValue(const fltSemantics &Semantics,
+ unsigned BitWidth);
/// Used to insert APFloat objects, or objects that contain APFloat objects,
/// into FoldingSets.
@@ -1025,6 +1047,13 @@
APFLOAT_DISPATCH_ON_SEMANTICS(next(nextDown));
}
+ /// Negate an APFloat.
+ APFloat operator-() const {
+ APFloat Result(*this);
+ Result.changeSign();
+ return Result;
+ }
+
/// Add two APFloats, rounding ties to the nearest even.
/// No error checking.
APFloat operator+(const APFloat &RHS) const {
@@ -1100,14 +1129,34 @@
APFLOAT_DISPATCH_ON_SEMANTICS(
convertFromZeroExtendedInteger(Input, InputSize, IsSigned, RM));
}
- opStatus convertFromString(StringRef, roundingMode);
+ Expected<opStatus> convertFromString(StringRef, roundingMode);
APInt bitcastToAPInt() const {
APFLOAT_DISPATCH_ON_SEMANTICS(bitcastToAPInt());
}
double convertToDouble() const { return getIEEE().convertToDouble(); }
float convertToFloat() const { return getIEEE().convertToFloat(); }
- bool operator==(const APFloat &) const = delete;
+ bool operator==(const APFloat &RHS) const { return compare(RHS) == cmpEqual; }
+
+ bool operator!=(const APFloat &RHS) const { return compare(RHS) != cmpEqual; }
+
+ bool operator<(const APFloat &RHS) const {
+ return compare(RHS) == cmpLessThan;
+ }
+
+ bool operator>(const APFloat &RHS) const {
+ return compare(RHS) == cmpGreaterThan;
+ }
+
+ bool operator<=(const APFloat &RHS) const {
+ cmpResult Res = compare(RHS);
+ return Res == cmpLessThan || Res == cmpEqual;
+ }
+
+ bool operator>=(const APFloat &RHS) const {
+ cmpResult Res = compare(RHS);
+ return Res == cmpGreaterThan || Res == cmpEqual;
+ }
cmpResult compare(const APFloat &RHS) const {
assert(&getSemantics() == &RHS.getSemantics() &&
@@ -1239,7 +1288,7 @@
return B;
if (B.isNaN())
return A;
- return (B.compare(A) == APFloat::cmpLessThan) ? B : A;
+ return B < A ? B : A;
}
/// Implements IEEE maxNum semantics. Returns the larger of the 2 arguments if
@@ -1250,7 +1299,7 @@
return B;
if (B.isNaN())
return A;
- return (A.compare(B) == APFloat::cmpLessThan) ? B : A;
+ return A < B ? B : A;
}
/// Implements IEEE 754-2018 minimum semantics. Returns the smaller of 2
@@ -1263,7 +1312,7 @@
return B;
if (A.isZero() && B.isZero() && (A.isNegative() != B.isNegative()))
return A.isNegative() ? A : B;
- return (B.compare(A) == APFloat::cmpLessThan) ? B : A;
+ return B < A ? B : A;
}
/// Implements IEEE 754-2018 maximum semantics. Returns the larger of 2
@@ -1276,7 +1325,7 @@
return B;
if (A.isZero() && B.isZero() && (A.isNegative() != B.isNegative()))
return A.isNegative() ? B : A;
- return (A.compare(B) == APFloat::cmpLessThan) ? B : A;
+ return A < B ? B : A;
}
} // namespace llvm
diff --git a/linux-x64/clang/include/llvm/ADT/APInt.h b/linux-x64/clang/include/llvm/ADT/APInt.h
index 2381b75..b97ea2c 100644
--- a/linux-x64/clang/include/llvm/ADT/APInt.h
+++ b/linux-x64/clang/include/llvm/ADT/APInt.h
@@ -31,6 +31,7 @@
template <typename T> class SmallVectorImpl;
template <typename T> class ArrayRef;
template <typename T> class Optional;
+template <typename T> struct DenseMapInfo;
class APInt;
@@ -84,7 +85,7 @@
UP,
};
- static const WordType WORDTYPE_MAX = ~WordType(0);
+ static constexpr WordType WORDTYPE_MAX = ~WordType(0);
private:
/// This union is used to store the integer value. When the
@@ -96,7 +97,7 @@
unsigned BitWidth; ///< The number of bits in this APInt.
- friend struct DenseMapAPIntKeyInfo;
+ friend struct DenseMapInfo<APInt>;
friend class APSInt;
@@ -389,6 +390,11 @@
/// \returns true if this APInt is positive.
bool isStrictlyPositive() const { return isNonNegative() && !isNullValue(); }
+ /// Determine if this APInt Value is non-positive (<= 0).
+ ///
+ /// \returns true if this APInt is non-positive.
+ bool isNonPositive() const { return !isStrictlyPositive(); }
+
/// Determine if all bits are set
///
/// This checks to see if the value has all bits of the APInt are set or not.
@@ -595,8 +601,8 @@
/// Constructs an APInt value that has a contiguous range of bits set. The
/// bits from loBit (inclusive) to hiBit (exclusive) will be set. All other
/// bits will be zero. For example, with parameters(32, 0, 16) you would get
- /// 0x0000FFFF. If hiBit is less than loBit then the set bits "wrap". For
- /// example, with parameters (32, 28, 4), you would get 0xF000000F.
+ /// 0x0000FFFF. Please call getBitsSetWithWrap if \p loBit may be greater than
+ /// \p hiBit.
///
/// \param numBits the intended bit width of the result
/// \param loBit the index of the lowest bit set.
@@ -604,11 +610,25 @@
///
/// \returns An APInt value with the requested bits set.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit) {
+ assert(loBit <= hiBit && "loBit greater than hiBit");
APInt Res(numBits, 0);
Res.setBits(loBit, hiBit);
return Res;
}
+ /// Wrap version of getBitsSet.
+ /// If \p hiBit is bigger than \p loBit, this is same with getBitsSet.
+ /// If \p hiBit is not bigger than \p loBit, the set bits "wrap". For example,
+ /// with parameters (32, 28, 4), you would get 0xF000000F.
+ /// If \p hiBit is equal to \p loBit, you would get a result with all bits
+ /// set.
+ static APInt getBitsSetWithWrap(unsigned numBits, unsigned loBit,
+ unsigned hiBit) {
+ APInt Res(numBits, 0);
+ Res.setBitsWithWrap(loBit, hiBit);
+ return Res;
+ }
+
/// Get a value with upper bits starting at loBit set.
///
/// Constructs an APInt value that has a contiguous range of bits set. The
@@ -745,8 +765,8 @@
/// Move assignment operator.
APInt &operator=(APInt &&that) {
-#ifdef _MSC_VER
- // The MSVC std::shuffle implementation still does self-assignment.
+#ifdef EXPENSIVE_CHECKS
+ // Some std::shuffle implementations still do self-assignment.
if (this == &that)
return *this;
#endif
@@ -774,11 +794,10 @@
APInt &operator=(uint64_t RHS) {
if (isSingleWord()) {
U.VAL = RHS;
- clearUnusedBits();
- } else {
- U.pVal[0] = RHS;
- memset(U.pVal+1, 0, (getNumWords() - 1) * APINT_WORD_SIZE);
+ return clearUnusedBits();
}
+ U.pVal[0] = RHS;
+ memset(U.pVal + 1, 0, (getNumWords() - 1) * APINT_WORD_SIZE);
return *this;
}
@@ -835,10 +854,9 @@
APInt &operator|=(uint64_t RHS) {
if (isSingleWord()) {
U.VAL |= RHS;
- clearUnusedBits();
- } else {
- U.pVal[0] |= RHS;
+ return clearUnusedBits();
}
+ U.pVal[0] |= RHS;
return *this;
}
@@ -865,10 +883,9 @@
APInt &operator^=(uint64_t RHS) {
if (isSingleWord()) {
U.VAL ^= RHS;
- clearUnusedBits();
- } else {
- U.pVal[0] ^= RHS;
+ return clearUnusedBits();
}
+ U.pVal[0] ^= RHS;
return *this;
}
@@ -1109,6 +1126,10 @@
APInt uadd_sat(const APInt &RHS) const;
APInt ssub_sat(const APInt &RHS) const;
APInt usub_sat(const APInt &RHS) const;
+ APInt smul_sat(const APInt &RHS) const;
+ APInt umul_sat(const APInt &RHS) const;
+ APInt sshl_sat(const APInt &RHS) const;
+ APInt ushl_sat(const APInt &RHS) const;
/// Array-indexing support.
///
@@ -1245,7 +1266,7 @@
/// \returns true if *this <= RHS when considered signed.
bool sle(uint64_t RHS) const { return !sgt(RHS); }
- /// Unsigned greather than comparison
+ /// Unsigned greater than comparison
///
/// Regards both *this and RHS as unsigned quantities and compares them for
/// the validity of the greater-than relationship.
@@ -1264,7 +1285,7 @@
return (!isSingleWord() && getActiveBits() > 64) || getZExtValue() > RHS;
}
- /// Signed greather than comparison
+ /// Signed greater than comparison
///
/// Regards both *this and RHS as signed quantities and compares them for the
/// validity of the greater-than relationship.
@@ -1342,6 +1363,19 @@
/// that is greater than or equal to the current width.
APInt trunc(unsigned width) const;
+ /// Truncate to new width with unsigned saturation.
+ ///
+ /// If the APInt, treated as unsigned integer, can be losslessly truncated to
+ /// the new bitwidth, then return truncated APInt. Else, return max value.
+ APInt truncUSat(unsigned width) const;
+
+ /// Truncate to new width with signed saturation.
+ ///
+ /// If this APInt, treated as signed integer, can be losslessly truncated to
+ /// the new bitwidth, then return truncated APInt. Else, return either
+ /// signed min value if the APInt was negative, or signed max value.
+ APInt truncSSat(unsigned width) const;
+
/// Sign extend to a new width.
///
/// This operation sign extends the APInt to a new width. If the high order
@@ -1369,6 +1403,12 @@
/// extended, truncated, or left alone to make it that width.
APInt zextOrTrunc(unsigned width) const;
+ /// Truncate to width
+ ///
+ /// Make this APInt have the bit width given by \p width. The value is
+ /// truncated or left alone to make it that width.
+ APInt truncOrSelf(unsigned width) const;
+
/// Sign extend or truncate to width
///
/// Make this APInt have the bit width given by \p width. The value is sign
@@ -1413,7 +1453,31 @@
setBit(BitWidth - 1);
}
+ /// Set a given bit to a given value.
+ void setBitVal(unsigned BitPosition, bool BitValue) {
+ if (BitValue)
+ setBit(BitPosition);
+ else
+ clearBit(BitPosition);
+ }
+
/// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
+ /// This function handles "wrap" case when \p loBit >= \p hiBit, and calls
+ /// setBits when \p loBit < \p hiBit.
+ /// For \p loBit == \p hiBit wrap case, set every bit to 1.
+ void setBitsWithWrap(unsigned loBit, unsigned hiBit) {
+ assert(hiBit <= BitWidth && "hiBit out of range");
+ assert(loBit <= BitWidth && "loBit out of range");
+ if (loBit < hiBit) {
+ setBits(loBit, hiBit);
+ return;
+ }
+ setLowBits(hiBit);
+ setHighBits(BitWidth - loBit);
+ }
+
+ /// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
+ /// This function handles case when \p loBit <= \p hiBit.
void setBits(unsigned loBit, unsigned hiBit) {
assert(hiBit <= BitWidth && "hiBit out of range");
assert(loBit <= BitWidth && "loBit out of range");
@@ -1467,6 +1531,13 @@
U.pVal[whichWord(BitPosition)] &= Mask;
}
+ /// Set bottom loBits bits to 0.
+ void clearLowBits(unsigned loBits) {
+ assert(loBits <= BitWidth && "More bits than bitwidth");
+ APInt Keep = getHighBitsSet(BitWidth, BitWidth - loBits);
+ *this &= Keep;
+ }
+
/// Set the sign bit to 0.
void clearSignBit() {
clearBit(BitWidth - 1);
@@ -1496,9 +1567,11 @@
/// Insert the bits from a smaller APInt starting at bitPosition.
void insertBits(const APInt &SubBits, unsigned bitPosition);
+ void insertBits(uint64_t SubBits, unsigned bitPosition, unsigned numBits);
/// Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
APInt extractBits(unsigned numBits, unsigned bitPosition) const;
+ uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const;
/// @}
/// \name Value Characterization Functions
@@ -1548,11 +1621,7 @@
/// returns the smallest bit width that will retain the negative value. For
/// example, -1 can be written as 0b1 or 0xFFFFFFFFFF. 0b1 is shorter and so
/// for -1, this function will always return 1.
- unsigned getMinSignedBits() const {
- if (isNegative())
- return BitWidth - countLeadingOnes() + 1;
- return getActiveBits() + 1;
- }
+ unsigned getMinSignedBits() const { return BitWidth - getNumSignBits() + 1; }
/// Get zero extended value
///
@@ -1714,13 +1783,13 @@
return BitsToDouble(getWord(0));
}
- /// Converts APInt bits to a double
+ /// Converts APInt bits to a float
///
/// The conversion does not do a translation from integer to float, it just
/// re-interprets the bits as a float. Note that it is valid to do this on
/// any bit width. Exactly 32 bits will be translated.
float bitsToFloat() const {
- return BitsToFloat(getWord(0));
+ return BitsToFloat(static_cast<uint32_t>(getWord(0)));
}
/// Converts a double to APInt bits.
@@ -2149,7 +2218,7 @@
/// Converts the given APInt to a float value.
///
-/// Treast the APInt as a signed value for conversion purposes.
+/// Treats the APInt as a signed value for conversion purposes.
inline float RoundSignedAPIntToFloat(const APInt &APIVal) {
return float(APIVal.signedRoundToDouble());
}
@@ -2185,7 +2254,7 @@
/// count as an overflow, but here we want to allow values to decrease
/// and increase as long as they are within the same interval.
/// Specifically, adding of two negative numbers should not cause an
-/// overflow (as long as the magnitude does not exceed the bith width).
+/// overflow (as long as the magnitude does not exceed the bit width).
/// On the other hand, given a positive number, adding a negative
/// number to it can give a negative result, which would cause the
/// value to go from [-2^BW, 0) to [0, 2^BW). In that sense, zero is
@@ -2207,6 +2276,12 @@
/// coefficients.
Optional<APInt> SolveQuadraticEquationWrap(APInt A, APInt B, APInt C,
unsigned RangeWidth);
+
+/// Compare two values, and if they are different, return the position of the
+/// most significant bit that is different in the values.
+Optional<unsigned> GetMostSignificantDifferentBit(const APInt &A,
+ const APInt &B);
+
} // End of APIntOps namespace
// See friend declaration above. This additional declaration is required in
@@ -2219,7 +2294,7 @@
/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting
/// from Src into IntVal, which is assumed to be wide enough and to hold zero.
-void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes);
+void LoadIntFromMemory(APInt &IntVal, const uint8_t *Src, unsigned LoadBytes);
} // namespace llvm
diff --git a/linux-x64/clang/include/llvm/ADT/AllocatorList.h b/linux-x64/clang/include/llvm/ADT/AllocatorList.h
index 405a2e4..404a657 100644
--- a/linux-x64/clang/include/llvm/ADT/AllocatorList.h
+++ b/linux-x64/clang/include/llvm/ADT/AllocatorList.h
@@ -110,21 +110,14 @@
template <class OtherValueT, class OtherIteratorBase>
IteratorImpl(const IteratorImpl<OtherValueT, OtherIteratorBase> &X,
- typename std::enable_if<std::is_convertible<
- OtherIteratorBase, IteratorBase>::value>::type * = nullptr)
+ std::enable_if_t<std::is_convertible<
+ OtherIteratorBase, IteratorBase>::value> * = nullptr)
: base_type(X.wrapped()) {}
~IteratorImpl() = default;
reference operator*() const { return base_type::wrapped()->V; }
pointer operator->() const { return &operator*(); }
-
- friend bool operator==(const IteratorImpl &L, const IteratorImpl &R) {
- return L.wrapped() == R.wrapped();
- }
- friend bool operator!=(const IteratorImpl &L, const IteratorImpl &R) {
- return !(L == R);
- }
};
public:
diff --git a/linux-x64/clang/include/llvm/ADT/Any.h b/linux-x64/clang/include/llvm/ADT/Any.h
index 5dcd6e7..0aded62 100644
--- a/linux-x64/clang/include/llvm/ADT/Any.h
+++ b/linux-x64/clang/include/llvm/ADT/Any.h
@@ -38,7 +38,7 @@
explicit StorageImpl(T &&Value) : Value(std::move(Value)) {}
std::unique_ptr<StorageBase> clone() const override {
- return llvm::make_unique<StorageImpl<T>>(Value);
+ return std::make_unique<StorageImpl<T>>(Value);
}
const void *id() const override { return &TypeId<T>::Id; }
@@ -59,26 +59,26 @@
// When T is Any or T is not copy-constructible we need to explicitly disable
// the forwarding constructor so that the copy constructor gets selected
// instead.
- template <
- typename T,
- typename std::enable_if<
- llvm::conjunction<
- llvm::negation<std::is_same<typename std::decay<T>::type, Any>>,
- // We also disable this overload when an `Any` object can be
- // converted to the parameter type because in that case, this
- // constructor may combine with that conversion during overload
- // resolution for determining copy constructibility, and then
- // when we try to determine copy constructibility below we may
- // infinitely recurse. This is being evaluated by the standards
- // committee as a potential DR in `std::any` as well, but we're
- // going ahead and adopting it to work-around usage of `Any` with
- // types that need to be implicitly convertible from an `Any`.
- llvm::negation<std::is_convertible<Any, typename std::decay<T>::type>>,
- std::is_copy_constructible<typename std::decay<T>::type>>::value,
- int>::type = 0>
+ template <typename T,
+ std::enable_if_t<
+ llvm::conjunction<
+ llvm::negation<std::is_same<std::decay_t<T>, Any>>,
+ // We also disable this overload when an `Any` object can be
+ // converted to the parameter type because in that case,
+ // this constructor may combine with that conversion during
+ // overload resolution for determining copy
+ // constructibility, and then when we try to determine copy
+ // constructibility below we may infinitely recurse. This is
+ // being evaluated by the standards committee as a potential
+ // DR in `std::any` as well, but we're going ahead and
+ // adopting it to work-around usage of `Any` with types that
+ // need to be implicitly convertible from an `Any`.
+ llvm::negation<std::is_convertible<Any, std::decay_t<T>>>,
+ std::is_copy_constructible<std::decay_t<T>>>::value,
+ int> = 0>
Any(T &&Value) {
- using U = typename std::decay<T>::type;
- Storage = llvm::make_unique<StorageImpl<U>>(std::forward<T>(Value));
+ Storage =
+ std::make_unique<StorageImpl<std::decay_t<T>>>(std::forward<T>(Value));
}
Any(Any &&Other) : Storage(std::move(Other.Storage)) {}
@@ -114,32 +114,27 @@
template <typename T> bool any_isa(const Any &Value) {
if (!Value.Storage)
return false;
- using U =
- typename std::remove_cv<typename std::remove_reference<T>::type>::type;
- return Value.Storage->id() == &Any::TypeId<U>::Id;
+ return Value.Storage->id() ==
+ &Any::TypeId<std::remove_cv_t<std::remove_reference_t<T>>>::Id;
}
template <class T> T any_cast(const Any &Value) {
- using U =
- typename std::remove_cv<typename std::remove_reference<T>::type>::type;
- return static_cast<T>(*any_cast<U>(&Value));
+ return static_cast<T>(
+ *any_cast<std::remove_cv_t<std::remove_reference_t<T>>>(&Value));
}
template <class T> T any_cast(Any &Value) {
- using U =
- typename std::remove_cv<typename std::remove_reference<T>::type>::type;
- return static_cast<T>(*any_cast<U>(&Value));
+ return static_cast<T>(
+ *any_cast<std::remove_cv_t<std::remove_reference_t<T>>>(&Value));
}
template <class T> T any_cast(Any &&Value) {
- using U =
- typename std::remove_cv<typename std::remove_reference<T>::type>::type;
- return static_cast<T>(std::move(*any_cast<U>(&Value)));
+ return static_cast<T>(std::move(
+ *any_cast<std::remove_cv_t<std::remove_reference_t<T>>>(&Value)));
}
template <class T> const T *any_cast(const Any *Value) {
- using U =
- typename std::remove_cv<typename std::remove_reference<T>::type>::type;
+ using U = std::remove_cv_t<std::remove_reference_t<T>>;
assert(Value && any_isa<T>(*Value) && "Bad any cast!");
if (!Value || !any_isa<U>(*Value))
return nullptr;
@@ -147,7 +142,7 @@
}
template <class T> T *any_cast(Any *Value) {
- using U = typename std::decay<T>::type;
+ using U = std::decay_t<T>;
assert(Value && any_isa<U>(*Value) && "Bad any cast!");
if (!Value || !any_isa<U>(*Value))
return nullptr;
diff --git a/linux-x64/clang/include/llvm/ADT/ArrayRef.h b/linux-x64/clang/include/llvm/ADT/ArrayRef.h
index 773c88f..5ed4d07 100644
--- a/linux-x64/clang/include/llvm/ADT/ArrayRef.h
+++ b/linux-x64/clang/include/llvm/ADT/ArrayRef.h
@@ -38,7 +38,7 @@
/// This is intended to be trivially copyable, so it should be passed by
/// value.
template<typename T>
- class LLVM_NODISCARD ArrayRef {
+ class LLVM_GSL_POINTER LLVM_NODISCARD ArrayRef {
public:
using iterator = const T *;
using const_iterator = const T *;
@@ -97,37 +97,45 @@
/*implicit*/ constexpr ArrayRef(const T (&Arr)[N]) : Data(Arr), Length(N) {}
/// Construct an ArrayRef from a std::initializer_list.
+#if LLVM_GNUC_PREREQ(9, 0, 0)
+// Disable gcc's warning in this constructor as it generates an enormous amount
+// of messages. Anyone using ArrayRef should already be aware of the fact that
+// it does not do lifetime extension.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Winit-list-lifetime"
+#endif
/*implicit*/ ArrayRef(const std::initializer_list<T> &Vec)
: Data(Vec.begin() == Vec.end() ? (T*)nullptr : Vec.begin()),
Length(Vec.size()) {}
+#if LLVM_GNUC_PREREQ(9, 0, 0)
+#pragma GCC diagnostic pop
+#endif
/// Construct an ArrayRef<const T*> from ArrayRef<T*>. This uses SFINAE to
/// ensure that only ArrayRefs of pointers can be converted.
template <typename U>
- ArrayRef(
- const ArrayRef<U *> &A,
- typename std::enable_if<
- std::is_convertible<U *const *, T const *>::value>::type * = nullptr)
- : Data(A.data()), Length(A.size()) {}
+ ArrayRef(const ArrayRef<U *> &A,
+ std::enable_if_t<std::is_convertible<U *const *, T const *>::value>
+ * = nullptr)
+ : Data(A.data()), Length(A.size()) {}
/// Construct an ArrayRef<const T*> from a SmallVector<T*>. This is
/// templated in order to avoid instantiating SmallVectorTemplateCommon<T>
/// whenever we copy-construct an ArrayRef.
- template<typename U, typename DummyT>
+ template <typename U, typename DummyT>
/*implicit*/ ArrayRef(
- const SmallVectorTemplateCommon<U *, DummyT> &Vec,
- typename std::enable_if<
- std::is_convertible<U *const *, T const *>::value>::type * = nullptr)
- : Data(Vec.data()), Length(Vec.size()) {
- }
+ const SmallVectorTemplateCommon<U *, DummyT> &Vec,
+ std::enable_if_t<std::is_convertible<U *const *, T const *>::value> * =
+ nullptr)
+ : Data(Vec.data()), Length(Vec.size()) {}
/// Construct an ArrayRef<const T*> from std::vector<T*>. This uses SFINAE
/// to ensure that only vectors of pointers can be converted.
- template<typename U, typename A>
+ template <typename U, typename A>
ArrayRef(const std::vector<U *, A> &Vec,
- typename std::enable_if<
- std::is_convertible<U *const *, T const *>::value>::type* = 0)
- : Data(Vec.data()), Length(Vec.size()) {}
+ std::enable_if_t<std::is_convertible<U *const *, T const *>::value>
+ * = 0)
+ : Data(Vec.data()), Length(Vec.size()) {}
/// @}
/// @name Simple Operations
@@ -246,7 +254,7 @@
/// The declaration here is extra complicated so that "arrayRef = {}"
/// continues to select the move assignment operator.
template <typename U>
- typename std::enable_if<std::is_same<U, T>::value, ArrayRef<T>>::type &
+ std::enable_if_t<std::is_same<U, T>::value, ArrayRef<T>> &
operator=(U &&Temporary) = delete;
/// Disallow accidental assignment from a temporary.
@@ -254,7 +262,7 @@
/// The declaration here is extra complicated so that "arrayRef = {}"
/// continues to select the move assignment operator.
template <typename U>
- typename std::enable_if<std::is_same<U, T>::value, ArrayRef<T>>::type &
+ std::enable_if_t<std::is_same<U, T>::value, ArrayRef<T>> &
operator=(std::initializer_list<U>) = delete;
/// @}
@@ -298,17 +306,17 @@
/// Construct an empty MutableArrayRef from None.
/*implicit*/ MutableArrayRef(NoneType) : ArrayRef<T>() {}
- /// Construct an MutableArrayRef from a single element.
+ /// Construct a MutableArrayRef from a single element.
/*implicit*/ MutableArrayRef(T &OneElt) : ArrayRef<T>(OneElt) {}
- /// Construct an MutableArrayRef from a pointer and length.
+ /// Construct a MutableArrayRef from a pointer and length.
/*implicit*/ MutableArrayRef(T *data, size_t length)
: ArrayRef<T>(data, length) {}
- /// Construct an MutableArrayRef from a range.
+ /// Construct a MutableArrayRef from a range.
MutableArrayRef(T *begin, T *end) : ArrayRef<T>(begin, end) {}
- /// Construct an MutableArrayRef from a SmallVector.
+ /// Construct a MutableArrayRef from a SmallVector.
/*implicit*/ MutableArrayRef(SmallVectorImpl<T> &Vec)
: ArrayRef<T>(Vec) {}
@@ -316,12 +324,12 @@
/*implicit*/ MutableArrayRef(std::vector<T> &Vec)
: ArrayRef<T>(Vec) {}
- /// Construct an ArrayRef from a std::array
+ /// Construct a MutableArrayRef from a std::array
template <size_t N>
/*implicit*/ constexpr MutableArrayRef(std::array<T, N> &Arr)
: ArrayRef<T>(Arr) {}
- /// Construct an MutableArrayRef from a C array.
+ /// Construct a MutableArrayRef from a C array.
template <size_t N>
/*implicit*/ constexpr MutableArrayRef(T (&Arr)[N]) : ArrayRef<T>(Arr) {}
@@ -481,6 +489,12 @@
return Vec;
}
+ /// Construct an ArrayRef from a std::array.
+ template <typename T, std::size_t N>
+ ArrayRef<T> makeArrayRef(const std::array<T, N> &Arr) {
+ return Arr;
+ }
+
/// Construct an ArrayRef from an ArrayRef (no-op) (const)
template <typename T> ArrayRef<T> makeArrayRef(const ArrayRef<T> &Vec) {
return Vec;
@@ -518,11 +532,21 @@
return LHS.equals(RHS);
}
- template<typename T>
+ template <typename T>
+ inline bool operator==(SmallVectorImpl<T> &LHS, ArrayRef<T> RHS) {
+ return ArrayRef<T>(LHS).equals(RHS);
+ }
+
+ template <typename T>
inline bool operator!=(ArrayRef<T> LHS, ArrayRef<T> RHS) {
return !(LHS == RHS);
}
+ template <typename T>
+ inline bool operator!=(SmallVectorImpl<T> &LHS, ArrayRef<T> RHS) {
+ return !(LHS == RHS);
+ }
+
/// @}
template <typename T> hash_code hash_value(ArrayRef<T> S) {
diff --git a/linux-x64/clang/include/llvm/ADT/BitVector.h b/linux-x64/clang/include/llvm/ADT/BitVector.h
index fabf5d9..2a85778 100644
--- a/linux-x64/clang/include/llvm/ADT/BitVector.h
+++ b/linux-x64/clang/include/llvm/ADT/BitVector.h
@@ -14,6 +14,7 @@
#define LLVM_ADT_BITVECTOR_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
@@ -71,7 +72,7 @@
};
class BitVector {
- typedef unsigned long BitWord;
+ typedef uintptr_t BitWord;
enum { BITWORD_SIZE = (unsigned)sizeof(BitWord) * CHAR_BIT };
@@ -187,12 +188,12 @@
/// all - Returns true if all bits are set.
bool all() const {
for (unsigned i = 0; i < Size / BITWORD_SIZE; ++i)
- if (Bits[i] != ~0UL)
+ if (Bits[i] != ~BitWord(0))
return false;
// If bits remain check that they are ones. The unused bits are always zero.
if (unsigned Remainder = Size % BITWORD_SIZE)
- return Bits[Size / BITWORD_SIZE] == (1UL << Remainder) - 1;
+ return Bits[Size / BITWORD_SIZE] == (BitWord(1) << Remainder) - 1;
return true;
}
@@ -202,9 +203,10 @@
return !any();
}
- /// find_first_in - Returns the index of the first set bit in the range
- /// [Begin, End). Returns -1 if all bits in the range are unset.
- int find_first_in(unsigned Begin, unsigned End) const {
+ /// find_first_in - Returns the index of the first set / unset bit,
+ /// depending on \p Set, in the range [Begin, End).
+ /// Returns -1 if all bits in the range are unset / set.
+ int find_first_in(unsigned Begin, unsigned End, bool Set = true) const {
assert(Begin <= End && End <= Size);
if (Begin == End)
return -1;
@@ -213,8 +215,14 @@
unsigned LastWord = (End - 1) / BITWORD_SIZE;
// Check subsequent words.
+ // The code below is based on search for the first _set_ bit. If
+ // we're searching for the first _unset_, we just take the
+ // complement of each word before we use it and apply
+ // the same method.
for (unsigned i = FirstWord; i <= LastWord; ++i) {
BitWord Copy = Bits[i];
+ if (!Set)
+ Copy = ~Copy;
if (i == FirstWord) {
unsigned FirstBit = Begin % BITWORD_SIZE;
@@ -265,32 +273,7 @@
/// find_first_unset_in - Returns the index of the first unset bit in the
/// range [Begin, End). Returns -1 if all bits in the range are set.
int find_first_unset_in(unsigned Begin, unsigned End) const {
- assert(Begin <= End && End <= Size);
- if (Begin == End)
- return -1;
-
- unsigned FirstWord = Begin / BITWORD_SIZE;
- unsigned LastWord = (End - 1) / BITWORD_SIZE;
-
- // Check subsequent words.
- for (unsigned i = FirstWord; i <= LastWord; ++i) {
- BitWord Copy = Bits[i];
-
- if (i == FirstWord) {
- unsigned FirstBit = Begin % BITWORD_SIZE;
- Copy |= maskTrailingOnes<BitWord>(FirstBit);
- }
-
- if (i == LastWord) {
- unsigned LastBit = (End - 1) % BITWORD_SIZE;
- Copy |= maskTrailingZeros<BitWord>(LastBit + 1);
- }
- if (Copy != ~0UL) {
- unsigned Result = i * BITWORD_SIZE + countTrailingOnes(Copy);
- return Result < size() ? Result : -1;
- }
- }
- return -1;
+ return find_first_in(Begin, End, /* Set = */ false);
}
/// find_last_unset_in - Returns the index of the last unset bit in the
@@ -317,7 +300,7 @@
Copy |= maskTrailingOnes<BitWord>(FirstBit);
}
- if (Copy != ~0UL) {
+ if (Copy != ~BitWord(0)) {
unsigned Result =
(CurrentWord + 1) * BITWORD_SIZE - countLeadingOnes(Copy) - 1;
return Result < Size ? Result : -1;
@@ -414,21 +397,21 @@
if (I == E) return *this;
if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
- BitWord EMask = 1UL << (E % BITWORD_SIZE);
- BitWord IMask = 1UL << (I % BITWORD_SIZE);
+ BitWord EMask = BitWord(1) << (E % BITWORD_SIZE);
+ BitWord IMask = BitWord(1) << (I % BITWORD_SIZE);
BitWord Mask = EMask - IMask;
Bits[I / BITWORD_SIZE] |= Mask;
return *this;
}
- BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
+ BitWord PrefixMask = ~BitWord(0) << (I % BITWORD_SIZE);
Bits[I / BITWORD_SIZE] |= PrefixMask;
I = alignTo(I, BITWORD_SIZE);
for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
- Bits[I / BITWORD_SIZE] = ~0UL;
+ Bits[I / BITWORD_SIZE] = ~BitWord(0);
- BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
+ BitWord PostfixMask = (BitWord(1) << (E % BITWORD_SIZE)) - 1;
if (I < E)
Bits[I / BITWORD_SIZE] |= PostfixMask;
@@ -453,21 +436,21 @@
if (I == E) return *this;
if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
- BitWord EMask = 1UL << (E % BITWORD_SIZE);
- BitWord IMask = 1UL << (I % BITWORD_SIZE);
+ BitWord EMask = BitWord(1) << (E % BITWORD_SIZE);
+ BitWord IMask = BitWord(1) << (I % BITWORD_SIZE);
BitWord Mask = EMask - IMask;
Bits[I / BITWORD_SIZE] &= ~Mask;
return *this;
}
- BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
+ BitWord PrefixMask = ~BitWord(0) << (I % BITWORD_SIZE);
Bits[I / BITWORD_SIZE] &= ~PrefixMask;
I = alignTo(I, BITWORD_SIZE);
for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
- Bits[I / BITWORD_SIZE] = 0UL;
+ Bits[I / BITWORD_SIZE] = BitWord(0);
- BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
+ BitWord PostfixMask = (BitWord(1) << (E % BITWORD_SIZE)) - 1;
if (I < E)
Bits[I / BITWORD_SIZE] &= ~PostfixMask;
@@ -531,24 +514,10 @@
// Comparison operators.
bool operator==(const BitVector &RHS) const {
- unsigned ThisWords = NumBitWords(size());
- unsigned RHSWords = NumBitWords(RHS.size());
- unsigned i;
- for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
- if (Bits[i] != RHS.Bits[i])
- return false;
-
- // Verify that any extra words are all zeros.
- if (i != ThisWords) {
- for (; i != ThisWords; ++i)
- if (Bits[i])
- return false;
- } else if (i != RHSWords) {
- for (; i != RHSWords; ++i)
- if (RHS.Bits[i])
- return false;
- }
- return true;
+ if (size() != RHS.size())
+ return false;
+ unsigned NumWords = NumBitWords(size());
+ return Bits.take_front(NumWords) == RHS.Bits.take_front(NumWords);
}
bool operator!=(const BitVector &RHS) const {
@@ -719,6 +688,14 @@
if (this == &RHS) return *this;
Size = RHS.size();
+
+ // Handle tombstone when the BitVector is a key of a DenseMap.
+ if (RHS.isInvalid()) {
+ std::free(Bits.data());
+ Bits = None;
+ return *this;
+ }
+
unsigned RHSWords = NumBitWords(Size);
if (Size <= getBitCapacity()) {
if (Size)
@@ -758,6 +735,16 @@
std::swap(Size, RHS.Size);
}
+ void invalid() {
+ assert(!Size && Bits.empty());
+ Size = (unsigned)-1;
+ }
+ bool isInvalid() const { return Size == (unsigned)-1; }
+
+ ArrayRef<BitWord> getData() const {
+ return Bits.take_front(NumBitWords(size()));
+ }
+
//===--------------------------------------------------------------------===//
// Portable bit mask operations.
//===--------------------------------------------------------------------===//
@@ -868,7 +855,7 @@
// Then set any stray high bits of the last used word.
unsigned ExtraBits = Size % BITWORD_SIZE;
if (ExtraBits) {
- BitWord ExtraBitMask = ~0UL << ExtraBits;
+ BitWord ExtraBitMask = ~BitWord(0) << ExtraBits;
if (t)
Bits[UsedWords-1] |= ExtraBitMask;
else
@@ -932,6 +919,23 @@
return X.getMemorySize();
}
+template <> struct DenseMapInfo<BitVector> {
+ static inline BitVector getEmptyKey() { return BitVector(); }
+ static inline BitVector getTombstoneKey() {
+ BitVector V;
+ V.invalid();
+ return V;
+ }
+ static unsigned getHashValue(const BitVector &V) {
+ return DenseMapInfo<std::pair<unsigned, ArrayRef<uintptr_t>>>::getHashValue(
+ std::make_pair(V.size(), V.getData()));
+ }
+ static bool isEqual(const BitVector &LHS, const BitVector &RHS) {
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return LHS.isInvalid() == RHS.isInvalid();
+ return LHS == RHS;
+ }
+};
} // end namespace llvm
namespace std {
diff --git a/linux-x64/clang/include/llvm/ADT/Bitfields.h b/linux-x64/clang/include/llvm/ADT/Bitfields.h
new file mode 100644
index 0000000..d93f648
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/Bitfields.h
@@ -0,0 +1,289 @@
+//===-- llvm/ADT/Bitfield.h - Get and Set bits in an integer ---*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements methods to test, set and extract typed bits from packed
+/// unsigned integers.
+///
+/// Why not C++ bitfields?
+/// ----------------------
+/// C++ bitfields do not offer control over the bit layout nor consistent
+/// behavior when it comes to out of range values.
+/// For instance, the layout is implementation defined and adjacent bits may be
+/// packed together but are not required to. This is problematic when storage is
+/// sparse and data must be stored in a particular integer type.
+///
+/// The methods provided in this file ensure precise control over the
+/// layout/storage as well as protection against out of range values.
+///
+/// Usage example
+/// -------------
+/// \code{.cpp}
+/// uint8_t Storage = 0;
+///
+/// // Store and retrieve a single bit as bool.
+/// using Bool = Bitfield::Element<bool, 0, 1>;
+/// Bitfield::set<Bool>(Storage, true);
+/// EXPECT_EQ(Storage, 0b00000001);
+/// // ^
+/// EXPECT_EQ(Bitfield::get<Bool>(Storage), true);
+///
+/// // Store and retrieve a 2 bit typed enum.
+/// // Note: enum underlying type must be unsigned.
+/// enum class SuitEnum : uint8_t { CLUBS, DIAMONDS, HEARTS, SPADES };
+/// // Note: enum maximum value needs to be passed in as last parameter.
+/// using Suit = Bitfield::Element<SuitEnum, 1, 2, SuitEnum::SPADES>;
+/// Bitfield::set<Suit>(Storage, SuitEnum::HEARTS);
+/// EXPECT_EQ(Storage, 0b00000101);
+/// // ^^
+/// EXPECT_EQ(Bitfield::get<Suit>(Storage), SuitEnum::HEARTS);
+///
+/// // Store and retrieve a 5 bit value as unsigned.
+/// using Value = Bitfield::Element<unsigned, 3, 5>;
+/// Bitfield::set<Value>(Storage, 10);
+/// EXPECT_EQ(Storage, 0b01010101);
+/// // ^^^^^
+/// EXPECT_EQ(Bitfield::get<Value>(Storage), 10U);
+///
+/// // Interpret the same 5 bit value as signed.
+/// using SignedValue = Bitfield::Element<int, 3, 5>;
+/// Bitfield::set<SignedValue>(Storage, -2);
+/// EXPECT_EQ(Storage, 0b11110101);
+/// // ^^^^^
+/// EXPECT_EQ(Bitfield::get<SignedValue>(Storage), -2);
+///
+/// // Ability to efficiently test if a field is non zero.
+/// EXPECT_TRUE(Bitfield::test<Value>(Storage));
+///
+/// // Alter Storage changes value.
+/// Storage = 0;
+/// EXPECT_EQ(Bitfield::get<Bool>(Storage), false);
+/// EXPECT_EQ(Bitfield::get<Suit>(Storage), SuitEnum::CLUBS);
+/// EXPECT_EQ(Bitfield::get<Value>(Storage), 0U);
+/// EXPECT_EQ(Bitfield::get<SignedValue>(Storage), 0);
+///
+/// Storage = 255;
+/// EXPECT_EQ(Bitfield::get<Bool>(Storage), true);
+/// EXPECT_EQ(Bitfield::get<Suit>(Storage), SuitEnum::SPADES);
+/// EXPECT_EQ(Bitfield::get<Value>(Storage), 31U);
+/// EXPECT_EQ(Bitfield::get<SignedValue>(Storage), -1);
+/// \endcode
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_BITFIELDS_H
+#define LLVM_ADT_BITFIELDS_H
+
+#include <cassert>
+#include <climits> // CHAR_BIT
+#include <cstddef> // size_t
+#include <cstdint> // uintXX_t
+#include <limits> // numeric_limits
+#include <type_traits>
+
+namespace llvm {
+
+namespace bitfields_details {
+
+/// A struct defining useful bit patterns for n-bits integer types.
+template <typename T, unsigned Bits> struct BitPatterns {
+ /// Bit patterns are forged using the equivalent `Unsigned` type because of
+ /// undefined operations over signed types (e.g. Bitwise shift operators).
+ /// Moreover same size casting from unsigned to signed is well defined but not
+ /// the other way around.
+ using Unsigned = typename std::make_unsigned<T>::type;
+ static_assert(sizeof(Unsigned) == sizeof(T), "Types must have same size");
+
+ static constexpr unsigned TypeBits = sizeof(Unsigned) * CHAR_BIT;
+ static_assert(TypeBits >= Bits, "n-bit must fit in T");
+
+ /// e.g. with TypeBits == 8 and Bits == 6.
+ static constexpr Unsigned AllZeros = Unsigned(0); // 00000000
+ static constexpr Unsigned AllOnes = ~Unsigned(0); // 11111111
+ static constexpr Unsigned Umin = AllZeros; // 00000000
+ static constexpr Unsigned Umax = AllOnes >> (TypeBits - Bits); // 00111111
+ static constexpr Unsigned SignBitMask = Unsigned(1) << (Bits - 1); // 00100000
+ static constexpr Unsigned Smax = Umax >> 1U; // 00011111
+ static constexpr Unsigned Smin = ~Smax; // 11100000
+ static constexpr Unsigned SignExtend = Unsigned(Smin << 1U); // 11000000
+};
+
+/// `Compressor` is used to manipulate the bits of a (possibly signed) integer
+/// type so it can be packed and unpacked into a `bits` sized integer.
+/// `Compressor` is specialized on signed-ness so no runtime cost is incurred.
+/// The `pack` method also checks that the passed in `UserValue` is valid.
+template <typename T, unsigned Bits, bool = std::is_unsigned<T>::value>
+struct Compressor {
+ static_assert(std::is_unsigned<T>::value, "T is unsigned");
+ using BP = BitPatterns<T, Bits>;
+
+ static T pack(T UserValue, T UserMaxValue) {
+ assert(UserValue <= UserMaxValue && "value is too big");
+ assert(UserValue <= BP::Umax && "value is too big");
+ return UserValue;
+ }
+
+ static T unpack(T StorageValue) { return StorageValue; }
+};
+
+template <typename T, unsigned Bits> struct Compressor<T, Bits, false> {
+ static_assert(std::is_signed<T>::value, "T is signed");
+ using BP = BitPatterns<T, Bits>;
+
+ static T pack(T UserValue, T UserMaxValue) {
+ assert(UserValue <= UserMaxValue && "value is too big");
+ assert(UserValue <= T(BP::Smax) && "value is too big");
+ assert(UserValue >= T(BP::Smin) && "value is too small");
+ if (UserValue < 0)
+ UserValue &= ~BP::SignExtend;
+ return UserValue;
+ }
+
+ static T unpack(T StorageValue) {
+ if (StorageValue >= T(BP::SignBitMask))
+ StorageValue |= BP::SignExtend;
+ return StorageValue;
+ }
+};
+
+/// Impl is where Bitfield description and Storage are put together to interact
+/// with values.
+template <typename Bitfield, typename StorageType> struct Impl {
+ static_assert(std::is_unsigned<StorageType>::value,
+ "Storage must be unsigned");
+ using IntegerType = typename Bitfield::IntegerType;
+ using C = Compressor<IntegerType, Bitfield::Bits>;
+ using BP = BitPatterns<StorageType, Bitfield::Bits>;
+
+ static constexpr size_t StorageBits = sizeof(StorageType) * CHAR_BIT;
+ static_assert(Bitfield::FirstBit <= StorageBits, "Data must fit in mask");
+ static_assert(Bitfield::LastBit <= StorageBits, "Data must fit in mask");
+ static constexpr StorageType Mask = BP::Umax << Bitfield::Shift;
+
+ /// Checks `UserValue` is within bounds and packs it between `FirstBit` and
+ /// `LastBit` of `Packed` leaving the rest unchanged.
+ static void update(StorageType &Packed, IntegerType UserValue) {
+ const StorageType StorageValue = C::pack(UserValue, Bitfield::UserMaxValue);
+ Packed &= ~Mask;
+ Packed |= StorageValue << Bitfield::Shift;
+ }
+
+ /// Interprets bits between `FirstBit` and `LastBit` of `Packed` as
+ /// an `IntegerType`.
+ static IntegerType extract(StorageType Packed) {
+ const StorageType StorageValue = (Packed & Mask) >> Bitfield::Shift;
+ return C::unpack(StorageValue);
+ }
+
+ /// Returns a non-zero value if any bit between `FirstBit` and `LastBit`
+ /// of `Packed` is set.
+ static StorageType test(StorageType Packed) { return Packed & Mask; }
+};
+
+/// `Bitfield` deals with the following type:
+/// - unsigned enums
+/// - signed and unsigned integer
+/// - `bool`
+/// Internally though we only manipulate integer with well defined and
+/// consistent semantics, this excludes typed enums and `bool` that are replaced
+/// with their unsigned counterparts. The correct type is restored in the public
+/// API.
+template <typename T, bool = std::is_enum<T>::value>
+struct ResolveUnderlyingType {
+ using type = typename std::underlying_type<T>::type;
+};
+template <typename T> struct ResolveUnderlyingType<T, false> {
+ using type = T;
+};
+template <> struct ResolveUnderlyingType<bool, false> {
+ /// In case sizeof(bool) != 1, replace `void` by an additional
+ /// std::conditional.
+ using type = std::conditional<sizeof(bool) == 1, uint8_t, void>::type;
+};
+
+} // namespace bitfields_details
+
+/// Holds functions to get, set or test bitfields.
+struct Bitfield {
+ /// Describes an element of a Bitfield. This type is then used with the
+ /// Bitfield static member functions.
+ /// \tparam T The type of the field once in unpacked form.
+ /// \tparam Offset The position of the first bit.
+ /// \tparam Size The size of the field.
+ /// \tparam MaxValue For enums the maximum enum allowed.
+ template <typename T, unsigned Offset, unsigned Size,
+ T MaxValue = std::is_enum<T>::value
+ ? T(0) // coupled with static_assert below
+ : std::numeric_limits<T>::max()>
+ struct Element {
+ using Type = T;
+ using IntegerType =
+ typename bitfields_details::ResolveUnderlyingType<T>::type;
+ static constexpr unsigned Shift = Offset;
+ static constexpr unsigned Bits = Size;
+ static constexpr unsigned FirstBit = Offset;
+ static constexpr unsigned LastBit = Shift + Bits - 1;
+ static constexpr unsigned NextBit = Shift + Bits;
+
+ private:
+ template <typename, typename> friend struct bitfields_details::Impl;
+
+ static_assert(Bits > 0, "Bits must be non zero");
+ static constexpr size_t TypeBits = sizeof(IntegerType) * CHAR_BIT;
+ static_assert(Bits <= TypeBits, "Bits may not be greater than T size");
+ static_assert(!std::is_enum<T>::value || MaxValue != T(0),
+ "Enum Bitfields must provide a MaxValue");
+ static_assert(!std::is_enum<T>::value ||
+ std::is_unsigned<IntegerType>::value,
+ "Enum must be unsigned");
+ static_assert(std::is_integral<IntegerType>::value &&
+ std::numeric_limits<IntegerType>::is_integer,
+ "IntegerType must be an integer type");
+
+ static constexpr IntegerType UserMaxValue =
+ static_cast<IntegerType>(MaxValue);
+ };
+
+ /// Unpacks the field from the `Packed` value.
+ template <typename Bitfield, typename StorageType>
+ static typename Bitfield::Type get(StorageType Packed) {
+ using I = bitfields_details::Impl<Bitfield, StorageType>;
+ return static_cast<typename Bitfield::Type>(I::extract(Packed));
+ }
+
+ /// Return a non-zero value if the field is non-zero.
+ /// It is more efficient than `get`.
+ template <typename Bitfield, typename StorageType>
+ static StorageType test(StorageType Packed) {
+ using I = bitfields_details::Impl<Bitfield, StorageType>;
+ return I::test(Packed);
+ }
+
+ /// Sets the typed value in the provided `Packed` value.
+ /// The method asserts if the provided value is too big to fit in.
+ template <typename Bitfield, typename StorageType>
+ static void set(StorageType &Packed, typename Bitfield::Type Value) {
+ using I = bitfields_details::Impl<Bitfield, StorageType>;
+ I::update(Packed, static_cast<typename Bitfield::IntegerType>(Value));
+ }
+
+ /// Returns whether the two bitfields share common bits.
+ template <typename A, typename B> static constexpr bool isOverlapping() {
+ return A::LastBit >= B::FirstBit && B::LastBit >= A::FirstBit;
+ }
+
+ template <typename A> static constexpr bool areContiguous() { return true; }
+ template <typename A, typename B, typename... Others>
+ static constexpr bool areContiguous() {
+ return A::NextBit == B::FirstBit && areContiguous<B, Others...>();
+ }
+};
+
+} // namespace llvm
+
+#endif // LLVM_ADT_BITFIELDS_H
diff --git a/linux-x64/clang/include/llvm/ADT/BitmaskEnum.h b/linux-x64/clang/include/llvm/ADT/BitmaskEnum.h
index 1a18bc7..89e5508 100644
--- a/linux-x64/clang/include/llvm/ADT/BitmaskEnum.h
+++ b/linux-x64/clang/include/llvm/ADT/BitmaskEnum.h
@@ -71,49 +71,49 @@
template <typename E>
struct is_bitmask_enum<
- E, typename std::enable_if<sizeof(E::LLVM_BITMASK_LARGEST_ENUMERATOR) >=
- 0>::type> : std::true_type {};
+ E, std::enable_if_t<sizeof(E::LLVM_BITMASK_LARGEST_ENUMERATOR) >= 0>>
+ : std::true_type {};
namespace BitmaskEnumDetail {
/// Get a bitmask with 1s in all places up to the high-order bit of E's largest
/// value.
-template <typename E> typename std::underlying_type<E>::type Mask() {
+template <typename E> std::underlying_type_t<E> Mask() {
// On overflow, NextPowerOf2 returns zero with the type uint64_t, so
// subtracting 1 gives us the mask with all bits set, like we want.
- return NextPowerOf2(static_cast<typename std::underlying_type<E>::type>(
+ return NextPowerOf2(static_cast<std::underlying_type_t<E>>(
E::LLVM_BITMASK_LARGEST_ENUMERATOR)) -
1;
}
/// Check that Val is in range for E, and return Val cast to E's underlying
/// type.
-template <typename E> typename std::underlying_type<E>::type Underlying(E Val) {
- auto U = static_cast<typename std::underlying_type<E>::type>(Val);
+template <typename E> std::underlying_type_t<E> Underlying(E Val) {
+ auto U = static_cast<std::underlying_type_t<E>>(Val);
assert(U >= 0 && "Negative enum values are not allowed.");
assert(U <= Mask<E>() && "Enum value too large (or largest val too small?)");
return U;
}
-template <typename E,
- typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+constexpr unsigned bitWidth(uint64_t Value) {
+ return Value ? 1 + bitWidth(Value >> 1) : 0;
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
E operator~(E Val) {
return static_cast<E>(~Underlying(Val) & Mask<E>());
}
-template <typename E,
- typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
E operator|(E LHS, E RHS) {
return static_cast<E>(Underlying(LHS) | Underlying(RHS));
}
-template <typename E,
- typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
E operator&(E LHS, E RHS) {
return static_cast<E>(Underlying(LHS) & Underlying(RHS));
}
-template <typename E,
- typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
E operator^(E LHS, E RHS) {
return static_cast<E>(Underlying(LHS) ^ Underlying(RHS));
}
@@ -121,22 +121,19 @@
// |=, &=, and ^= return a reference to LHS, to match the behavior of the
// operators on builtin types.
-template <typename E,
- typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
E &operator|=(E &LHS, E RHS) {
LHS = LHS | RHS;
return LHS;
}
-template <typename E,
- typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
E &operator&=(E &LHS, E RHS) {
LHS = LHS & RHS;
return LHS;
}
-template <typename E,
- typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
E &operator^=(E &LHS, E RHS) {
LHS = LHS ^ RHS;
return LHS;
@@ -146,6 +143,10 @@
// Enable bitmask enums in namespace ::llvm and all nested namespaces.
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
+constexpr unsigned BitWidth = BitmaskEnumDetail::bitWidth(uint64_t{
+ static_cast<std::underlying_type_t<E>>(
+ E::LLVM_BITMASK_LARGEST_ENUMERATOR)});
} // namespace llvm
diff --git a/linux-x64/clang/include/llvm/ADT/CachedHashString.h b/linux-x64/clang/include/llvm/ADT/CachedHashString.h
index 80144fb..6233d0f 100644
--- a/linux-x64/clang/include/llvm/ADT/CachedHashString.h
+++ b/linux-x64/clang/include/llvm/ADT/CachedHashString.h
@@ -19,9 +19,8 @@
#ifndef LLVM_ADT_CACHED_HASH_STRING_H
#define LLVM_ADT_CACHED_HASH_STRING_H
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/raw_ostream.h"
namespace llvm {
diff --git a/linux-x64/clang/include/llvm/ADT/CoalescingBitVector.h b/linux-x64/clang/include/llvm/ADT/CoalescingBitVector.h
new file mode 100644
index 0000000..0a7dcfe
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/CoalescingBitVector.h
@@ -0,0 +1,443 @@
+//===- llvm/ADT/CoalescingBitVector.h - A coalescing bitvector --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file A bitvector that uses an IntervalMap to coalesce adjacent elements
+/// into intervals.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_COALESCINGBITVECTOR_H
+#define LLVM_ADT_COALESCINGBITVECTOR_H
+
+#include "llvm/ADT/IntervalMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <algorithm>
+#include <initializer_list>
+
+namespace llvm {
+
+/// A bitvector that, under the hood, relies on an IntervalMap to coalesce
+/// elements into intervals. Good for representing sets which predominantly
+/// contain contiguous ranges. Bad for representing sets with lots of gaps
+/// between elements.
+///
+/// Compared to SparseBitVector, CoalescingBitVector offers more predictable
+/// performance for non-sequential find() operations.
+///
+/// \tparam IndexT - The type of the index into the bitvector.
+template <typename IndexT> class CoalescingBitVector {
+ static_assert(std::is_unsigned<IndexT>::value,
+ "Index must be an unsigned integer.");
+
+ using ThisT = CoalescingBitVector<IndexT>;
+
+ /// An interval map for closed integer ranges. The mapped values are unused.
+ using MapT = IntervalMap<IndexT, char>;
+
+ using UnderlyingIterator = typename MapT::const_iterator;
+
+ using IntervalT = std::pair<IndexT, IndexT>;
+
+public:
+ using Allocator = typename MapT::Allocator;
+
+ /// Construct by passing in a CoalescingBitVector<IndexT>::Allocator
+ /// reference.
+ CoalescingBitVector(Allocator &Alloc)
+ : Alloc(&Alloc), Intervals(Alloc) {}
+
+ /// \name Copy/move constructors and assignment operators.
+ /// @{
+
+ CoalescingBitVector(const ThisT &Other)
+ : Alloc(Other.Alloc), Intervals(*Other.Alloc) {
+ set(Other);
+ }
+
+ ThisT &operator=(const ThisT &Other) {
+ clear();
+ set(Other);
+ return *this;
+ }
+
+ CoalescingBitVector(ThisT &&Other) = delete;
+ ThisT &operator=(ThisT &&Other) = delete;
+
+ /// @}
+
+ /// Clear all the bits.
+ void clear() { Intervals.clear(); }
+
+ /// Check whether no bits are set.
+ bool empty() const { return Intervals.empty(); }
+
+ /// Count the number of set bits.
+ unsigned count() const {
+ unsigned Bits = 0;
+ for (auto It = Intervals.begin(), End = Intervals.end(); It != End; ++It)
+ Bits += 1 + It.stop() - It.start();
+ return Bits;
+ }
+
+ /// Set the bit at \p Index.
+ ///
+ /// This method does /not/ support setting a bit that has already been set,
+ /// for efficiency reasons. If possible, restructure your code to not set the
+ /// same bit multiple times, or use \ref test_and_set.
+ void set(IndexT Index) {
+ assert(!test(Index) && "Setting already-set bits not supported/efficient, "
+ "IntervalMap will assert");
+ insert(Index, Index);
+ }
+
+ /// Set the bits set in \p Other.
+ ///
+ /// This method does /not/ support setting already-set bits, see \ref set
+ /// for the rationale. For a safe set union operation, use \ref operator|=.
+ void set(const ThisT &Other) {
+ for (auto It = Other.Intervals.begin(), End = Other.Intervals.end();
+ It != End; ++It)
+ insert(It.start(), It.stop());
+ }
+
+ /// Set the bits at \p Indices. Used for testing, primarily.
+ void set(std::initializer_list<IndexT> Indices) {
+ for (IndexT Index : Indices)
+ set(Index);
+ }
+
+ /// Check whether the bit at \p Index is set.
+ bool test(IndexT Index) const {
+ const auto It = Intervals.find(Index);
+ if (It == Intervals.end())
+ return false;
+ assert(It.stop() >= Index && "Interval must end after Index");
+ return It.start() <= Index;
+ }
+
+ /// Set the bit at \p Index. Supports setting an already-set bit.
+ void test_and_set(IndexT Index) {
+ if (!test(Index))
+ set(Index);
+ }
+
+ /// Reset the bit at \p Index. Supports resetting an already-unset bit.
+ void reset(IndexT Index) {
+ auto It = Intervals.find(Index);
+ if (It == Intervals.end())
+ return;
+
+ // Split the interval containing Index into up to two parts: one from
+ // [Start, Index-1] and another from [Index+1, Stop]. If Index is equal to
+ // either Start or Stop, we create one new interval. If Index is equal to
+ // both Start and Stop, we simply erase the existing interval.
+ IndexT Start = It.start();
+ if (Index < Start)
+ // The index was not set.
+ return;
+ IndexT Stop = It.stop();
+ assert(Index <= Stop && "Wrong interval for index");
+ It.erase();
+ if (Start < Index)
+ insert(Start, Index - 1);
+ if (Index < Stop)
+ insert(Index + 1, Stop);
+ }
+
+ /// Set union. If \p RHS is guaranteed to not overlap with this, \ref set may
+ /// be a faster alternative.
+ void operator|=(const ThisT &RHS) {
+ // Get the overlaps between the two interval maps.
+ SmallVector<IntervalT, 8> Overlaps;
+ getOverlaps(RHS, Overlaps);
+
+ // Insert the non-overlapping parts of all the intervals from RHS.
+ for (auto It = RHS.Intervals.begin(), End = RHS.Intervals.end();
+ It != End; ++It) {
+ IndexT Start = It.start();
+ IndexT Stop = It.stop();
+ SmallVector<IntervalT, 8> NonOverlappingParts;
+ getNonOverlappingParts(Start, Stop, Overlaps, NonOverlappingParts);
+ for (IntervalT AdditivePortion : NonOverlappingParts)
+ insert(AdditivePortion.first, AdditivePortion.second);
+ }
+ }
+
+ /// Set intersection.
+ void operator&=(const ThisT &RHS) {
+ // Get the overlaps between the two interval maps (i.e. the intersection).
+ SmallVector<IntervalT, 8> Overlaps;
+ getOverlaps(RHS, Overlaps);
+ // Rebuild the interval map, including only the overlaps.
+ clear();
+ for (IntervalT Overlap : Overlaps)
+ insert(Overlap.first, Overlap.second);
+ }
+
+ /// Reset all bits present in \p Other.
+ void intersectWithComplement(const ThisT &Other) {
+ SmallVector<IntervalT, 8> Overlaps;
+ if (!getOverlaps(Other, Overlaps)) {
+ // If there is no overlap with Other, the intersection is empty.
+ return;
+ }
+
+ // Delete the overlapping intervals. Split up intervals that only partially
+ // intersect an overlap.
+ for (IntervalT Overlap : Overlaps) {
+ IndexT OlapStart, OlapStop;
+ std::tie(OlapStart, OlapStop) = Overlap;
+
+ auto It = Intervals.find(OlapStart);
+ IndexT CurrStart = It.start();
+ IndexT CurrStop = It.stop();
+ assert(CurrStart <= OlapStart && OlapStop <= CurrStop &&
+ "Expected some intersection!");
+
+ // Split the overlap interval into up to two parts: one from [CurrStart,
+ // OlapStart-1] and another from [OlapStop+1, CurrStop]. If OlapStart is
+ // equal to CurrStart, the first split interval is unnecessary. Ditto for
+ // when OlapStop is equal to CurrStop, we omit the second split interval.
+ It.erase();
+ if (CurrStart < OlapStart)
+ insert(CurrStart, OlapStart - 1);
+ if (OlapStop < CurrStop)
+ insert(OlapStop + 1, CurrStop);
+ }
+ }
+
+ bool operator==(const ThisT &RHS) const {
+ // We cannot just use std::equal because it checks the dereferenced values
+ // of an iterator pair for equality, not the iterators themselves. In our
+ // case that results in comparison of the (unused) IntervalMap values.
+ auto ItL = Intervals.begin();
+ auto ItR = RHS.Intervals.begin();
+ while (ItL != Intervals.end() && ItR != RHS.Intervals.end() &&
+ ItL.start() == ItR.start() && ItL.stop() == ItR.stop()) {
+ ++ItL;
+ ++ItR;
+ }
+ return ItL == Intervals.end() && ItR == RHS.Intervals.end();
+ }
+
+ bool operator!=(const ThisT &RHS) const { return !operator==(RHS); }
+
+ class const_iterator
+ : public std::iterator<std::forward_iterator_tag, IndexT> {
+ friend class CoalescingBitVector;
+
+ // For performance reasons, make the offset at the end different than the
+ // one used in \ref begin, to optimize the common `It == end()` pattern.
+ static constexpr unsigned kIteratorAtTheEndOffset = ~0u;
+
+ UnderlyingIterator MapIterator;
+ unsigned OffsetIntoMapIterator = 0;
+
+ // Querying the start/stop of an IntervalMap iterator can be very expensive.
+ // Cache these values for performance reasons.
+ IndexT CachedStart = IndexT();
+ IndexT CachedStop = IndexT();
+
+ void setToEnd() {
+ OffsetIntoMapIterator = kIteratorAtTheEndOffset;
+ CachedStart = IndexT();
+ CachedStop = IndexT();
+ }
+
+ /// MapIterator has just changed, reset the cached state to point to the
+ /// start of the new underlying iterator.
+ void resetCache() {
+ if (MapIterator.valid()) {
+ OffsetIntoMapIterator = 0;
+ CachedStart = MapIterator.start();
+ CachedStop = MapIterator.stop();
+ } else {
+ setToEnd();
+ }
+ }
+
+ /// Advance the iterator to \p Index, if it is contained within the current
+ /// interval. The public-facing method which supports advancing past the
+ /// current interval is \ref advanceToLowerBound.
+ void advanceTo(IndexT Index) {
+ assert(Index <= CachedStop && "Cannot advance to OOB index");
+ if (Index < CachedStart)
+ // We're already past this index.
+ return;
+ OffsetIntoMapIterator = Index - CachedStart;
+ }
+
+ const_iterator(UnderlyingIterator MapIt) : MapIterator(MapIt) {
+ resetCache();
+ }
+
+ public:
+ const_iterator() { setToEnd(); }
+
+ bool operator==(const const_iterator &RHS) const {
+ // Do /not/ compare MapIterator for equality, as this is very expensive.
+ // The cached start/stop values make that check unnecessary.
+ return std::tie(OffsetIntoMapIterator, CachedStart, CachedStop) ==
+ std::tie(RHS.OffsetIntoMapIterator, RHS.CachedStart,
+ RHS.CachedStop);
+ }
+
+ bool operator!=(const const_iterator &RHS) const {
+ return !operator==(RHS);
+ }
+
+ IndexT operator*() const { return CachedStart + OffsetIntoMapIterator; }
+
+ const_iterator &operator++() { // Pre-increment (++It).
+ if (CachedStart + OffsetIntoMapIterator < CachedStop) {
+ // Keep going within the current interval.
+ ++OffsetIntoMapIterator;
+ } else {
+ // We reached the end of the current interval: advance.
+ ++MapIterator;
+ resetCache();
+ }
+ return *this;
+ }
+
+ const_iterator operator++(int) { // Post-increment (It++).
+ const_iterator tmp = *this;
+ operator++();
+ return tmp;
+ }
+
+ /// Advance the iterator to the first set bit AT, OR AFTER, \p Index. If
+ /// no such set bit exists, advance to end(). This is like std::lower_bound.
+ /// This is useful if \p Index is close to the current iterator position.
+ /// However, unlike \ref find(), this has worst-case O(n) performance.
+ void advanceToLowerBound(IndexT Index) {
+ if (OffsetIntoMapIterator == kIteratorAtTheEndOffset)
+ return;
+
+ // Advance to the first interval containing (or past) Index, or to end().
+ while (Index > CachedStop) {
+ ++MapIterator;
+ resetCache();
+ if (OffsetIntoMapIterator == kIteratorAtTheEndOffset)
+ return;
+ }
+
+ advanceTo(Index);
+ }
+ };
+
+ const_iterator begin() const { return const_iterator(Intervals.begin()); }
+
+ const_iterator end() const { return const_iterator(); }
+
+ /// Return an iterator pointing to the first set bit AT, OR AFTER, \p Index.
+ /// If no such set bit exists, return end(). This is like std::lower_bound.
+ /// This has worst-case logarithmic performance (roughly O(log(gaps between
+ /// contiguous ranges))).
+ const_iterator find(IndexT Index) const {
+ auto UnderlyingIt = Intervals.find(Index);
+ if (UnderlyingIt == Intervals.end())
+ return end();
+ auto It = const_iterator(UnderlyingIt);
+ It.advanceTo(Index);
+ return It;
+ }
+
+ /// Return a range iterator which iterates over all of the set bits in the
+ /// half-open range [Start, End).
+ iterator_range<const_iterator> half_open_range(IndexT Start,
+ IndexT End) const {
+ assert(Start < End && "Not a valid range");
+ auto StartIt = find(Start);
+ if (StartIt == end() || *StartIt >= End)
+ return {end(), end()};
+ auto EndIt = StartIt;
+ EndIt.advanceToLowerBound(End);
+ return {StartIt, EndIt};
+ }
+
+ void print(raw_ostream &OS) const {
+ OS << "{";
+ for (auto It = Intervals.begin(), End = Intervals.end(); It != End;
+ ++It) {
+ OS << "[" << It.start();
+ if (It.start() != It.stop())
+ OS << ", " << It.stop();
+ OS << "]";
+ }
+ OS << "}";
+ }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump() const {
+ // LLDB swallows the first line of output after calling dump(). Add
+ // newlines before/after the braces to work around this.
+ dbgs() << "\n";
+ print(dbgs());
+ dbgs() << "\n";
+ }
+#endif
+
+private:
+ void insert(IndexT Start, IndexT End) { Intervals.insert(Start, End, 0); }
+
+ /// Record the overlaps between \p this and \p Other in \p Overlaps. Return
+ /// true if there is any overlap.
+ bool getOverlaps(const ThisT &Other,
+ SmallVectorImpl<IntervalT> &Overlaps) const {
+ for (IntervalMapOverlaps<MapT, MapT> I(Intervals, Other.Intervals);
+ I.valid(); ++I)
+ Overlaps.emplace_back(I.start(), I.stop());
+ assert(llvm::is_sorted(Overlaps,
+ [](IntervalT LHS, IntervalT RHS) {
+ return LHS.second < RHS.first;
+ }) &&
+ "Overlaps must be sorted");
+ return !Overlaps.empty();
+ }
+
+ /// Given the set of overlaps between this and some other bitvector, and an
+ /// interval [Start, Stop] from that bitvector, determine the portions of the
+ /// interval which do not overlap with this.
+ void getNonOverlappingParts(IndexT Start, IndexT Stop,
+ const SmallVectorImpl<IntervalT> &Overlaps,
+ SmallVectorImpl<IntervalT> &NonOverlappingParts) {
+ IndexT NextUncoveredBit = Start;
+ for (IntervalT Overlap : Overlaps) {
+ IndexT OlapStart, OlapStop;
+ std::tie(OlapStart, OlapStop) = Overlap;
+
+ // [Start;Stop] and [OlapStart;OlapStop] overlap iff OlapStart <= Stop
+ // and Start <= OlapStop.
+ bool DoesOverlap = OlapStart <= Stop && Start <= OlapStop;
+ if (!DoesOverlap)
+ continue;
+
+ // Cover the range [NextUncoveredBit, OlapStart). This puts the start of
+ // the next uncovered range at OlapStop+1.
+ if (NextUncoveredBit < OlapStart)
+ NonOverlappingParts.emplace_back(NextUncoveredBit, OlapStart - 1);
+ NextUncoveredBit = OlapStop + 1;
+ if (NextUncoveredBit > Stop)
+ break;
+ }
+ if (NextUncoveredBit <= Stop)
+ NonOverlappingParts.emplace_back(NextUncoveredBit, Stop);
+ }
+
+ Allocator *Alloc;
+ MapT Intervals;
+};
+
+} // namespace llvm
+
+#endif // LLVM_ADT_COALESCINGBITVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/DAGDeltaAlgorithm.h b/linux-x64/clang/include/llvm/ADT/DAGDeltaAlgorithm.h
index d4cdc3c..c3872af 100644
--- a/linux-x64/clang/include/llvm/ADT/DAGDeltaAlgorithm.h
+++ b/linux-x64/clang/include/llvm/ADT/DAGDeltaAlgorithm.h
@@ -29,7 +29,7 @@
///
/// P(S) => P(S union pred(S))
///
-/// The minization algorithm uses this dependency information to attempt to
+/// The minimization algorithm uses this dependency information to attempt to
/// eagerly prune large subsets of changes. As with \see DeltaAlgorithm, the DAG
/// is not required to satisfy this property, but the algorithm will run
/// substantially fewer tests with appropriate dependencies. \see DeltaAlgorithm
diff --git a/linux-x64/clang/include/llvm/ADT/DeltaAlgorithm.h b/linux-x64/clang/include/llvm/ADT/DeltaAlgorithm.h
index 114b954..e1743fd 100644
--- a/linux-x64/clang/include/llvm/ADT/DeltaAlgorithm.h
+++ b/linux-x64/clang/include/llvm/ADT/DeltaAlgorithm.h
@@ -54,7 +54,7 @@
/// Split - Partition a set of changes \p S into one or two subsets.
void Split(const changeset_ty &S, changesetlist_ty &Res);
- /// Delta - Minimize a set of \p Changes which has been partioned into
+ /// Delta - Minimize a set of \p Changes which has been partitioned into
/// smaller sets, by attempting to remove individual subsets.
changeset_ty Delta(const changeset_ty &Changes,
const changesetlist_ty &Sets);
diff --git a/linux-x64/clang/include/llvm/ADT/DenseMap.h b/linux-x64/clang/include/llvm/ADT/DenseMap.h
index a05cf81..ce0b05d 100644
--- a/linux-x64/clang/include/llvm/ADT/DenseMap.h
+++ b/linux-x64/clang/include/llvm/ADT/DenseMap.h
@@ -18,6 +18,7 @@
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/MemAlloc.h"
#include "llvm/Support/ReverseIteration.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
@@ -38,33 +39,7 @@
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
-
- // FIXME: Switch to inheriting constructors when we drop support for older
- // clang versions.
- // NOTE: This default constructor is declared with '{}' rather than
- // '= default' to work around a separate bug in clang-3.8. This can
- // also go when we switch to inheriting constructors.
- DenseMapPair() {}
-
- DenseMapPair(const KeyT &Key, const ValueT &Value)
- : std::pair<KeyT, ValueT>(Key, Value) {}
-
- DenseMapPair(KeyT &&Key, ValueT &&Value)
- : std::pair<KeyT, ValueT>(std::move(Key), std::move(Value)) {}
-
- template <typename AltKeyT, typename AltValueT>
- DenseMapPair(AltKeyT &&AltKey, AltValueT &&AltValue,
- typename std::enable_if<
- std::is_convertible<AltKeyT, KeyT>::value &&
- std::is_convertible<AltValueT, ValueT>::value>::type * = 0)
- : std::pair<KeyT, ValueT>(std::forward<AltKeyT>(AltKey),
- std::forward<AltValueT>(AltValue)) {}
-
- template <typename AltPairT>
- DenseMapPair(AltPairT &&AltPair,
- typename std::enable_if<std::is_convertible<
- AltPairT, std::pair<KeyT, ValueT>>::value>::type * = nullptr)
- : std::pair<KeyT, ValueT>(std::forward<AltPairT>(AltPair)) {}
+ using std::pair<KeyT, ValueT>::pair;
KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
@@ -145,9 +120,8 @@
}
const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
- if (is_trivially_copyable<KeyT>::value &&
- is_trivially_copyable<ValueT>::value) {
- // Use a simpler loop when these are trivial types.
+ if (std::is_trivially_destructible<ValueT>::value) {
+ // Use a simpler loop when values don't need destruction.
for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
P->getFirst() = EmptyKey;
} else {
@@ -176,13 +150,19 @@
iterator find(const_arg_type_t<KeyT> Val) {
BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return makeIterator(TheBucket, getBucketsEnd(), *this, true);
+ return makeIterator(TheBucket,
+ shouldReverseIterate<KeyT>() ? getBuckets()
+ : getBucketsEnd(),
+ *this, true);
return end();
}
const_iterator find(const_arg_type_t<KeyT> Val) const {
const BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return makeConstIterator(TheBucket, getBucketsEnd(), *this, true);
+ return makeConstIterator(TheBucket,
+ shouldReverseIterate<KeyT>() ? getBuckets()
+ : getBucketsEnd(),
+ *this, true);
return end();
}
@@ -195,14 +175,20 @@
iterator find_as(const LookupKeyT &Val) {
BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return makeIterator(TheBucket, getBucketsEnd(), *this, true);
+ return makeIterator(TheBucket,
+ shouldReverseIterate<KeyT>() ? getBuckets()
+ : getBucketsEnd(),
+ *this, true);
return end();
}
template<class LookupKeyT>
const_iterator find_as(const LookupKeyT &Val) const {
const BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return makeConstIterator(TheBucket, getBucketsEnd(), *this, true);
+ return makeConstIterator(TheBucket,
+ shouldReverseIterate<KeyT>() ? getBuckets()
+ : getBucketsEnd(),
+ *this, true);
return end();
}
@@ -236,16 +222,22 @@
std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
- return std::make_pair(
- makeIterator(TheBucket, getBucketsEnd(), *this, true),
- false); // Already in map.
+ return std::make_pair(makeIterator(TheBucket,
+ shouldReverseIterate<KeyT>()
+ ? getBuckets()
+ : getBucketsEnd(),
+ *this, true),
+ false); // Already in map.
// Otherwise, insert the new element.
TheBucket =
InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
- return std::make_pair(
- makeIterator(TheBucket, getBucketsEnd(), *this, true),
- true);
+ return std::make_pair(makeIterator(TheBucket,
+ shouldReverseIterate<KeyT>()
+ ? getBuckets()
+ : getBucketsEnd(),
+ *this, true),
+ true);
}
// Inserts key,value pair into the map if the key isn't already in the map.
@@ -255,15 +247,21 @@
std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
- return std::make_pair(
- makeIterator(TheBucket, getBucketsEnd(), *this, true),
- false); // Already in map.
+ return std::make_pair(makeIterator(TheBucket,
+ shouldReverseIterate<KeyT>()
+ ? getBuckets()
+ : getBucketsEnd(),
+ *this, true),
+ false); // Already in map.
// Otherwise, insert the new element.
TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
- return std::make_pair(
- makeIterator(TheBucket, getBucketsEnd(), *this, true),
- true);
+ return std::make_pair(makeIterator(TheBucket,
+ shouldReverseIterate<KeyT>()
+ ? getBuckets()
+ : getBucketsEnd(),
+ *this, true),
+ true);
}
/// Alternate version of insert() which allows a different, and possibly
@@ -276,16 +274,22 @@
const LookupKeyT &Val) {
BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return std::make_pair(
- makeIterator(TheBucket, getBucketsEnd(), *this, true),
- false); // Already in map.
+ return std::make_pair(makeIterator(TheBucket,
+ shouldReverseIterate<KeyT>()
+ ? getBuckets()
+ : getBucketsEnd(),
+ *this, true),
+ false); // Already in map.
// Otherwise, insert the new element.
TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
std::move(KV.second), Val);
- return std::make_pair(
- makeIterator(TheBucket, getBucketsEnd(), *this, true),
- true);
+ return std::make_pair(makeIterator(TheBucket,
+ shouldReverseIterate<KeyT>()
+ ? getBuckets()
+ : getBucketsEnd(),
+ *this, true),
+ true);
}
/// insert - Range insertion of pairs.
@@ -422,8 +426,8 @@
setNumEntries(other.getNumEntries());
setNumTombstones(other.getNumTombstones());
- if (is_trivially_copyable<KeyT>::value &&
- is_trivially_copyable<ValueT>::value)
+ if (std::is_trivially_copyable<KeyT>::value &&
+ std::is_trivially_copyable<ValueT>::value)
memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
getNumBuckets() * sizeof(BucketT));
else
@@ -721,7 +725,7 @@
unsigned NumBuckets;
public:
- /// Create a DenseMap wth an optional \p InitialReserve that guarantee that
+ /// Create a DenseMap with an optional \p InitialReserve that guarantee that
/// this number of elements can be inserted in the map without grow()
explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
@@ -748,7 +752,7 @@
~DenseMap() {
this->destroyAll();
- operator delete(Buckets);
+ deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
}
void swap(DenseMap& RHS) {
@@ -768,7 +772,7 @@
DenseMap& operator=(DenseMap &&other) {
this->destroyAll();
- operator delete(Buckets);
+ deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
init(0);
swap(other);
return *this;
@@ -776,7 +780,7 @@
void copyFrom(const DenseMap& other) {
this->destroyAll();
- operator delete(Buckets);
+ deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
if (allocateBuckets(other.NumBuckets)) {
this->BaseT::copyFrom(other);
} else {
@@ -809,10 +813,12 @@
this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
// Free the old table.
- operator delete(OldBuckets);
+ deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
+ alignof(BucketT));
}
void shrink_and_clear() {
+ unsigned OldNumBuckets = NumBuckets;
unsigned OldNumEntries = NumEntries;
this->destroyAll();
@@ -825,7 +831,8 @@
return;
}
- operator delete(Buckets);
+ deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
+ alignof(BucketT));
init(NewNumBuckets);
}
@@ -861,7 +868,8 @@
return false;
}
- Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
+ Buckets = static_cast<BucketT *>(
+ allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
return true;
}
};
@@ -946,7 +954,7 @@
std::swap(*LHSB, *RHSB);
continue;
}
- // Swap separately and handle any assymetry.
+ // Swap separately and handle any asymmetry.
std::swap(LHSB->getFirst(), RHSB->getFirst());
if (hasLHSValue) {
::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
@@ -1028,16 +1036,13 @@
}
void grow(unsigned AtLeast) {
- if (AtLeast >= InlineBuckets)
+ if (AtLeast > InlineBuckets)
AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
if (Small) {
- if (AtLeast < InlineBuckets)
- return; // Nothing to do.
-
// First move the inline buckets into a temporary storage.
AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
- BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
+ BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
BucketT *TmpEnd = TmpBegin;
// Loop over the buckets, moving non-empty, non-tombstones into the
@@ -1057,10 +1062,13 @@
P->getFirst().~KeyT();
}
- // Now make this map use the large rep, and move all the entries back
- // into it.
- Small = false;
- new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
+ // AtLeast == InlineBuckets can happen if there are many tombstones,
+ // and grow() is used to remove them. Usually we always switch to the
+ // large rep here.
+ if (AtLeast > InlineBuckets) {
+ Small = false;
+ new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
+ }
this->moveFromOldBuckets(TmpBegin, TmpEnd);
return;
}
@@ -1076,7 +1084,8 @@
this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
// Free the old table.
- operator delete(OldRep.Buckets);
+ deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
+ alignof(BucketT));
}
void shrink_and_clear() {
@@ -1123,8 +1132,8 @@
assert(Small);
// Note that this cast does not violate aliasing rules as we assert that
// the memory's dynamic type is the small, inline bucket buffer, and the
- // 'storage.buffer' static type is 'char *'.
- return reinterpret_cast<const BucketT *>(storage.buffer);
+ // 'storage' is a POD containing a char buffer.
+ return reinterpret_cast<const BucketT *>(&storage);
}
BucketT *getInlineBuckets() {
@@ -1135,7 +1144,7 @@
const LargeRep *getLargeRep() const {
assert(!Small);
// Note, same rule about aliasing as with getInlineBuckets.
- return reinterpret_cast<const LargeRep *>(storage.buffer);
+ return reinterpret_cast<const LargeRep *>(&storage);
}
LargeRep *getLargeRep() {
@@ -1160,15 +1169,17 @@
if (Small)
return;
- operator delete(getLargeRep()->Buckets);
+ deallocate_buffer(getLargeRep()->Buckets,
+ sizeof(BucketT) * getLargeRep()->NumBuckets,
+ alignof(BucketT));
getLargeRep()->~LargeRep();
}
LargeRep allocateBuckets(unsigned Num) {
assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
- LargeRep Rep = {
- static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
- };
+ LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
+ sizeof(BucketT) * Num, alignof(BucketT))),
+ Num};
return Rep;
}
};
@@ -1179,8 +1190,6 @@
friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
- using ConstIterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
-
public:
using difference_type = ptrdiff_t;
using value_type =
@@ -1213,41 +1222,43 @@
// for const iterator destinations so it doesn't end up as a user defined copy
// constructor.
template <bool IsConstSrc,
- typename = typename std::enable_if<!IsConstSrc && IsConst>::type>
+ typename = std::enable_if_t<!IsConstSrc && IsConst>>
DenseMapIterator(
const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
: DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
reference operator*() const {
assert(isHandleInSync() && "invalid iterator access!");
+ assert(Ptr != End && "dereferencing end() iterator");
if (shouldReverseIterate<KeyT>())
return Ptr[-1];
return *Ptr;
}
pointer operator->() const {
assert(isHandleInSync() && "invalid iterator access!");
+ assert(Ptr != End && "dereferencing end() iterator");
if (shouldReverseIterate<KeyT>())
return &(Ptr[-1]);
return Ptr;
}
- bool operator==(const ConstIterator &RHS) const {
- assert((!Ptr || isHandleInSync()) && "handle not in sync!");
+ friend bool operator==(const DenseMapIterator &LHS,
+ const DenseMapIterator &RHS) {
+ assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
- assert(getEpochAddress() == RHS.getEpochAddress() &&
+ assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
"comparing incomparable iterators!");
- return Ptr == RHS.Ptr;
+ return LHS.Ptr == RHS.Ptr;
}
- bool operator!=(const ConstIterator &RHS) const {
- assert((!Ptr || isHandleInSync()) && "handle not in sync!");
- assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
- assert(getEpochAddress() == RHS.getEpochAddress() &&
- "comparing incomparable iterators!");
- return Ptr != RHS.Ptr;
+
+ friend bool operator!=(const DenseMapIterator &LHS,
+ const DenseMapIterator &RHS) {
+ return !(LHS == RHS);
}
inline DenseMapIterator& operator++() { // Preincrement
assert(isHandleInSync() && "invalid iterator access!");
+ assert(Ptr != End && "incrementing end() iterator");
if (shouldReverseIterate<KeyT>()) {
--Ptr;
RetreatPastEmptyBuckets();
diff --git a/linux-x64/clang/include/llvm/ADT/DenseMapInfo.h b/linux-x64/clang/include/llvm/ADT/DenseMapInfo.h
index 5ef6f3a..8271b93 100644
--- a/linux-x64/clang/include/llvm/ADT/DenseMapInfo.h
+++ b/linux-x64/clang/include/llvm/ADT/DenseMapInfo.h
@@ -13,11 +13,11 @@
#ifndef LLVM_ADT_DENSEMAPINFO_H
#define LLVM_ADT_DENSEMAPINFO_H
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/PointerLikeTypeTraits.h"
-#include "llvm/Support/ScalableSize.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
@@ -25,6 +25,24 @@
namespace llvm {
+namespace detail {
+
+/// Simplistic combination of 32-bit hash values into 32-bit hash values.
+static inline unsigned combineHashValue(unsigned a, unsigned b) {
+ uint64_t key = (uint64_t)a << 32 | (uint64_t)b;
+ key += ~(key << 32);
+ key ^= (key >> 22);
+ key += ~(key << 13);
+ key ^= (key >> 8);
+ key += (key << 3);
+ key ^= (key >> 15);
+ key += ~(key << 27);
+ key ^= (key >> 31);
+ return (unsigned)key;
+}
+
+} // end namespace detail
+
template<typename T>
struct DenseMapInfo {
//static inline T getEmptyKey();
@@ -33,18 +51,28 @@
//static bool isEqual(const T &LHS, const T &RHS);
};
-// Provide DenseMapInfo for all pointers.
+// Provide DenseMapInfo for all pointers. Come up with sentinel pointer values
+// that are aligned to alignof(T) bytes, but try to avoid requiring T to be
+// complete. This allows clients to instantiate DenseMap<T*, ...> with forward
+// declared key types. Assume that no pointer key type requires more than 4096
+// bytes of alignment.
template<typename T>
struct DenseMapInfo<T*> {
+ // The following should hold, but it would require T to be complete:
+ // static_assert(alignof(T) <= (1 << Log2MaxAlign),
+ // "DenseMap does not support pointer keys requiring more than "
+ // "Log2MaxAlign bits of alignment");
+ static constexpr uintptr_t Log2MaxAlign = 12;
+
static inline T* getEmptyKey() {
uintptr_t Val = static_cast<uintptr_t>(-1);
- Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
+ Val <<= Log2MaxAlign;
return reinterpret_cast<T*>(Val);
}
static inline T* getTombstoneKey() {
uintptr_t Val = static_cast<uintptr_t>(-2);
- Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
+ Val <<= Log2MaxAlign;
return reinterpret_cast<T*>(Val);
}
@@ -67,6 +95,17 @@
}
};
+// Provide DenseMapInfo for unsigned chars.
+template <> struct DenseMapInfo<unsigned char> {
+ static inline unsigned char getEmptyKey() { return ~0; }
+ static inline unsigned char getTombstoneKey() { return ~0 - 1; }
+ static unsigned getHashValue(const unsigned char &Val) { return Val * 37U; }
+
+ static bool isEqual(const unsigned char &LHS, const unsigned char &RHS) {
+ return LHS == RHS;
+ }
+};
+
// Provide DenseMapInfo for unsigned shorts.
template <> struct DenseMapInfo<unsigned short> {
static inline unsigned short getEmptyKey() { return 0xFFFF; }
@@ -187,17 +226,8 @@
}
static unsigned getHashValue(const Pair& PairVal) {
- uint64_t key = (uint64_t)FirstInfo::getHashValue(PairVal.first) << 32
- | (uint64_t)SecondInfo::getHashValue(PairVal.second);
- key += ~(key << 32);
- key ^= (key >> 22);
- key += ~(key << 13);
- key ^= (key >> 8);
- key += (key << 3);
- key ^= (key >> 15);
- key += ~(key << 27);
- key ^= (key >> 31);
- return (unsigned)key;
+ return detail::combineHashValue(FirstInfo::getHashValue(PairVal.first),
+ SecondInfo::getHashValue(PairVal.second));
}
static bool isEqual(const Pair &LHS, const Pair &RHS) {
@@ -206,6 +236,56 @@
}
};
+// Provide DenseMapInfo for all tuples whose members have info.
+template <typename... Ts> struct DenseMapInfo<std::tuple<Ts...>> {
+ using Tuple = std::tuple<Ts...>;
+
+ static inline Tuple getEmptyKey() {
+ return Tuple(DenseMapInfo<Ts>::getEmptyKey()...);
+ }
+
+ static inline Tuple getTombstoneKey() {
+ return Tuple(DenseMapInfo<Ts>::getTombstoneKey()...);
+ }
+
+ template <unsigned I>
+ static unsigned getHashValueImpl(const Tuple &values, std::false_type) {
+ using EltType = typename std::tuple_element<I, Tuple>::type;
+ std::integral_constant<bool, I + 1 == sizeof...(Ts)> atEnd;
+ return detail::combineHashValue(
+ DenseMapInfo<EltType>::getHashValue(std::get<I>(values)),
+ getHashValueImpl<I + 1>(values, atEnd));
+ }
+
+ template <unsigned I>
+ static unsigned getHashValueImpl(const Tuple &values, std::true_type) {
+ return 0;
+ }
+
+ static unsigned getHashValue(const std::tuple<Ts...> &values) {
+ std::integral_constant<bool, 0 == sizeof...(Ts)> atEnd;
+ return getHashValueImpl<0>(values, atEnd);
+ }
+
+ template <unsigned I>
+ static bool isEqualImpl(const Tuple &lhs, const Tuple &rhs, std::false_type) {
+ using EltType = typename std::tuple_element<I, Tuple>::type;
+ std::integral_constant<bool, I + 1 == sizeof...(Ts)> atEnd;
+ return DenseMapInfo<EltType>::isEqual(std::get<I>(lhs), std::get<I>(rhs)) &&
+ isEqualImpl<I + 1>(lhs, rhs, atEnd);
+ }
+
+ template <unsigned I>
+ static bool isEqualImpl(const Tuple &lhs, const Tuple &rhs, std::true_type) {
+ return true;
+ }
+
+ static bool isEqual(const Tuple &lhs, const Tuple &rhs) {
+ std::integral_constant<bool, 0 == sizeof...(Ts)> atEnd;
+ return isEqualImpl<0>(lhs, rhs, atEnd);
+ }
+};
+
// Provide DenseMapInfo for StringRefs.
template <> struct DenseMapInfo<StringRef> {
static inline StringRef getEmptyKey() {
@@ -269,18 +349,46 @@
static bool isEqual(hash_code LHS, hash_code RHS) { return LHS == RHS; }
};
-template <> struct DenseMapInfo<ElementCount> {
- static inline ElementCount getEmptyKey() { return {~0U, true}; }
- static inline ElementCount getTombstoneKey() { return {~0U - 1, false}; }
- static unsigned getHashValue(const ElementCount& EltCnt) {
- if (EltCnt.Scalable)
- return (EltCnt.Min * 37U) - 1U;
-
- return EltCnt.Min * 37U;
+/// Provide DenseMapInfo for APInt.
+template <> struct DenseMapInfo<APInt> {
+ static inline APInt getEmptyKey() {
+ APInt V(nullptr, 0);
+ V.U.VAL = 0;
+ return V;
}
- static bool isEqual(const ElementCount& LHS, const ElementCount& RHS) {
- return LHS == RHS;
+ static inline APInt getTombstoneKey() {
+ APInt V(nullptr, 0);
+ V.U.VAL = 1;
+ return V;
+ }
+
+ static unsigned getHashValue(const APInt &Key) {
+ return static_cast<unsigned>(hash_value(Key));
+ }
+
+ static bool isEqual(const APInt &LHS, const APInt &RHS) {
+ return LHS.getBitWidth() == RHS.getBitWidth() && LHS == RHS;
+ }
+};
+
+/// Provide DenseMapInfo for APSInt, using the DenseMapInfo for APInt.
+template <> struct DenseMapInfo<APSInt> {
+ static inline APSInt getEmptyKey() {
+ return APSInt(DenseMapInfo<APInt>::getEmptyKey());
+ }
+
+ static inline APSInt getTombstoneKey() {
+ return APSInt(DenseMapInfo<APInt>::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const APSInt &Key) {
+ return static_cast<unsigned>(hash_value(Key));
+ }
+
+ static bool isEqual(const APSInt &LHS, const APSInt &RHS) {
+ return LHS.getBitWidth() == RHS.getBitWidth() &&
+ LHS.isUnsigned() == RHS.isUnsigned() && LHS == RHS;
}
};
diff --git a/linux-x64/clang/include/llvm/ADT/DenseSet.h b/linux-x64/clang/include/llvm/ADT/DenseSet.h
index 9afb715..edce7c4 100644
--- a/linux-x64/clang/include/llvm/ADT/DenseSet.h
+++ b/linux-x64/clang/include/llvm/ADT/DenseSet.h
@@ -66,6 +66,12 @@
explicit DenseSetImpl(unsigned InitialReserve = 0) : TheMap(InitialReserve) {}
+ template <typename InputIt>
+ DenseSetImpl(const InputIt &I, const InputIt &E)
+ : DenseSetImpl(PowerOf2Ceil(std::distance(I, E))) {
+ insert(I, E);
+ }
+
DenseSetImpl(std::initializer_list<ValueT> Elems)
: DenseSetImpl(PowerOf2Ceil(Elems.size())) {
insert(Elems.begin(), Elems.end());
@@ -124,8 +130,12 @@
Iterator& operator++() { ++I; return *this; }
Iterator operator++(int) { auto T = *this; ++I; return T; }
- bool operator==(const ConstIterator& X) const { return I == X.I; }
- bool operator!=(const ConstIterator& X) const { return I != X.I; }
+ friend bool operator==(const Iterator &X, const Iterator &Y) {
+ return X.I == Y.I;
+ }
+ friend bool operator!=(const Iterator &X, const Iterator &Y) {
+ return X.I != Y.I;
+ }
};
class ConstIterator {
@@ -149,8 +159,12 @@
ConstIterator& operator++() { ++I; return *this; }
ConstIterator operator++(int) { auto T = *this; ++I; return T; }
- bool operator==(const ConstIterator& X) const { return I == X.I; }
- bool operator!=(const ConstIterator& X) const { return I != X.I; }
+ friend bool operator==(const ConstIterator &X, const ConstIterator &Y) {
+ return X.I == Y.I;
+ }
+ friend bool operator!=(const ConstIterator &X, const ConstIterator &Y) {
+ return X.I != Y.I;
+ }
};
using iterator = Iterator;
@@ -167,6 +181,11 @@
return ConstIterator(TheMap.find(V));
}
+ /// Check if the set contains the given element.
+ bool contains(const_arg_type_t<ValueT> V) const {
+ return TheMap.find(V) != TheMap.end();
+ }
+
/// Alternative version of find() which allows a different, and possibly less
/// expensive, key type.
/// The DenseMapInfo is responsible for supplying methods
diff --git a/linux-x64/clang/include/llvm/ADT/DepthFirstIterator.h b/linux-x64/clang/include/llvm/ADT/DepthFirstIterator.h
index 11967f5..5bfea28 100644
--- a/linux-x64/clang/include/llvm/ADT/DepthFirstIterator.h
+++ b/linux-x64/clang/include/llvm/ADT/DepthFirstIterator.h
@@ -198,7 +198,7 @@
// nodes that a depth first iteration did not find: ie unreachable nodes.
//
bool nodeVisited(NodeRef Node) const {
- return this->Visited.count(Node) != 0;
+ return this->Visited.contains(Node);
}
/// getPathLength - Return the length of the path from the entry node to the
diff --git a/linux-x64/clang/include/llvm/ADT/DirectedGraph.h b/linux-x64/clang/include/llvm/ADT/DirectedGraph.h
new file mode 100644
index 0000000..e8bb9e6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/DirectedGraph.h
@@ -0,0 +1,279 @@
+//===- llvm/ADT/DirectedGraph.h - Directed Graph ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface and a base class implementation for a
+// directed graph.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DIRECTEDGRAPH_H
+#define LLVM_ADT_DIRECTEDGRAPH_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// Represent an edge in the directed graph.
+/// The edge contains the target node it connects to.
+template <class NodeType, class EdgeType> class DGEdge {
+public:
+ DGEdge() = delete;
+ /// Create an edge pointing to the given node \p N.
+ explicit DGEdge(NodeType &N) : TargetNode(N) {}
+ explicit DGEdge(const DGEdge<NodeType, EdgeType> &E)
+ : TargetNode(E.TargetNode) {}
+ DGEdge<NodeType, EdgeType> &operator=(const DGEdge<NodeType, EdgeType> &E) {
+ TargetNode = E.TargetNode;
+ return *this;
+ }
+
+ /// Static polymorphism: delegate implementation (via isEqualTo) to the
+ /// derived class.
+ bool operator==(const DGEdge &E) const {
+ return getDerived().isEqualTo(E.getDerived());
+ }
+ bool operator!=(const DGEdge &E) const { return !operator==(E); }
+
+ /// Retrieve the target node this edge connects to.
+ const NodeType &getTargetNode() const { return TargetNode; }
+ NodeType &getTargetNode() {
+ return const_cast<NodeType &>(
+ static_cast<const DGEdge<NodeType, EdgeType> &>(*this).getTargetNode());
+ }
+
+ /// Set the target node this edge connects to.
+ void setTargetNode(const NodeType &N) { TargetNode = N; }
+
+protected:
+ // As the default implementation use address comparison for equality.
+ bool isEqualTo(const EdgeType &E) const { return this == &E; }
+
+ // Cast the 'this' pointer to the derived type and return a reference.
+ EdgeType &getDerived() { return *static_cast<EdgeType *>(this); }
+ const EdgeType &getDerived() const {
+ return *static_cast<const EdgeType *>(this);
+ }
+
+ // The target node this edge connects to.
+ NodeType &TargetNode;
+};
+
+/// Represent a node in the directed graph.
+/// The node has a (possibly empty) list of outgoing edges.
+template <class NodeType, class EdgeType> class DGNode {
+public:
+ using EdgeListTy = SetVector<EdgeType *>;
+ using iterator = typename EdgeListTy::iterator;
+ using const_iterator = typename EdgeListTy::const_iterator;
+
+ /// Create a node with a single outgoing edge \p E.
+ explicit DGNode(EdgeType &E) : Edges() { Edges.insert(&E); }
+ DGNode() = default;
+
+ explicit DGNode(const DGNode<NodeType, EdgeType> &N) : Edges(N.Edges) {}
+ DGNode(DGNode<NodeType, EdgeType> &&N) : Edges(std::move(N.Edges)) {}
+
+ DGNode<NodeType, EdgeType> &operator=(const DGNode<NodeType, EdgeType> &N) {
+ Edges = N.Edges;
+ return *this;
+ }
+ DGNode<NodeType, EdgeType> &operator=(const DGNode<NodeType, EdgeType> &&N) {
+ Edges = std::move(N.Edges);
+ return *this;
+ }
+
+ /// Static polymorphism: delegate implementation (via isEqualTo) to the
+ /// derived class.
+ friend bool operator==(const NodeType &M, const NodeType &N) {
+ return M.isEqualTo(N);
+ }
+ friend bool operator!=(const NodeType &M, const NodeType &N) {
+ return !(M == N);
+ }
+
+ const_iterator begin() const { return Edges.begin(); }
+ const_iterator end() const { return Edges.end(); }
+ iterator begin() { return Edges.begin(); }
+ iterator end() { return Edges.end(); }
+ const EdgeType &front() const { return *Edges.front(); }
+ EdgeType &front() { return *Edges.front(); }
+ const EdgeType &back() const { return *Edges.back(); }
+ EdgeType &back() { return *Edges.back(); }
+
+ /// Collect in \p EL, all the edges from this node to \p N.
+ /// Return true if at least one edge was found, and false otherwise.
+ /// Note that this implementation allows more than one edge to connect
+ /// a given pair of nodes.
+ bool findEdgesTo(const NodeType &N, SmallVectorImpl<EdgeType *> &EL) const {
+ assert(EL.empty() && "Expected the list of edges to be empty.");
+ for (auto *E : Edges)
+ if (E->getTargetNode() == N)
+ EL.push_back(E);
+ return !EL.empty();
+ }
+
+ /// Add the given edge \p E to this node, if it doesn't exist already. Returns
+ /// true if the edge is added and false otherwise.
+ bool addEdge(EdgeType &E) { return Edges.insert(&E); }
+
+ /// Remove the given edge \p E from this node, if it exists.
+ void removeEdge(EdgeType &E) { Edges.remove(&E); }
+
+ /// Test whether there is an edge that goes from this node to \p N.
+ bool hasEdgeTo(const NodeType &N) const {
+ return (findEdgeTo(N) != Edges.end());
+ }
+
+ /// Retrieve the outgoing edges for the node.
+ const EdgeListTy &getEdges() const { return Edges; }
+ EdgeListTy &getEdges() {
+ return const_cast<EdgeListTy &>(
+ static_cast<const DGNode<NodeType, EdgeType> &>(*this).Edges);
+ }
+
+ /// Clear the outgoing edges.
+ void clear() { Edges.clear(); }
+
+protected:
+ // As the default implementation use address comparison for equality.
+ bool isEqualTo(const NodeType &N) const { return this == &N; }
+
+ // Cast the 'this' pointer to the derived type and return a reference.
+ NodeType &getDerived() { return *static_cast<NodeType *>(this); }
+ const NodeType &getDerived() const {
+ return *static_cast<const NodeType *>(this);
+ }
+
+ /// Find an edge to \p N. If more than one edge exists, this will return
+ /// the first one in the list of edges.
+ const_iterator findEdgeTo(const NodeType &N) const {
+ return llvm::find_if(
+ Edges, [&N](const EdgeType *E) { return E->getTargetNode() == N; });
+ }
+
+ // The list of outgoing edges.
+ EdgeListTy Edges;
+};
+
+/// Directed graph
+///
+/// The graph is represented by a table of nodes.
+/// Each node contains a (possibly empty) list of outgoing edges.
+/// Each edge contains the target node it connects to.
+template <class NodeType, class EdgeType> class DirectedGraph {
+protected:
+ using NodeListTy = SmallVector<NodeType *, 10>;
+ using EdgeListTy = SmallVector<EdgeType *, 10>;
+public:
+ using iterator = typename NodeListTy::iterator;
+ using const_iterator = typename NodeListTy::const_iterator;
+ using DGraphType = DirectedGraph<NodeType, EdgeType>;
+
+ DirectedGraph() = default;
+ explicit DirectedGraph(NodeType &N) : Nodes() { addNode(N); }
+ DirectedGraph(const DGraphType &G) : Nodes(G.Nodes) {}
+ DirectedGraph(DGraphType &&RHS) : Nodes(std::move(RHS.Nodes)) {}
+ DGraphType &operator=(const DGraphType &G) {
+ Nodes = G.Nodes;
+ return *this;
+ }
+ DGraphType &operator=(const DGraphType &&G) {
+ Nodes = std::move(G.Nodes);
+ return *this;
+ }
+
+ const_iterator begin() const { return Nodes.begin(); }
+ const_iterator end() const { return Nodes.end(); }
+ iterator begin() { return Nodes.begin(); }
+ iterator end() { return Nodes.end(); }
+ const NodeType &front() const { return *Nodes.front(); }
+ NodeType &front() { return *Nodes.front(); }
+ const NodeType &back() const { return *Nodes.back(); }
+ NodeType &back() { return *Nodes.back(); }
+
+ size_t size() const { return Nodes.size(); }
+
+ /// Find the given node \p N in the table.
+ const_iterator findNode(const NodeType &N) const {
+ return llvm::find_if(Nodes,
+ [&N](const NodeType *Node) { return *Node == N; });
+ }
+ iterator findNode(const NodeType &N) {
+ return const_cast<iterator>(
+ static_cast<const DGraphType &>(*this).findNode(N));
+ }
+
+ /// Add the given node \p N to the graph if it is not already present.
+ bool addNode(NodeType &N) {
+ if (findNode(N) != Nodes.end())
+ return false;
+ Nodes.push_back(&N);
+ return true;
+ }
+
+ /// Collect in \p EL all edges that are coming into node \p N. Return true
+ /// if at least one edge was found, and false otherwise.
+ bool findIncomingEdgesToNode(const NodeType &N, SmallVectorImpl<EdgeType*> &EL) const {
+ assert(EL.empty() && "Expected the list of edges to be empty.");
+ EdgeListTy TempList;
+ for (auto *Node : Nodes) {
+ if (*Node == N)
+ continue;
+ Node->findEdgesTo(N, TempList);
+ llvm::append_range(EL, TempList);
+ TempList.clear();
+ }
+ return !EL.empty();
+ }
+
+ /// Remove the given node \p N from the graph. If the node has incoming or
+ /// outgoing edges, they are also removed. Return true if the node was found
+ /// and then removed, and false if the node was not found in the graph to
+ /// begin with.
+ bool removeNode(NodeType &N) {
+ iterator IT = findNode(N);
+ if (IT == Nodes.end())
+ return false;
+ // Remove incoming edges.
+ EdgeListTy EL;
+ for (auto *Node : Nodes) {
+ if (*Node == N)
+ continue;
+ Node->findEdgesTo(N, EL);
+ for (auto *E : EL)
+ Node->removeEdge(*E);
+ EL.clear();
+ }
+ N.clear();
+ Nodes.erase(IT);
+ return true;
+ }
+
+ /// Assuming nodes \p Src and \p Dst are already in the graph, connect node \p
+ /// Src to node \p Dst using the provided edge \p E. Return true if \p Src is
+ /// not already connected to \p Dst via \p E, and false otherwise.
+ bool connect(NodeType &Src, NodeType &Dst, EdgeType &E) {
+ assert(findNode(Src) != Nodes.end() && "Src node should be present.");
+ assert(findNode(Dst) != Nodes.end() && "Dst node should be present.");
+ assert((E.getTargetNode() == Dst) &&
+ "Target of the given edge does not match Dst.");
+ return Src.addEdge(E);
+ }
+
+protected:
+ // The list of nodes in the graph.
+ NodeListTy Nodes;
+};
+
+} // namespace llvm
+
+#endif // LLVM_ADT_DIRECTEDGRAPH_H
diff --git a/linux-x64/clang/include/llvm/ADT/EnumeratedArray.h b/linux-x64/clang/include/llvm/ADT/EnumeratedArray.h
new file mode 100644
index 0000000..a66ec9d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/EnumeratedArray.h
@@ -0,0 +1,49 @@
+//===- llvm/ADT/EnumeratedArray.h - Enumerated Array-------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an array type that can be indexed using scoped enum values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ENUMERATEDARRAY_H
+#define LLVM_ADT_ENUMERATEDARRAY_H
+
+#include <cassert>
+
+namespace llvm {
+
+template <typename ValueType, typename Enumeration,
+ Enumeration LargestEnum = Enumeration::Last, typename IndexType = int,
+ IndexType Size = 1 + static_cast<IndexType>(LargestEnum)>
+class EnumeratedArray {
+public:
+ EnumeratedArray() = default;
+ EnumeratedArray(ValueType V) {
+ for (IndexType IX = 0; IX < Size; ++IX) {
+ Underlying[IX] = V;
+ }
+ }
+ inline const ValueType &operator[](const Enumeration Index) const {
+ auto IX = static_cast<const IndexType>(Index);
+ assert(IX >= 0 && IX < Size && "Index is out of bounds.");
+ return Underlying[IX];
+ }
+ inline ValueType &operator[](const Enumeration Index) {
+ return const_cast<ValueType &>(
+ static_cast<const EnumeratedArray<ValueType, Enumeration, LargestEnum,
+ IndexType, Size> &>(*this)[Index]);
+ }
+ inline IndexType size() { return Size; }
+
+private:
+ ValueType Underlying[Size];
+};
+
+} // namespace llvm
+
+#endif // LLVM_ADT_ENUMERATEDARRAY_H
diff --git a/linux-x64/clang/include/llvm/ADT/FloatingPointMode.h b/linux-x64/clang/include/llvm/ADT/FloatingPointMode.h
new file mode 100644
index 0000000..6988309
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/FloatingPointMode.h
@@ -0,0 +1,195 @@
+//===- llvm/Support/FloatingPointMode.h -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Utilities for dealing with flags related to floating point mode controls.
+//
+//===----------------------------------------------------------------------===/
+
+#ifndef LLVM_FLOATINGPOINTMODE_H
+#define LLVM_FLOATINGPOINTMODE_H
+
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// Rounding mode.
+///
+/// Enumerates supported rounding modes, as well as some special values. The set
+/// of the modes must agree with IEEE-754, 4.3.1 and 4.3.2. The constants
+/// assigned to the IEEE rounding modes must agree with the values used by
+/// FLT_ROUNDS (C11, 5.2.4.2.2p8).
+///
+/// This value is packed into bitfield in some cases, including \c FPOptions, so
+/// the rounding mode values and the special value \c Dynamic must fit into the
+/// bit field (now - 3 bits). The value \c Invalid is used only in values
+/// returned by intrinsics to indicate errors, it should never be stored as
+/// rounding mode value, so it does not need to fit the bit fields.
+///
+enum class RoundingMode : int8_t {
+ // Rounding mode defined in IEEE-754.
+ TowardZero = 0, ///< roundTowardZero.
+ NearestTiesToEven = 1, ///< roundTiesToEven.
+ TowardPositive = 2, ///< roundTowardPositive.
+ TowardNegative = 3, ///< roundTowardNegative.
+ NearestTiesToAway = 4, ///< roundTiesToAway.
+
+ // Special values.
+ Dynamic = 7, ///< Denotes mode unknown at compile time.
+ Invalid = -1 ///< Denotes invalid value.
+};
+
+/// Returns text representation of the given rounding mode.
+inline StringRef spell(RoundingMode RM) {
+ switch (RM) {
+ case RoundingMode::TowardZero: return "towardzero";
+ case RoundingMode::NearestTiesToEven: return "tonearest";
+ case RoundingMode::TowardPositive: return "upward";
+ case RoundingMode::TowardNegative: return "downward";
+ case RoundingMode::NearestTiesToAway: return "tonearestaway";
+ case RoundingMode::Dynamic: return "dynamic";
+ default: return "invalid";
+ }
+}
+
+inline raw_ostream &operator << (raw_ostream &OS, RoundingMode RM) {
+ OS << spell(RM);
+ return OS;
+}
+
+/// Represent subnormal handling kind for floating point instruction inputs and
+/// outputs.
+struct DenormalMode {
+ /// Represent handled modes for denormal (aka subnormal) modes in the floating
+ /// point environment.
+ enum DenormalModeKind : int8_t {
+ Invalid = -1,
+
+ /// IEEE-754 denormal numbers preserved.
+ IEEE,
+
+ /// The sign of a flushed-to-zero number is preserved in the sign of 0
+ PreserveSign,
+
+ /// Denormals are flushed to positive zero.
+ PositiveZero
+ };
+
+ /// Denormal flushing mode for floating point instruction results in the
+ /// default floating point environment.
+ DenormalModeKind Output = DenormalModeKind::Invalid;
+
+ /// Denormal treatment kind for floating point instruction inputs in the
+ /// default floating-point environment. If this is not DenormalModeKind::IEEE,
+ /// floating-point instructions implicitly treat the input value as 0.
+ DenormalModeKind Input = DenormalModeKind::Invalid;
+
+ constexpr DenormalMode() = default;
+ constexpr DenormalMode(DenormalModeKind Out, DenormalModeKind In) :
+ Output(Out), Input(In) {}
+
+
+ static constexpr DenormalMode getInvalid() {
+ return DenormalMode(DenormalModeKind::Invalid, DenormalModeKind::Invalid);
+ }
+
+ static constexpr DenormalMode getIEEE() {
+ return DenormalMode(DenormalModeKind::IEEE, DenormalModeKind::IEEE);
+ }
+
+ static constexpr DenormalMode getPreserveSign() {
+ return DenormalMode(DenormalModeKind::PreserveSign,
+ DenormalModeKind::PreserveSign);
+ }
+
+ static constexpr DenormalMode getPositiveZero() {
+ return DenormalMode(DenormalModeKind::PositiveZero,
+ DenormalModeKind::PositiveZero);
+ }
+
+ bool operator==(DenormalMode Other) const {
+ return Output == Other.Output && Input == Other.Input;
+ }
+
+ bool operator!=(DenormalMode Other) const {
+ return !(*this == Other);
+ }
+
+ bool isSimple() const {
+ return Input == Output;
+ }
+
+ bool isValid() const {
+ return Output != DenormalModeKind::Invalid &&
+ Input != DenormalModeKind::Invalid;
+ }
+
+ inline void print(raw_ostream &OS) const;
+
+ inline std::string str() const {
+ std::string storage;
+ raw_string_ostream OS(storage);
+ print(OS);
+ return OS.str();
+ }
+};
+
+inline raw_ostream& operator<<(raw_ostream &OS, DenormalMode Mode) {
+ Mode.print(OS);
+ return OS;
+}
+
+/// Parse the expected names from the denormal-fp-math attribute.
+inline DenormalMode::DenormalModeKind
+parseDenormalFPAttributeComponent(StringRef Str) {
+ // Assume ieee on unspecified attribute.
+ return StringSwitch<DenormalMode::DenormalModeKind>(Str)
+ .Cases("", "ieee", DenormalMode::IEEE)
+ .Case("preserve-sign", DenormalMode::PreserveSign)
+ .Case("positive-zero", DenormalMode::PositiveZero)
+ .Default(DenormalMode::Invalid);
+}
+
+/// Return the name used for the denormal handling mode used by the
+/// expected names from the denormal-fp-math attribute.
+inline StringRef denormalModeKindName(DenormalMode::DenormalModeKind Mode) {
+ switch (Mode) {
+ case DenormalMode::IEEE:
+ return "ieee";
+ case DenormalMode::PreserveSign:
+ return "preserve-sign";
+ case DenormalMode::PositiveZero:
+ return "positive-zero";
+ default:
+ return "";
+ }
+}
+
+/// Returns the denormal mode to use for inputs and outputs.
+inline DenormalMode parseDenormalFPAttribute(StringRef Str) {
+ StringRef OutputStr, InputStr;
+ std::tie(OutputStr, InputStr) = Str.split(',');
+
+ DenormalMode Mode;
+ Mode.Output = parseDenormalFPAttributeComponent(OutputStr);
+
+ // Maintain compatibility with old form of the attribute which only specified
+ // one component.
+ Mode.Input = InputStr.empty() ? Mode.Output :
+ parseDenormalFPAttributeComponent(InputStr);
+
+ return Mode;
+}
+
+void DenormalMode::print(raw_ostream &OS) const {
+ OS << denormalModeKindName(Output) << ',' << denormalModeKindName(Input);
+}
+
+}
+
+#endif // LLVM_FLOATINGPOINTMODE_H
diff --git a/linux-x64/clang/include/llvm/ADT/FoldingSet.h b/linux-x64/clang/include/llvm/ADT/FoldingSet.h
index d5837e5..fb1cb03 100644
--- a/linux-x64/clang/include/llvm/ADT/FoldingSet.h
+++ b/linux-x64/clang/include/llvm/ADT/FoldingSet.h
@@ -85,17 +85,17 @@
///
/// MyNode *M = MyFoldingSet.FindNodeOrInsertPos(ID, InsertPoint);
///
-/// If found then M with be non-NULL, else InsertPoint will point to where it
+/// If found then M will be non-NULL, else InsertPoint will point to where it
/// should be inserted using InsertNode.
///
-/// 3) If you get a NULL result from FindNodeOrInsertPos then you can as a new
-/// node with FindNodeOrInsertPos;
+/// 3) If you get a NULL result from FindNodeOrInsertPos then you can insert a
+/// new node with InsertNode;
///
-/// InsertNode(N, InsertPoint);
+/// MyFoldingSet.InsertNode(M, InsertPoint);
///
/// 4) Finally, if you want to remove a node from the folding set call;
///
-/// bool WasRemoved = RemoveNode(N);
+/// bool WasRemoved = MyFoldingSet.RemoveNode(M);
///
/// The result indicates whether the node existed in the folding set.
@@ -110,8 +110,6 @@
/// back to the bucket to facilitate node removal.
///
class FoldingSetBase {
- virtual void anchor(); // Out of line virtual method.
-
protected:
/// Buckets - Array of bucket chains.
void **Buckets;
@@ -154,11 +152,6 @@
/// empty - Returns true if there are no nodes in the folding set.
bool empty() const { return NumNodes == 0; }
- /// reserve - Increase the number of buckets such that adding the
- /// EltCount-th node won't cause a rebucket operation. reserve is permitted
- /// to allocate more space than requested by EltCount.
- void reserve(unsigned EltCount);
-
/// capacity - Returns the number of nodes permitted in the folding set
/// before a rebucket operation is performed.
unsigned capacity() {
@@ -167,32 +160,46 @@
return NumBuckets * 2;
}
+protected:
+ /// Functions provided by the derived class to compute folding properties.
+ /// This is effectively a vtable for FoldingSetBase, except that we don't
+ /// actually store a pointer to it in the object.
+ struct FoldingSetInfo {
+ /// GetNodeProfile - Instantiations of the FoldingSet template implement
+ /// this function to gather data bits for the given node.
+ void (*GetNodeProfile)(const FoldingSetBase *Self, Node *N,
+ FoldingSetNodeID &ID);
+
+ /// NodeEquals - Instantiations of the FoldingSet template implement
+ /// this function to compare the given node with the given ID.
+ bool (*NodeEquals)(const FoldingSetBase *Self, Node *N,
+ const FoldingSetNodeID &ID, unsigned IDHash,
+ FoldingSetNodeID &TempID);
+
+ /// ComputeNodeHash - Instantiations of the FoldingSet template implement
+ /// this function to compute a hash value for the given node.
+ unsigned (*ComputeNodeHash)(const FoldingSetBase *Self, Node *N,
+ FoldingSetNodeID &TempID);
+ };
+
private:
/// GrowHashTable - Double the size of the hash table and rehash everything.
- void GrowHashTable();
+ void GrowHashTable(const FoldingSetInfo &Info);
/// GrowBucketCount - resize the hash table and rehash everything.
/// NewBucketCount must be a power of two, and must be greater than the old
/// bucket count.
- void GrowBucketCount(unsigned NewBucketCount);
+ void GrowBucketCount(unsigned NewBucketCount, const FoldingSetInfo &Info);
protected:
- /// GetNodeProfile - Instantiations of the FoldingSet template implement
- /// this function to gather data bits for the given node.
- virtual void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const = 0;
-
- /// NodeEquals - Instantiations of the FoldingSet template implement
- /// this function to compare the given node with the given ID.
- virtual bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
- FoldingSetNodeID &TempID) const=0;
-
- /// ComputeNodeHash - Instantiations of the FoldingSet template implement
- /// this function to compute a hash value for the given node.
- virtual unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const = 0;
-
// The below methods are protected to encourage subclasses to provide a more
// type-safe API.
+ /// reserve - Increase the number of buckets such that adding the
+ /// EltCount-th node won't cause a rebucket operation. reserve is permitted
+ /// to allocate more space than requested by EltCount.
+ void reserve(unsigned EltCount, const FoldingSetInfo &Info);
+
/// RemoveNode - Remove a node from the folding set, returning true if one
/// was removed or false if the node was not in the folding set.
bool RemoveNode(Node *N);
@@ -200,17 +207,18 @@
/// GetOrInsertNode - If there is an existing simple Node exactly
/// equal to the specified node, return it. Otherwise, insert 'N' and return
/// it instead.
- Node *GetOrInsertNode(Node *N);
+ Node *GetOrInsertNode(Node *N, const FoldingSetInfo &Info);
/// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
/// return it. If not, return the insertion token that will make insertion
/// faster.
- Node *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
+ Node *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos,
+ const FoldingSetInfo &Info);
/// InsertNode - Insert the specified node into the folding set, knowing that
/// it is not already in the folding set. InsertPos must be obtained from
/// FindNodeOrInsertPos.
- void InsertNode(Node *N, void *InsertPos);
+ void InsertNode(Node *N, void *InsertPos, const FoldingSetInfo &Info);
};
//===----------------------------------------------------------------------===//
@@ -397,7 +405,7 @@
//===----------------------------------------------------------------------===//
/// FoldingSetImpl - An implementation detail that lets us share code between
/// FoldingSet and ContextualFoldingSet.
-template <class T> class FoldingSetImpl : public FoldingSetBase {
+template <class Derived, class T> class FoldingSetImpl : public FoldingSetBase {
protected:
explicit FoldingSetImpl(unsigned Log2InitSize)
: FoldingSetBase(Log2InitSize) {}
@@ -427,29 +435,40 @@
return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
}
+ /// reserve - Increase the number of buckets such that adding the
+ /// EltCount-th node won't cause a rebucket operation. reserve is permitted
+ /// to allocate more space than requested by EltCount.
+ void reserve(unsigned EltCount) {
+ return FoldingSetBase::reserve(EltCount, Derived::getFoldingSetInfo());
+ }
+
/// RemoveNode - Remove a node from the folding set, returning true if one
/// was removed or false if the node was not in the folding set.
- bool RemoveNode(T *N) { return FoldingSetBase::RemoveNode(N); }
+ bool RemoveNode(T *N) {
+ return FoldingSetBase::RemoveNode(N);
+ }
/// GetOrInsertNode - If there is an existing simple Node exactly
/// equal to the specified node, return it. Otherwise, insert 'N' and
/// return it instead.
T *GetOrInsertNode(T *N) {
- return static_cast<T *>(FoldingSetBase::GetOrInsertNode(N));
+ return static_cast<T *>(
+ FoldingSetBase::GetOrInsertNode(N, Derived::getFoldingSetInfo()));
}
/// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
/// return it. If not, return the insertion token that will make insertion
/// faster.
T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
- return static_cast<T *>(FoldingSetBase::FindNodeOrInsertPos(ID, InsertPos));
+ return static_cast<T *>(FoldingSetBase::FindNodeOrInsertPos(
+ ID, InsertPos, Derived::getFoldingSetInfo()));
}
/// InsertNode - Insert the specified node into the folding set, knowing that
/// it is not already in the folding set. InsertPos must be obtained from
/// FindNodeOrInsertPos.
void InsertNode(T *N, void *InsertPos) {
- FoldingSetBase::InsertNode(N, InsertPos);
+ FoldingSetBase::InsertNode(N, InsertPos, Derived::getFoldingSetInfo());
}
/// InsertNode - Insert the specified node into the folding set, knowing that
@@ -470,32 +489,43 @@
/// moved-from state is not a valid state for anything other than
/// move-assigning and destroying. This is primarily to enable movable APIs
/// that incorporate these objects.
-template <class T> class FoldingSet final : public FoldingSetImpl<T> {
- using Super = FoldingSetImpl<T>;
+template <class T>
+class FoldingSet : public FoldingSetImpl<FoldingSet<T>, T> {
+ using Super = FoldingSetImpl<FoldingSet, T>;
using Node = typename Super::Node;
- /// GetNodeProfile - Each instantiatation of the FoldingSet needs to provide a
+ /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
/// way to convert nodes into a unique specifier.
- void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const override {
+ static void GetNodeProfile(const FoldingSetBase *, Node *N,
+ FoldingSetNodeID &ID) {
T *TN = static_cast<T *>(N);
FoldingSetTrait<T>::Profile(*TN, ID);
}
/// NodeEquals - Instantiations may optionally provide a way to compare a
/// node with a specified ID.
- bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
- FoldingSetNodeID &TempID) const override {
+ static bool NodeEquals(const FoldingSetBase *, Node *N,
+ const FoldingSetNodeID &ID, unsigned IDHash,
+ FoldingSetNodeID &TempID) {
T *TN = static_cast<T *>(N);
return FoldingSetTrait<T>::Equals(*TN, ID, IDHash, TempID);
}
/// ComputeNodeHash - Instantiations may optionally provide a way to compute a
/// hash value directly from a node.
- unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const override {
+ static unsigned ComputeNodeHash(const FoldingSetBase *, Node *N,
+ FoldingSetNodeID &TempID) {
T *TN = static_cast<T *>(N);
return FoldingSetTrait<T>::ComputeHash(*TN, TempID);
}
+ static const FoldingSetBase::FoldingSetInfo &getFoldingSetInfo() {
+ static constexpr FoldingSetBase::FoldingSetInfo Info = {
+ GetNodeProfile, NodeEquals, ComputeNodeHash};
+ return Info;
+ }
+ friend Super;
+
public:
explicit FoldingSet(unsigned Log2InitSize = 6) : Super(Log2InitSize) {}
FoldingSet(FoldingSet &&Arg) = default;
@@ -512,36 +542,52 @@
/// function with signature
/// void Profile(FoldingSetNodeID &, Ctx);
template <class T, class Ctx>
-class ContextualFoldingSet final : public FoldingSetImpl<T> {
+class ContextualFoldingSet
+ : public FoldingSetImpl<ContextualFoldingSet<T, Ctx>, T> {
// Unfortunately, this can't derive from FoldingSet<T> because the
// construction of the vtable for FoldingSet<T> requires
// FoldingSet<T>::GetNodeProfile to be instantiated, which in turn
// requires a single-argument T::Profile().
- using Super = FoldingSetImpl<T>;
+ using Super = FoldingSetImpl<ContextualFoldingSet, T>;
using Node = typename Super::Node;
Ctx Context;
+ static const Ctx &getContext(const FoldingSetBase *Base) {
+ return static_cast<const ContextualFoldingSet*>(Base)->Context;
+ }
+
/// GetNodeProfile - Each instantiatation of the FoldingSet needs to provide a
/// way to convert nodes into a unique specifier.
- void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const override {
+ static void GetNodeProfile(const FoldingSetBase *Base, Node *N,
+ FoldingSetNodeID &ID) {
T *TN = static_cast<T *>(N);
- ContextualFoldingSetTrait<T, Ctx>::Profile(*TN, ID, Context);
+ ContextualFoldingSetTrait<T, Ctx>::Profile(*TN, ID, getContext(Base));
}
- bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
- FoldingSetNodeID &TempID) const override {
+ static bool NodeEquals(const FoldingSetBase *Base, Node *N,
+ const FoldingSetNodeID &ID, unsigned IDHash,
+ FoldingSetNodeID &TempID) {
T *TN = static_cast<T *>(N);
return ContextualFoldingSetTrait<T, Ctx>::Equals(*TN, ID, IDHash, TempID,
- Context);
+ getContext(Base));
}
- unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const override {
+ static unsigned ComputeNodeHash(const FoldingSetBase *Base, Node *N,
+ FoldingSetNodeID &TempID) {
T *TN = static_cast<T *>(N);
- return ContextualFoldingSetTrait<T, Ctx>::ComputeHash(*TN, TempID, Context);
+ return ContextualFoldingSetTrait<T, Ctx>::ComputeHash(*TN, TempID,
+ getContext(Base));
}
+ static const FoldingSetBase::FoldingSetInfo &getFoldingSetInfo() {
+ static constexpr FoldingSetBase::FoldingSetInfo Info = {
+ GetNodeProfile, NodeEquals, ComputeNodeHash};
+ return Info;
+ }
+ friend Super;
+
public:
explicit ContextualFoldingSet(Ctx Context, unsigned Log2InitSize = 6)
: Super(Log2InitSize), Context(Context) {}
diff --git a/linux-x64/clang/include/llvm/ADT/FunctionExtras.h b/linux-x64/clang/include/llvm/ADT/FunctionExtras.h
index 121aa52..7f8fb10 100644
--- a/linux-x64/clang/include/llvm/ADT/FunctionExtras.h
+++ b/linux-x64/clang/include/llvm/ADT/FunctionExtras.h
@@ -11,11 +11,11 @@
/// in `<function>`.
///
/// It provides `unique_function`, which works like `std::function` but supports
-/// move-only callable objects.
+/// move-only callable objects and const-qualification.
///
/// Future plans:
-/// - Add a `function` that provides const, volatile, and ref-qualified support,
-/// which doesn't work with `std::function`.
+/// - Add a `function` that provides ref-qualified support, which doesn't work
+/// with `std::function`.
/// - Provide support for specifying multiple signatures to type erase callable
/// objects with an overload set, such as those produced by generic lambdas.
/// - Expand to include a copyable utility that directly replaces std::function
@@ -34,23 +34,42 @@
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/Support/MemAlloc.h"
#include "llvm/Support/type_traits.h"
#include <memory>
+#include <type_traits>
namespace llvm {
+/// unique_function is a type-erasing functor similar to std::function.
+///
+/// It can hold move-only function objects, like lambdas capturing unique_ptrs.
+/// Accordingly, it is movable but not copyable.
+///
+/// It supports const-qualification:
+/// - unique_function<int() const> has a const operator().
+/// It can only hold functions which themselves have a const operator().
+/// - unique_function<int()> has a non-const operator().
+/// It can hold functions with a non-const operator(), like mutable lambdas.
template <typename FunctionT> class unique_function;
-template <typename ReturnT, typename... ParamTs>
-class unique_function<ReturnT(ParamTs...)> {
+namespace detail {
+
+template <typename T>
+using EnableIfTrivial =
+ std::enable_if_t<llvm::is_trivially_move_constructible<T>::value &&
+ std::is_trivially_destructible<T>::value>;
+
+template <typename ReturnT, typename... ParamTs> class UniqueFunctionBase {
+protected:
static constexpr size_t InlineStorageSize = sizeof(void *) * 3;
- // MSVC has a bug and ICEs if we give it a particular dependent value
- // expression as part of the `std::conditional` below. To work around this,
- // we build that into a template struct's constexpr bool.
- template <typename T> struct IsSizeLessThanThresholdT {
- static constexpr bool value = sizeof(T) <= (2 * sizeof(void *));
- };
+ template <typename T, class = void>
+ struct IsSizeLessThanThresholdT : std::false_type {};
+
+ template <typename T>
+ struct IsSizeLessThanThresholdT<
+ T, std::enable_if_t<sizeof(T) <= 2 * sizeof(void *)>> : std::true_type {};
// Provide a type function to map parameters that won't observe extra copies
// or moves and which are small enough to likely pass in register to values
@@ -112,8 +131,11 @@
// For in-line storage, we just provide an aligned character buffer. We
// provide three pointers worth of storage here.
- typename std::aligned_storage<InlineStorageSize, alignof(void *)>::type
- InlineStorage;
+ // This is mutable as an inlined `const unique_function<void() const>` may
+ // still modify its own mutable members.
+ mutable
+ typename std::aligned_storage<InlineStorageSize, alignof(void *)>::type
+ InlineStorage;
} StorageUnion;
// A compressed pointer to either our dispatching callback or our table of
@@ -136,11 +158,25 @@
.template get<NonTrivialCallbacks *>();
}
- void *getInlineStorage() { return &StorageUnion.InlineStorage; }
+ CallPtrT getCallPtr() const {
+ return isTrivialCallback() ? getTrivialCallback()
+ : getNonTrivialCallbacks()->CallPtr;
+ }
- void *getOutOfLineStorage() {
+ // These three functions are only const in the narrow sense. They return
+ // mutable pointers to function state.
+ // This allows unique_function<T const>::operator() to be const, even if the
+ // underlying functor may be internally mutable.
+ //
+ // const callers must ensure they're only used in const-correct ways.
+ void *getCalleePtr() const {
+ return isInlineStorage() ? getInlineStorage() : getOutOfLineStorage();
+ }
+ void *getInlineStorage() const { return &StorageUnion.InlineStorage; }
+ void *getOutOfLineStorage() const {
return StorageUnion.OutOfLineStorage.StoragePtr;
}
+
size_t getOutOfLineStorageSize() const {
return StorageUnion.OutOfLineStorage.Size;
}
@@ -152,10 +188,11 @@
StorageUnion.OutOfLineStorage = {Ptr, Size, Alignment};
}
- template <typename CallableT>
- static ReturnT CallImpl(void *CallableAddr, AdjustedParamT<ParamTs>... Params) {
- return (*reinterpret_cast<CallableT *>(CallableAddr))(
- std::forward<ParamTs>(Params)...);
+ template <typename CalledAsT>
+ static ReturnT CallImpl(void *CallableAddr,
+ AdjustedParamT<ParamTs>... Params) {
+ auto &Func = *reinterpret_cast<CalledAsT *>(CallableAddr);
+ return Func(std::forward<ParamTs>(Params)...);
}
template <typename CallableT>
@@ -169,11 +206,54 @@
reinterpret_cast<CallableT *>(CallableAddr)->~CallableT();
}
-public:
- unique_function() = default;
- unique_function(std::nullptr_t /*null_callable*/) {}
+ // The pointers to call/move/destroy functions are determined for each
+ // callable type (and called-as type, which determines the overload chosen).
+ // (definitions are out-of-line).
- ~unique_function() {
+ // By default, we need an object that contains all the different
+ // type erased behaviors needed. Create a static instance of the struct type
+ // here and each instance will contain a pointer to it.
+ // Wrap in a struct to avoid https://gcc.gnu.org/PR71954
+ template <typename CallableT, typename CalledAs, typename Enable = void>
+ struct CallbacksHolder {
+ static NonTrivialCallbacks Callbacks;
+ };
+ // See if we can create a trivial callback. We need the callable to be
+ // trivially moved and trivially destroyed so that we don't have to store
+ // type erased callbacks for those operations.
+ template <typename CallableT, typename CalledAs>
+ struct CallbacksHolder<CallableT, CalledAs, EnableIfTrivial<CallableT>> {
+ static TrivialCallback Callbacks;
+ };
+
+ // A simple tag type so the call-as type to be passed to the constructor.
+ template <typename T> struct CalledAs {};
+
+ // Essentially the "main" unique_function constructor, but subclasses
+ // provide the qualified type to be used for the call.
+ // (We always store a T, even if the call will use a pointer to const T).
+ template <typename CallableT, typename CalledAsT>
+ UniqueFunctionBase(CallableT Callable, CalledAs<CalledAsT>) {
+ bool IsInlineStorage = true;
+ void *CallableAddr = getInlineStorage();
+ if (sizeof(CallableT) > InlineStorageSize ||
+ alignof(CallableT) > alignof(decltype(StorageUnion.InlineStorage))) {
+ IsInlineStorage = false;
+ // Allocate out-of-line storage. FIXME: Use an explicit alignment
+ // parameter in C++17 mode.
+ auto Size = sizeof(CallableT);
+ auto Alignment = alignof(CallableT);
+ CallableAddr = allocate_buffer(Size, Alignment);
+ setOutOfLineStorage(CallableAddr, Size, Alignment);
+ }
+
+ // Now move into the storage.
+ new (CallableAddr) CallableT(std::move(Callable));
+ CallbackAndInlineFlag.setPointerAndInt(
+ &CallbacksHolder<CallableT, CalledAsT>::Callbacks, IsInlineStorage);
+ }
+
+ ~UniqueFunctionBase() {
if (!CallbackAndInlineFlag.getPointer())
return;
@@ -189,7 +269,7 @@
getOutOfLineStorageAlignment());
}
- unique_function(unique_function &&RHS) noexcept {
+ UniqueFunctionBase(UniqueFunctionBase &&RHS) noexcept {
// Copy the callback and inline flag.
CallbackAndInlineFlag = RHS.CallbackAndInlineFlag;
@@ -218,75 +298,86 @@
#endif
}
- unique_function &operator=(unique_function &&RHS) noexcept {
+ UniqueFunctionBase &operator=(UniqueFunctionBase &&RHS) noexcept {
if (this == &RHS)
return *this;
// Because we don't try to provide any exception safety guarantees we can
// implement move assignment very simply by first destroying the current
// object and then move-constructing over top of it.
- this->~unique_function();
- new (this) unique_function(std::move(RHS));
+ this->~UniqueFunctionBase();
+ new (this) UniqueFunctionBase(std::move(RHS));
return *this;
}
- template <typename CallableT> unique_function(CallableT Callable) {
- bool IsInlineStorage = true;
- void *CallableAddr = getInlineStorage();
- if (sizeof(CallableT) > InlineStorageSize ||
- alignof(CallableT) > alignof(decltype(StorageUnion.InlineStorage))) {
- IsInlineStorage = false;
- // Allocate out-of-line storage. FIXME: Use an explicit alignment
- // parameter in C++17 mode.
- auto Size = sizeof(CallableT);
- auto Alignment = alignof(CallableT);
- CallableAddr = allocate_buffer(Size, Alignment);
- setOutOfLineStorage(CallableAddr, Size, Alignment);
- }
+ UniqueFunctionBase() = default;
- // Now move into the storage.
- new (CallableAddr) CallableT(std::move(Callable));
-
- // See if we can create a trivial callback. We need the callable to be
- // trivially moved and trivially destroyed so that we don't have to store
- // type erased callbacks for those operations.
- //
- // FIXME: We should use constexpr if here and below to avoid instantiating
- // the non-trivial static objects when unnecessary. While the linker should
- // remove them, it is still wasteful.
- if (llvm::is_trivially_move_constructible<CallableT>::value &&
- std::is_trivially_destructible<CallableT>::value) {
- // We need to create a nicely aligned object. We use a static variable
- // for this because it is a trivial struct.
- static TrivialCallback Callback = { &CallImpl<CallableT> };
-
- CallbackAndInlineFlag = {&Callback, IsInlineStorage};
- return;
- }
-
- // Otherwise, we need to point at an object that contains all the different
- // type erased behaviors needed. Create a static instance of the struct type
- // here and then use a pointer to that.
- static NonTrivialCallbacks Callbacks = {
- &CallImpl<CallableT>, &MoveImpl<CallableT>, &DestroyImpl<CallableT>};
-
- CallbackAndInlineFlag = {&Callbacks, IsInlineStorage};
- }
-
- ReturnT operator()(ParamTs... Params) {
- void *CallableAddr =
- isInlineStorage() ? getInlineStorage() : getOutOfLineStorage();
-
- return (isTrivialCallback()
- ? getTrivialCallback()
- : getNonTrivialCallbacks()->CallPtr)(CallableAddr, Params...);
- }
-
+public:
explicit operator bool() const {
return (bool)CallbackAndInlineFlag.getPointer();
}
};
+template <typename R, typename... P>
+template <typename CallableT, typename CalledAsT, typename Enable>
+typename UniqueFunctionBase<R, P...>::NonTrivialCallbacks UniqueFunctionBase<
+ R, P...>::CallbacksHolder<CallableT, CalledAsT, Enable>::Callbacks = {
+ &CallImpl<CalledAsT>, &MoveImpl<CallableT>, &DestroyImpl<CallableT>};
+
+template <typename R, typename... P>
+template <typename CallableT, typename CalledAsT>
+typename UniqueFunctionBase<R, P...>::TrivialCallback
+ UniqueFunctionBase<R, P...>::CallbacksHolder<
+ CallableT, CalledAsT, EnableIfTrivial<CallableT>>::Callbacks{
+ &CallImpl<CalledAsT>};
+
+} // namespace detail
+
+template <typename R, typename... P>
+class unique_function<R(P...)> : public detail::UniqueFunctionBase<R, P...> {
+ using Base = detail::UniqueFunctionBase<R, P...>;
+
+public:
+ unique_function() = default;
+ unique_function(std::nullptr_t) {}
+ unique_function(unique_function &&) = default;
+ unique_function(const unique_function &) = delete;
+ unique_function &operator=(unique_function &&) = default;
+ unique_function &operator=(const unique_function &) = delete;
+
+ template <typename CallableT>
+ unique_function(CallableT Callable)
+ : Base(std::forward<CallableT>(Callable),
+ typename Base::template CalledAs<CallableT>{}) {}
+
+ R operator()(P... Params) {
+ return this->getCallPtr()(this->getCalleePtr(), Params...);
+ }
+};
+
+template <typename R, typename... P>
+class unique_function<R(P...) const>
+ : public detail::UniqueFunctionBase<R, P...> {
+ using Base = detail::UniqueFunctionBase<R, P...>;
+
+public:
+ unique_function() = default;
+ unique_function(std::nullptr_t) {}
+ unique_function(unique_function &&) = default;
+ unique_function(const unique_function &) = delete;
+ unique_function &operator=(unique_function &&) = default;
+ unique_function &operator=(const unique_function &) = delete;
+
+ template <typename CallableT>
+ unique_function(CallableT Callable)
+ : Base(std::forward<CallableT>(Callable),
+ typename Base::template CalledAs<const CallableT>{}) {}
+
+ R operator()(P... Params) const {
+ return this->getCallPtr()(this->getCalleePtr(), Params...);
+ }
+};
+
} // end namespace llvm
#endif // LLVM_ADT_FUNCTION_H
diff --git a/linux-x64/clang/include/llvm/ADT/Hashing.h b/linux-x64/clang/include/llvm/ADT/Hashing.h
index f639aa2..cb53b7f 100644
--- a/linux-x64/clang/include/llvm/ADT/Hashing.h
+++ b/linux-x64/clang/include/llvm/ADT/Hashing.h
@@ -45,13 +45,14 @@
#define LLVM_ADT_HASHING_H
#include "llvm/Support/DataTypes.h"
-#include "llvm/Support/Host.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstring>
#include <string>
+#include <tuple>
#include <utility>
namespace llvm {
@@ -101,8 +102,7 @@
/// differing argument types even if they would implicit promote to a common
/// type without changing the value.
template <typename T>
-typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
-hash_value(T value);
+std::enable_if_t<is_integral_or_enum<T>::value, hash_code> hash_value(T value);
/// Compute a hash_code for a pointer's address.
///
@@ -113,6 +113,10 @@
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg);
+/// Compute a hash_code for a tuple.
+template <typename... Ts>
+hash_code hash_value(const std::tuple<Ts...> &arg);
+
/// Compute a hash_code for a standard string.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg);
@@ -158,10 +162,10 @@
}
/// Some primes between 2^63 and 2^64 for various uses.
-static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
-static const uint64_t k1 = 0xb492b66fbe98f273ULL;
-static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
-static const uint64_t k3 = 0xc949d7c7509e6557ULL;
+static constexpr uint64_t k0 = 0xc3a5c85c97cb3127ULL;
+static constexpr uint64_t k1 = 0xb492b66fbe98f273ULL;
+static constexpr uint64_t k2 = 0x9ae16a3b2f90404fULL;
+static constexpr uint64_t k3 = 0xc949d7c7509e6557ULL;
/// Bitwise right rotate.
/// Normally this will compile to a single instruction, especially if the
@@ -191,7 +195,7 @@
uint8_t b = s[len >> 1];
uint8_t c = s[len - 1];
uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
- uint32_t z = len + (static_cast<uint32_t>(c) << 2);
+ uint32_t z = static_cast<uint32_t>(len) + (static_cast<uint32_t>(c) << 2);
return shift_mix(y * k2 ^ z * k3 ^ seed) * k2;
}
@@ -257,7 +261,7 @@
/// Currently, the algorithm for computing hash codes is based on CityHash and
/// keeps 56 bytes of arbitrary state.
struct hash_state {
- uint64_t h0, h1, h2, h3, h4, h5, h6;
+ uint64_t h0 = 0, h1 = 0, h2 = 0, h3 = 0, h4 = 0, h5 = 0, h6 = 0;
/// Create a new hash_state structure and initialize it based on the
/// seed and the first 64-byte chunk.
@@ -360,7 +364,7 @@
/// Helper to get the hashable data representation for a type.
/// This variant is enabled when the type itself can be used.
template <typename T>
-typename std::enable_if<is_hashable_data<T>::value, T>::type
+std::enable_if_t<is_hashable_data<T>::value, T>
get_hashable_data(const T &value) {
return value;
}
@@ -368,7 +372,7 @@
/// This variant is enabled when we must first call hash_value and use the
/// result as our data.
template <typename T>
-typename std::enable_if<!is_hashable_data<T>::value, size_t>::type
+std::enable_if_t<!is_hashable_data<T>::value, size_t>
get_hashable_data(const T &value) {
using ::llvm::hash_value;
return hash_value(value);
@@ -442,7 +446,7 @@
/// are stored in contiguous memory, this routine avoids copying each value
/// and directly reads from the underlying memory.
template <typename ValueT>
-typename std::enable_if<is_hashable_data<ValueT>::value, hash_code>::type
+std::enable_if_t<is_hashable_data<ValueT>::value, hash_code>
hash_combine_range_impl(ValueT *first, ValueT *last) {
const uint64_t seed = get_execution_seed();
const char *s_begin = reinterpret_cast<const char *>(first);
@@ -492,7 +496,7 @@
/// useful at minimizing the code in the recursive calls to ease the pain
/// caused by a lack of variadic functions.
struct hash_combine_recursive_helper {
- char buffer[64];
+ char buffer[64] = {};
hash_state state;
const uint64_t seed;
@@ -540,7 +544,7 @@
// store types smaller than the buffer.
if (!store_and_advance(buffer_ptr, buffer_end, data,
partial_store_size))
- abort();
+ llvm_unreachable("buffer smaller than stored type");
}
return buffer_ptr;
}
@@ -627,8 +631,7 @@
// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T>
-typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
-hash_value(T value) {
+std::enable_if_t<is_integral_or_enum<T>::value, hash_code> hash_value(T value) {
return ::llvm::hashing::detail::hash_integer_value(
static_cast<uint64_t>(value));
}
@@ -647,6 +650,26 @@
return hash_combine(arg.first, arg.second);
}
+// Implementation details for the hash_value overload for std::tuple<...>(...).
+namespace hashing {
+namespace detail {
+
+template <typename... Ts, std::size_t... Indices>
+hash_code hash_value_tuple_helper(const std::tuple<Ts...> &arg,
+ std::index_sequence<Indices...> indices) {
+ return hash_combine(std::get<Indices>(arg)...);
+}
+
+} // namespace detail
+} // namespace hashing
+
+template <typename... Ts>
+hash_code hash_value(const std::tuple<Ts...> &arg) {
+ // TODO: Use std::apply when LLVM starts using C++17.
+ return ::llvm::hashing::detail::hash_value_tuple_helper(
+ arg, typename std::index_sequence_for<Ts...>());
+}
+
// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T>
diff --git a/linux-x64/clang/include/llvm/ADT/ImmutableMap.h b/linux-x64/clang/include/llvm/ADT/ImmutableMap.h
index 86fd7fe..81b21a7 100644
--- a/linux-x64/clang/include/llvm/ADT/ImmutableMap.h
+++ b/linux-x64/clang/include/llvm/ADT/ImmutableMap.h
@@ -70,33 +70,14 @@
using TreeTy = ImutAVLTree<ValInfo>;
protected:
- TreeTy* Root;
+ IntrusiveRefCntPtr<TreeTy> Root;
public:
/// Constructs a map from a pointer to a tree root. In general one
/// should use a Factory object to create maps instead of directly
/// invoking the constructor, but there are cases where make this
/// constructor public is useful.
- explicit ImmutableMap(const TreeTy* R) : Root(const_cast<TreeTy*>(R)) {
- if (Root) { Root->retain(); }
- }
-
- ImmutableMap(const ImmutableMap &X) : Root(X.Root) {
- if (Root) { Root->retain(); }
- }
-
- ~ImmutableMap() {
- if (Root) { Root->release(); }
- }
-
- ImmutableMap &operator=(const ImmutableMap &X) {
- if (Root != X.Root) {
- if (X.Root) { X.Root->retain(); }
- if (Root) { Root->release(); }
- Root = X.Root;
- }
- return *this;
- }
+ explicit ImmutableMap(const TreeTy *R) : Root(const_cast<TreeTy *>(R)) {}
class Factory {
typename TreeTy::Factory F;
@@ -115,12 +96,12 @@
LLVM_NODISCARD ImmutableMap add(ImmutableMap Old, key_type_ref K,
data_type_ref D) {
- TreeTy *T = F.add(Old.Root, std::pair<key_type,data_type>(K,D));
+ TreeTy *T = F.add(Old.Root.get(), std::pair<key_type, data_type>(K, D));
return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
}
LLVM_NODISCARD ImmutableMap remove(ImmutableMap Old, key_type_ref K) {
- TreeTy *T = F.remove(Old.Root,K);
+ TreeTy *T = F.remove(Old.Root.get(), K);
return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
}
@@ -134,19 +115,20 @@
}
bool operator==(const ImmutableMap &RHS) const {
- return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
+ return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root;
}
bool operator!=(const ImmutableMap &RHS) const {
- return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
+ return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get())
+ : Root != RHS.Root;
}
TreeTy *getRoot() const {
if (Root) { Root->retain(); }
- return Root;
+ return Root.get();
}
- TreeTy *getRootWithoutRetain() const { return Root; }
+ TreeTy *getRootWithoutRetain() const { return Root.get(); }
void manualRetain() {
if (Root) Root->retain();
@@ -217,7 +199,7 @@
data_type_ref getData() const { return (*this)->second; }
};
- iterator begin() const { return iterator(Root); }
+ iterator begin() const { return iterator(Root.get()); }
iterator end() const { return iterator(); }
data_type* lookup(key_type_ref K) const {
@@ -243,7 +225,7 @@
unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
static inline void Profile(FoldingSetNodeID& ID, const ImmutableMap& M) {
- ID.AddPointer(M.Root);
+ ID.AddPointer(M.Root.get());
}
inline void Profile(FoldingSetNodeID& ID) const {
@@ -266,7 +248,7 @@
using FactoryTy = typename TreeTy::Factory;
protected:
- TreeTy *Root;
+ IntrusiveRefCntPtr<TreeTy> Root;
FactoryTy *Factory;
public:
@@ -274,44 +256,12 @@
/// should use a Factory object to create maps instead of directly
/// invoking the constructor, but there are cases where make this
/// constructor public is useful.
- explicit ImmutableMapRef(const TreeTy *R, FactoryTy *F)
- : Root(const_cast<TreeTy *>(R)), Factory(F) {
- if (Root) {
- Root->retain();
- }
- }
+ ImmutableMapRef(const TreeTy *R, FactoryTy *F)
+ : Root(const_cast<TreeTy *>(R)), Factory(F) {}
- explicit ImmutableMapRef(const ImmutableMap<KeyT, ValT> &X,
- typename ImmutableMap<KeyT, ValT>::Factory &F)
- : Root(X.getRootWithoutRetain()),
- Factory(F.getTreeFactory()) {
- if (Root) { Root->retain(); }
- }
-
- ImmutableMapRef(const ImmutableMapRef &X) : Root(X.Root), Factory(X.Factory) {
- if (Root) {
- Root->retain();
- }
- }
-
- ~ImmutableMapRef() {
- if (Root)
- Root->release();
- }
-
- ImmutableMapRef &operator=(const ImmutableMapRef &X) {
- if (Root != X.Root) {
- if (X.Root)
- X.Root->retain();
-
- if (Root)
- Root->release();
-
- Root = X.Root;
- Factory = X.Factory;
- }
- return *this;
- }
+ ImmutableMapRef(const ImmutableMap<KeyT, ValT> &X,
+ typename ImmutableMap<KeyT, ValT>::Factory &F)
+ : Root(X.getRootWithoutRetain()), Factory(F.getTreeFactory()) {}
static inline ImmutableMapRef getEmptyMap(FactoryTy *F) {
return ImmutableMapRef(0, F);
@@ -326,12 +276,13 @@
}
ImmutableMapRef add(key_type_ref K, data_type_ref D) const {
- TreeTy *NewT = Factory->add(Root, std::pair<key_type, data_type>(K, D));
+ TreeTy *NewT =
+ Factory->add(Root.get(), std::pair<key_type, data_type>(K, D));
return ImmutableMapRef(NewT, Factory);
}
ImmutableMapRef remove(key_type_ref K) const {
- TreeTy *NewT = Factory->remove(Root, K);
+ TreeTy *NewT = Factory->remove(Root.get(), K);
return ImmutableMapRef(NewT, Factory);
}
@@ -340,15 +291,16 @@
}
ImmutableMap<KeyT, ValT> asImmutableMap() const {
- return ImmutableMap<KeyT, ValT>(Factory->getCanonicalTree(Root));
+ return ImmutableMap<KeyT, ValT>(Factory->getCanonicalTree(Root.get()));
}
bool operator==(const ImmutableMapRef &RHS) const {
- return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
+ return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root;
}
bool operator!=(const ImmutableMapRef &RHS) const {
- return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
+ return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get())
+ : Root != RHS.Root;
}
bool isEmpty() const { return !Root; }
@@ -377,7 +329,7 @@
data_type_ref getData() const { return (*this)->second; }
};
- iterator begin() const { return iterator(Root); }
+ iterator begin() const { return iterator(Root.get()); }
iterator end() const { return iterator(); }
data_type *lookup(key_type_ref K) const {
@@ -403,7 +355,7 @@
unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
static inline void Profile(FoldingSetNodeID &ID, const ImmutableMapRef &M) {
- ID.AddPointer(M.Root);
+ ID.AddPointer(M.Root.get());
}
inline void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
diff --git a/linux-x64/clang/include/llvm/ADT/ImmutableSet.h b/linux-x64/clang/include/llvm/ADT/ImmutableSet.h
index 5871054..f19913f 100644
--- a/linux-x64/clang/include/llvm/ADT/ImmutableSet.h
+++ b/linux-x64/clang/include/llvm/ADT/ImmutableSet.h
@@ -15,6 +15,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/Allocator.h"
@@ -169,7 +170,7 @@
bool contains(key_type_ref K) { return (bool) find(K); }
/// foreach - A member template the accepts invokes operator() on a functor
- /// object (specifed by Callback) for every node/subtree in the tree.
+ /// object (specified by Callback) for every node/subtree in the tree.
/// Nodes are visited using an inorder traversal.
template <typename Callback>
void foreach(Callback& C) {
@@ -183,7 +184,7 @@
}
/// validateTree - A utility method that checks that the balancing and
- /// ordering invariants of the tree are satisifed. It is a recursive
+ /// ordering invariants of the tree are satisfied. It is a recursive
/// method that returns the height of the tree, which is then consumed
/// by the enclosing validateTree call. External callers should ignore the
/// return value. An invalid tree will cause an assertion to fire in
@@ -205,8 +206,7 @@
ImutInfo::KeyOfValue(getValue()))) &&
"Value in left child is not less that current value");
-
- assert(!(getRight() ||
+ assert((!getRight() ||
ImutInfo::isLess(ImutInfo::KeyOfValue(getValue()),
ImutInfo::KeyOfValue(getRight()->getValue()))) &&
"Current value is not less that value of right child");
@@ -358,6 +358,12 @@
}
};
+template <typename ImutInfo>
+struct IntrusiveRefCntPtrInfo<ImutAVLTree<ImutInfo>> {
+ static void retain(ImutAVLTree<ImutInfo> *Tree) { Tree->retain(); }
+ static void release(ImutAVLTree<ImutInfo> *Tree) { Tree->release(); }
+};
+
//===----------------------------------------------------------------------===//
// Immutable AVL-Tree Factory class.
//===----------------------------------------------------------------------===//
@@ -451,7 +457,7 @@
//===--------------------------------------------------===//
// "createNode" is used to generate new tree roots that link
- // to other trees. The functon may also simply move links
+ // to other trees. The function may also simply move links
// in an existing root if that root is still marked mutable.
// This is necessary because otherwise our balancing code
// would leak memory as it would create nodes that are
@@ -962,33 +968,14 @@
using TreeTy = ImutAVLTree<ValInfo>;
private:
- TreeTy *Root;
+ IntrusiveRefCntPtr<TreeTy> Root;
public:
/// Constructs a set from a pointer to a tree root. In general one
/// should use a Factory object to create sets instead of directly
/// invoking the constructor, but there are cases where make this
/// constructor public is useful.
- explicit ImmutableSet(TreeTy* R) : Root(R) {
- if (Root) { Root->retain(); }
- }
-
- ImmutableSet(const ImmutableSet &X) : Root(X.Root) {
- if (Root) { Root->retain(); }
- }
-
- ~ImmutableSet() {
- if (Root) { Root->release(); }
- }
-
- ImmutableSet &operator=(const ImmutableSet &X) {
- if (Root != X.Root) {
- if (X.Root) { X.Root->retain(); }
- if (Root) { Root->release(); }
- Root = X.Root;
- }
- return *this;
- }
+ explicit ImmutableSet(TreeTy *R) : Root(R) {}
class Factory {
typename TreeTy::Factory F;
@@ -1017,7 +1004,7 @@
/// The memory allocated to represent the set is released when the
/// factory object that created the set is destroyed.
LLVM_NODISCARD ImmutableSet add(ImmutableSet Old, value_type_ref V) {
- TreeTy *NewT = F.add(Old.Root, V);
+ TreeTy *NewT = F.add(Old.Root.get(), V);
return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
}
@@ -1029,7 +1016,7 @@
/// The memory allocated to represent the set is released when the
/// factory object that created the set is destroyed.
LLVM_NODISCARD ImmutableSet remove(ImmutableSet Old, value_type_ref V) {
- TreeTy *NewT = F.remove(Old.Root, V);
+ TreeTy *NewT = F.remove(Old.Root.get(), V);
return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
}
@@ -1048,21 +1035,20 @@
}
bool operator==(const ImmutableSet &RHS) const {
- return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
+ return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root;
}
bool operator!=(const ImmutableSet &RHS) const {
- return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
+ return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get())
+ : Root != RHS.Root;
}
TreeTy *getRoot() {
if (Root) { Root->retain(); }
- return Root;
+ return Root.get();
}
- TreeTy *getRootWithoutRetain() const {
- return Root;
- }
+ TreeTy *getRootWithoutRetain() const { return Root.get(); }
/// isEmpty - Return true if the set contains no elements.
bool isEmpty() const { return !Root; }
@@ -1083,7 +1069,7 @@
using iterator = ImutAVLValueIterator<ImmutableSet>;
- iterator begin() const { return iterator(Root); }
+ iterator begin() const { return iterator(Root.get()); }
iterator end() const { return iterator(); }
//===--------------------------------------------------===//
@@ -1093,7 +1079,7 @@
unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
static void Profile(FoldingSetNodeID &ID, const ImmutableSet &S) {
- ID.AddPointer(S.Root);
+ ID.AddPointer(S.Root.get());
}
void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
@@ -1115,7 +1101,7 @@
using FactoryTy = typename TreeTy::Factory;
private:
- TreeTy *Root;
+ IntrusiveRefCntPtr<TreeTy> Root;
FactoryTy *Factory;
public:
@@ -1123,42 +1109,18 @@
/// should use a Factory object to create sets instead of directly
/// invoking the constructor, but there are cases where make this
/// constructor public is useful.
- explicit ImmutableSetRef(TreeTy* R, FactoryTy *F)
- : Root(R),
- Factory(F) {
- if (Root) { Root->retain(); }
- }
-
- ImmutableSetRef(const ImmutableSetRef &X)
- : Root(X.Root),
- Factory(X.Factory) {
- if (Root) { Root->retain(); }
- }
-
- ~ImmutableSetRef() {
- if (Root) { Root->release(); }
- }
-
- ImmutableSetRef &operator=(const ImmutableSetRef &X) {
- if (Root != X.Root) {
- if (X.Root) { X.Root->retain(); }
- if (Root) { Root->release(); }
- Root = X.Root;
- Factory = X.Factory;
- }
- return *this;
- }
+ ImmutableSetRef(TreeTy *R, FactoryTy *F) : Root(R), Factory(F) {}
static ImmutableSetRef getEmptySet(FactoryTy *F) {
return ImmutableSetRef(0, F);
}
ImmutableSetRef add(value_type_ref V) {
- return ImmutableSetRef(Factory->add(Root, V), Factory);
+ return ImmutableSetRef(Factory->add(Root.get(), V), Factory);
}
ImmutableSetRef remove(value_type_ref V) {
- return ImmutableSetRef(Factory->remove(Root, V), Factory);
+ return ImmutableSetRef(Factory->remove(Root.get(), V), Factory);
}
/// Returns true if the set contains the specified value.
@@ -1167,20 +1129,19 @@
}
ImmutableSet<ValT> asImmutableSet(bool canonicalize = true) const {
- return ImmutableSet<ValT>(canonicalize ?
- Factory->getCanonicalTree(Root) : Root);
+ return ImmutableSet<ValT>(
+ canonicalize ? Factory->getCanonicalTree(Root.get()) : Root.get());
}
- TreeTy *getRootWithoutRetain() const {
- return Root;
- }
+ TreeTy *getRootWithoutRetain() const { return Root.get(); }
bool operator==(const ImmutableSetRef &RHS) const {
- return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
+ return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root;
}
bool operator!=(const ImmutableSetRef &RHS) const {
- return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
+ return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get())
+ : Root != RHS.Root;
}
/// isEmpty - Return true if the set contains no elements.
@@ -1196,7 +1157,7 @@
using iterator = ImutAVLValueIterator<ImmutableSetRef>;
- iterator begin() const { return iterator(Root); }
+ iterator begin() const { return iterator(Root.get()); }
iterator end() const { return iterator(); }
//===--------------------------------------------------===//
@@ -1206,7 +1167,7 @@
unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
static void Profile(FoldingSetNodeID &ID, const ImmutableSetRef &S) {
- ID.AddPointer(S.Root);
+ ID.AddPointer(S.Root.get());
}
void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
diff --git a/linux-x64/clang/include/llvm/ADT/IntervalMap.h b/linux-x64/clang/include/llvm/ADT/IntervalMap.h
index 12828c4..0b6c7d6 100644
--- a/linux-x64/clang/include/llvm/ADT/IntervalMap.h
+++ b/linux-x64/clang/include/llvm/ADT/IntervalMap.h
@@ -491,7 +491,7 @@
struct CacheAlignedPointerTraits {
static inline void *getAsVoidPointer(void *P) { return P; }
static inline void *getFromVoidPointer(void *P) { return P; }
- enum { NumLowBitsAvailable = Log2CacheLine };
+ static constexpr int NumLowBitsAvailable = Log2CacheLine;
};
PointerIntPair<void*, Log2CacheLine, unsigned, CacheAlignedPointerTraits> pip;
@@ -823,7 +823,7 @@
}
/// reset - Reset cached information about node(Level) from subtree(Level -1).
- /// @param Level 1..height. THe node to update after parent node changed.
+ /// @param Level 1..height. The node to update after parent node changed.
void reset(unsigned Level) {
path[Level] = Entry(subtree(Level - 1), offset(Level));
}
@@ -884,7 +884,7 @@
}
/// getLeftSibling - Get the left sibling node at Level, or a null NodeRef.
- /// @param Level Get the sinbling to node(Level).
+ /// @param Level Get the sibling to node(Level).
/// @return Left sibling, or NodeRef().
NodeRef getRightSibling(unsigned Level) const;
@@ -963,7 +963,6 @@
private:
// The root data is either a RootLeaf or a RootBranchData instance.
- LLVM_ALIGNAS(RootLeaf) LLVM_ALIGNAS(RootBranchData)
AlignedCharArrayUnion<RootLeaf, RootBranchData> data;
// Tree height.
@@ -979,10 +978,7 @@
Allocator &allocator;
/// Represent data as a node type without breaking aliasing rules.
- template <typename T>
- T &dataAs() const {
- return *bit_cast<T *>(const_cast<char *>(data.buffer));
- }
+ template <typename T> T &dataAs() const { return *bit_cast<T *>(&data); }
const RootLeaf &rootLeaf() const {
assert(!branched() && "Cannot acces leaf data in branched root");
@@ -1040,7 +1036,7 @@
public:
explicit IntervalMap(Allocator &a) : height(0), rootSize(0), allocator(a) {
- assert((uintptr_t(data.buffer) & (alignof(RootLeaf) - 1)) == 0 &&
+ assert((uintptr_t(&data) & (alignof(RootLeaf) - 1)) == 0 &&
"Insufficient alignment");
new(&rootLeaf()) RootLeaf();
}
@@ -1396,7 +1392,7 @@
setRoot(map->rootSize);
}
- /// preincrement - move to the next interval.
+ /// preincrement - Move to the next interval.
const_iterator &operator++() {
assert(valid() && "Cannot increment end()");
if (++path.leafOffset() == path.leafSize() && branched())
@@ -1404,14 +1400,14 @@
return *this;
}
- /// postincrement - Dont do that!
+ /// postincrement - Don't do that!
const_iterator operator++(int) {
const_iterator tmp = *this;
operator++();
return tmp;
}
- /// predecrement - move to the previous interval.
+ /// predecrement - Move to the previous interval.
const_iterator &operator--() {
if (path.leafOffset() && (valid() || !branched()))
--path.leafOffset();
@@ -1420,7 +1416,7 @@
return *this;
}
- /// postdecrement - Dont do that!
+ /// postdecrement - Don't do that!
const_iterator operator--(int) {
const_iterator tmp = *this;
operator--();
diff --git a/linux-x64/clang/include/llvm/ADT/IntrusiveRefCntPtr.h b/linux-x64/clang/include/llvm/ADT/IntrusiveRefCntPtr.h
index 6d97fe1..ca4c40d 100644
--- a/linux-x64/clang/include/llvm/ADT/IntrusiveRefCntPtr.h
+++ b/linux-x64/clang/include/llvm/ADT/IntrusiveRefCntPtr.h
@@ -58,6 +58,7 @@
#include <atomic>
#include <cassert>
#include <cstddef>
+#include <memory>
namespace llvm {
@@ -70,10 +71,23 @@
template <class Derived> class RefCountedBase {
mutable unsigned RefCount = 0;
-public:
+protected:
RefCountedBase() = default;
RefCountedBase(const RefCountedBase &) {}
+ RefCountedBase &operator=(const RefCountedBase &) = delete;
+#ifndef NDEBUG
+ ~RefCountedBase() {
+ assert(RefCount == 0 &&
+ "Destruction occured when there are still references to this.");
+ }
+#else
+ // Default the destructor in release builds, A trivial destructor may enable
+ // better codegen.
+ ~RefCountedBase() = default;
+#endif
+
+public:
void Retain() const { ++RefCount; }
void Release() const {
@@ -85,10 +99,24 @@
/// A thread-safe version of \c RefCountedBase.
template <class Derived> class ThreadSafeRefCountedBase {
- mutable std::atomic<int> RefCount;
+ mutable std::atomic<int> RefCount{0};
protected:
- ThreadSafeRefCountedBase() : RefCount(0) {}
+ ThreadSafeRefCountedBase() = default;
+ ThreadSafeRefCountedBase(const ThreadSafeRefCountedBase &) {}
+ ThreadSafeRefCountedBase &
+ operator=(const ThreadSafeRefCountedBase &) = delete;
+
+#ifndef NDEBUG
+ ~ThreadSafeRefCountedBase() {
+ assert(RefCount == 0 &&
+ "Destruction occured when there are still references to this.");
+ }
+#else
+ // Default the destructor in release builds, A trivial destructor may enable
+ // better codegen.
+ ~ThreadSafeRefCountedBase() = default;
+#endif
public:
void Retain() const { RefCount.fetch_add(1, std::memory_order_relaxed); }
@@ -149,6 +177,11 @@
}
template <class X>
+ IntrusiveRefCntPtr(std::unique_ptr<X> S) : Obj(S.release()) {
+ retain();
+ }
+
+ template <class X>
IntrusiveRefCntPtr(const IntrusiveRefCntPtr<X> &S) : Obj(S.get()) {
retain();
}
@@ -264,6 +297,12 @@
}
};
+/// Factory function for creating intrusive ref counted pointers.
+template <typename T, typename... Args>
+IntrusiveRefCntPtr<T> makeIntrusiveRefCnt(Args &&...A) {
+ return IntrusiveRefCntPtr<T>(new T(std::forward<Args>(A)...));
+}
+
} // end namespace llvm
#endif // LLVM_ADT_INTRUSIVEREFCNTPTR_H
diff --git a/linux-x64/clang/include/llvm/ADT/Optional.h b/linux-x64/clang/include/llvm/ADT/Optional.h
index b45a740..daa9ee6 100644
--- a/linux-x64/clang/include/llvm/ADT/Optional.h
+++ b/linux-x64/clang/include/llvm/ADT/Optional.h
@@ -15,6 +15,7 @@
#ifndef LLVM_ADT_OPTIONAL_H
#define LLVM_ADT_OPTIONAL_H
+#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/type_traits.h"
@@ -43,21 +44,21 @@
public:
~OptionalStorage() { reset(); }
- OptionalStorage() noexcept : empty(), hasVal(false) {}
+ constexpr OptionalStorage() noexcept : empty(), hasVal(false) {}
- OptionalStorage(OptionalStorage const &other) : OptionalStorage() {
+ constexpr OptionalStorage(OptionalStorage const &other) : OptionalStorage() {
if (other.hasValue()) {
emplace(other.value);
}
}
- OptionalStorage(OptionalStorage &&other) : OptionalStorage() {
+ constexpr OptionalStorage(OptionalStorage &&other) : OptionalStorage() {
if (other.hasValue()) {
emplace(std::move(other.value));
}
}
template <class... Args>
- explicit OptionalStorage(in_place_t, Args &&... args)
+ constexpr explicit OptionalStorage(in_place_t, Args &&... args)
: value(std::forward<Args>(args)...), hasVal(true) {}
void reset() noexcept {
@@ -67,13 +68,13 @@
}
}
- bool hasValue() const noexcept { return hasVal; }
+ constexpr bool hasValue() const noexcept { return hasVal; }
T &getValue() LLVM_LVALUE_FUNCTION noexcept {
assert(hasVal);
return value;
}
- T const &getValue() const LLVM_LVALUE_FUNCTION noexcept {
+ constexpr T const &getValue() const LLVM_LVALUE_FUNCTION noexcept {
assert(hasVal);
return value;
}
@@ -148,16 +149,16 @@
public:
~OptionalStorage() = default;
- OptionalStorage() noexcept : empty{} {}
+ constexpr OptionalStorage() noexcept : empty{} {}
- OptionalStorage(OptionalStorage const &other) = default;
- OptionalStorage(OptionalStorage &&other) = default;
+ constexpr OptionalStorage(OptionalStorage const &other) = default;
+ constexpr OptionalStorage(OptionalStorage &&other) = default;
OptionalStorage &operator=(OptionalStorage const &other) = default;
OptionalStorage &operator=(OptionalStorage &&other) = default;
template <class... Args>
- explicit OptionalStorage(in_place_t, Args &&... args)
+ constexpr explicit OptionalStorage(in_place_t, Args &&... args)
: value(std::forward<Args>(args)...), hasVal(true) {}
void reset() noexcept {
@@ -167,13 +168,13 @@
}
}
- bool hasValue() const noexcept { return hasVal; }
+ constexpr bool hasValue() const noexcept { return hasVal; }
T &getValue() LLVM_LVALUE_FUNCTION noexcept {
assert(hasVal);
return value;
}
- T const &getValue() const LLVM_LVALUE_FUNCTION noexcept {
+ constexpr T const &getValue() const LLVM_LVALUE_FUNCTION noexcept {
assert(hasVal);
return value;
}
@@ -221,11 +222,12 @@
constexpr Optional() {}
constexpr Optional(NoneType) {}
- Optional(const T &y) : Storage(optional_detail::in_place_t{}, y) {}
- Optional(const Optional &O) = default;
+ constexpr Optional(const T &y) : Storage(optional_detail::in_place_t{}, y) {}
+ constexpr Optional(const Optional &O) = default;
- Optional(T &&y) : Storage(optional_detail::in_place_t{}, std::move(y)) {}
- Optional(Optional &&O) = default;
+ constexpr Optional(T &&y)
+ : Storage(optional_detail::in_place_t{}, std::move(y)) {}
+ constexpr Optional(Optional &&O) = default;
Optional &operator=(T &&y) {
Storage = std::move(y);
@@ -238,7 +240,7 @@
Storage.emplace(std::forward<ArgTypes>(Args)...);
}
- static inline Optional create(const T *y) {
+ static constexpr Optional create(const T *y) {
return y ? Optional(*y) : Optional();
}
@@ -250,16 +252,20 @@
void reset() { Storage.reset(); }
- const T *getPointer() const { return &Storage.getValue(); }
+ constexpr const T *getPointer() const { return &Storage.getValue(); }
T *getPointer() { return &Storage.getValue(); }
- const T &getValue() const LLVM_LVALUE_FUNCTION { return Storage.getValue(); }
+ constexpr const T &getValue() const LLVM_LVALUE_FUNCTION {
+ return Storage.getValue();
+ }
T &getValue() LLVM_LVALUE_FUNCTION { return Storage.getValue(); }
- explicit operator bool() const { return hasValue(); }
- bool hasValue() const { return Storage.hasValue(); }
- const T *operator->() const { return getPointer(); }
+ constexpr explicit operator bool() const { return hasValue(); }
+ constexpr bool hasValue() const { return Storage.hasValue(); }
+ constexpr const T *operator->() const { return getPointer(); }
T *operator->() { return getPointer(); }
- const T &operator*() const LLVM_LVALUE_FUNCTION { return getValue(); }
+ constexpr const T &operator*() const LLVM_LVALUE_FUNCTION {
+ return getValue();
+ }
T &operator*() LLVM_LVALUE_FUNCTION { return getValue(); }
template <typename U>
@@ -267,6 +273,14 @@
return hasValue() ? getValue() : std::forward<U>(value);
}
+ /// Apply a function to the value if present; otherwise return None.
+ template <class Function>
+ auto map(const Function &F) const LLVM_LVALUE_FUNCTION
+ -> Optional<decltype(F(getValue()))> {
+ if (*this) return F(getValue());
+ return None;
+ }
+
#if LLVM_HAS_RVALUE_REFERENCE_THIS
T &&getValue() && { return std::move(Storage.getValue()); }
T &&operator*() && { return std::move(Storage.getValue()); }
@@ -275,140 +289,168 @@
T getValueOr(U &&value) && {
return hasValue() ? std::move(getValue()) : std::forward<U>(value);
}
+
+ /// Apply a function to the value if present; otherwise return None.
+ template <class Function>
+ auto map(const Function &F) &&
+ -> Optional<decltype(F(std::move(*this).getValue()))> {
+ if (*this) return F(std::move(*this).getValue());
+ return None;
+ }
#endif
};
+template <class T> llvm::hash_code hash_value(const Optional<T> &O) {
+ return O ? hash_combine(true, *O) : hash_value(false);
+}
+
template <typename T, typename U>
-bool operator==(const Optional<T> &X, const Optional<U> &Y) {
+constexpr bool operator==(const Optional<T> &X, const Optional<U> &Y) {
if (X && Y)
return *X == *Y;
return X.hasValue() == Y.hasValue();
}
template <typename T, typename U>
-bool operator!=(const Optional<T> &X, const Optional<U> &Y) {
+constexpr bool operator!=(const Optional<T> &X, const Optional<U> &Y) {
return !(X == Y);
}
template <typename T, typename U>
-bool operator<(const Optional<T> &X, const Optional<U> &Y) {
+constexpr bool operator<(const Optional<T> &X, const Optional<U> &Y) {
if (X && Y)
return *X < *Y;
return X.hasValue() < Y.hasValue();
}
template <typename T, typename U>
-bool operator<=(const Optional<T> &X, const Optional<U> &Y) {
+constexpr bool operator<=(const Optional<T> &X, const Optional<U> &Y) {
return !(Y < X);
}
template <typename T, typename U>
-bool operator>(const Optional<T> &X, const Optional<U> &Y) {
+constexpr bool operator>(const Optional<T> &X, const Optional<U> &Y) {
return Y < X;
}
template <typename T, typename U>
-bool operator>=(const Optional<T> &X, const Optional<U> &Y) {
+constexpr bool operator>=(const Optional<T> &X, const Optional<U> &Y) {
return !(X < Y);
}
-template<typename T>
-bool operator==(const Optional<T> &X, NoneType) {
+template <typename T>
+constexpr bool operator==(const Optional<T> &X, NoneType) {
return !X;
}
-template<typename T>
-bool operator==(NoneType, const Optional<T> &X) {
+template <typename T>
+constexpr bool operator==(NoneType, const Optional<T> &X) {
return X == None;
}
-template<typename T>
-bool operator!=(const Optional<T> &X, NoneType) {
+template <typename T>
+constexpr bool operator!=(const Optional<T> &X, NoneType) {
return !(X == None);
}
-template<typename T>
-bool operator!=(NoneType, const Optional<T> &X) {
+template <typename T>
+constexpr bool operator!=(NoneType, const Optional<T> &X) {
return X != None;
}
-template <typename T> bool operator<(const Optional<T> &X, NoneType) {
+template <typename T> constexpr bool operator<(const Optional<T> &X, NoneType) {
return false;
}
-template <typename T> bool operator<(NoneType, const Optional<T> &X) {
+template <typename T> constexpr bool operator<(NoneType, const Optional<T> &X) {
return X.hasValue();
}
-template <typename T> bool operator<=(const Optional<T> &X, NoneType) {
+template <typename T>
+constexpr bool operator<=(const Optional<T> &X, NoneType) {
return !(None < X);
}
-template <typename T> bool operator<=(NoneType, const Optional<T> &X) {
+template <typename T>
+constexpr bool operator<=(NoneType, const Optional<T> &X) {
return !(X < None);
}
-template <typename T> bool operator>(const Optional<T> &X, NoneType) {
+template <typename T> constexpr bool operator>(const Optional<T> &X, NoneType) {
return None < X;
}
-template <typename T> bool operator>(NoneType, const Optional<T> &X) {
+template <typename T> constexpr bool operator>(NoneType, const Optional<T> &X) {
return X < None;
}
-template <typename T> bool operator>=(const Optional<T> &X, NoneType) {
+template <typename T>
+constexpr bool operator>=(const Optional<T> &X, NoneType) {
return None <= X;
}
-template <typename T> bool operator>=(NoneType, const Optional<T> &X) {
+template <typename T>
+constexpr bool operator>=(NoneType, const Optional<T> &X) {
return X <= None;
}
-template <typename T> bool operator==(const Optional<T> &X, const T &Y) {
+template <typename T>
+constexpr bool operator==(const Optional<T> &X, const T &Y) {
return X && *X == Y;
}
-template <typename T> bool operator==(const T &X, const Optional<T> &Y) {
+template <typename T>
+constexpr bool operator==(const T &X, const Optional<T> &Y) {
return Y && X == *Y;
}
-template <typename T> bool operator!=(const Optional<T> &X, const T &Y) {
+template <typename T>
+constexpr bool operator!=(const Optional<T> &X, const T &Y) {
return !(X == Y);
}
-template <typename T> bool operator!=(const T &X, const Optional<T> &Y) {
+template <typename T>
+constexpr bool operator!=(const T &X, const Optional<T> &Y) {
return !(X == Y);
}
-template <typename T> bool operator<(const Optional<T> &X, const T &Y) {
+template <typename T>
+constexpr bool operator<(const Optional<T> &X, const T &Y) {
return !X || *X < Y;
}
-template <typename T> bool operator<(const T &X, const Optional<T> &Y) {
+template <typename T>
+constexpr bool operator<(const T &X, const Optional<T> &Y) {
return Y && X < *Y;
}
-template <typename T> bool operator<=(const Optional<T> &X, const T &Y) {
+template <typename T>
+constexpr bool operator<=(const Optional<T> &X, const T &Y) {
return !(Y < X);
}
-template <typename T> bool operator<=(const T &X, const Optional<T> &Y) {
+template <typename T>
+constexpr bool operator<=(const T &X, const Optional<T> &Y) {
return !(Y < X);
}
-template <typename T> bool operator>(const Optional<T> &X, const T &Y) {
+template <typename T>
+constexpr bool operator>(const Optional<T> &X, const T &Y) {
return Y < X;
}
-template <typename T> bool operator>(const T &X, const Optional<T> &Y) {
+template <typename T>
+constexpr bool operator>(const T &X, const Optional<T> &Y) {
return Y < X;
}
-template <typename T> bool operator>=(const Optional<T> &X, const T &Y) {
+template <typename T>
+constexpr bool operator>=(const Optional<T> &X, const T &Y) {
return !(X < Y);
}
-template <typename T> bool operator>=(const T &X, const Optional<T> &Y) {
+template <typename T>
+constexpr bool operator>=(const T &X, const Optional<T> &Y) {
return !(X < Y);
}
diff --git a/linux-x64/clang/include/llvm/ADT/PointerEmbeddedInt.h b/linux-x64/clang/include/llvm/ADT/PointerEmbeddedInt.h
index 3eb6edb..fbc48af 100644
--- a/linux-x64/clang/include/llvm/ADT/PointerEmbeddedInt.h
+++ b/linux-x64/clang/include/llvm/ADT/PointerEmbeddedInt.h
@@ -94,7 +94,7 @@
return T(reinterpret_cast<uintptr_t>(P), typename T::RawValueTag());
}
- enum { NumLowBitsAvailable = T::Shift };
+ static constexpr int NumLowBitsAvailable = T::Shift;
};
// Teach DenseMap how to use PointerEmbeddedInt objects as keys if the Int type
diff --git a/linux-x64/clang/include/llvm/ADT/PointerIntPair.h b/linux-x64/clang/include/llvm/ADT/PointerIntPair.h
index 24a2bb6..cb8b202 100644
--- a/linux-x64/clang/include/llvm/ADT/PointerIntPair.h
+++ b/linux-x64/clang/include/llvm/ADT/PointerIntPair.h
@@ -13,6 +13,7 @@
#ifndef LLVM_ADT_POINTERINTPAIR_H
#define LLVM_ADT_POINTERINTPAIR_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
#include <cassert>
@@ -59,19 +60,19 @@
IntType getInt() const { return (IntType)Info::getInt(Value); }
- void setPointer(PointerTy PtrVal) {
+ void setPointer(PointerTy PtrVal) LLVM_LVALUE_FUNCTION {
Value = Info::updatePointer(Value, PtrVal);
}
- void setInt(IntType IntVal) {
+ void setInt(IntType IntVal) LLVM_LVALUE_FUNCTION {
Value = Info::updateInt(Value, static_cast<intptr_t>(IntVal));
}
- void initWithPointer(PointerTy PtrVal) {
+ void initWithPointer(PointerTy PtrVal) LLVM_LVALUE_FUNCTION {
Value = Info::updatePointer(0, PtrVal);
}
- void setPointerAndInt(PointerTy PtrVal, IntType IntVal) {
+ void setPointerAndInt(PointerTy PtrVal, IntType IntVal) LLVM_LVALUE_FUNCTION {
Value = Info::updateInt(Info::updatePointer(0, PtrVal),
static_cast<intptr_t>(IntVal));
}
@@ -89,7 +90,7 @@
void *getOpaqueValue() const { return reinterpret_cast<void *>(Value); }
- void setFromOpaqueValue(void *Val) {
+ void setFromOpaqueValue(void *Val) LLVM_LVALUE_FUNCTION {
Value = reinterpret_cast<intptr_t>(Val);
}
@@ -146,7 +147,7 @@
"cannot use a pointer type that has all bits free");
static_assert(IntBits <= PtrTraits::NumLowBitsAvailable,
"PointerIntPair with integer size too large for pointer");
- enum : uintptr_t {
+ enum MaskAndShiftConstants : uintptr_t {
/// PointerBitMask - The bits that come from the pointer.
PointerBitMask =
~(uintptr_t)(((intptr_t)1 << PtrTraits::NumLowBitsAvailable) - 1),
@@ -234,7 +235,8 @@
return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
}
- enum { NumLowBitsAvailable = PtrTraits::NumLowBitsAvailable - IntBits };
+ static constexpr int NumLowBitsAvailable =
+ PtrTraits::NumLowBitsAvailable - IntBits;
};
} // end namespace llvm
diff --git a/linux-x64/clang/include/llvm/ADT/PointerSumType.h b/linux-x64/clang/include/llvm/ADT/PointerSumType.h
index d467f83..a7ef774 100644
--- a/linux-x64/clang/include/llvm/ADT/PointerSumType.h
+++ b/linux-x64/clang/include/llvm/ADT/PointerSumType.h
@@ -214,7 +214,7 @@
LookupOverload(PointerSumTypeMember<N, PointerT, TraitsT> *);
template <TagT N> static void LookupOverload(...);
template <TagT N> struct Lookup {
- // Compute a particular member type by resolving the lookup helper ovorload.
+ // Compute a particular member type by resolving the lookup helper overload.
using MemberT = decltype(
LookupOverload<N>(static_cast<PointerSumTypeHelper *>(nullptr)));
diff --git a/linux-x64/clang/include/llvm/ADT/PointerUnion.h b/linux-x64/clang/include/llvm/ADT/PointerUnion.h
index 2bcdf54..c396910 100644
--- a/linux-x64/clang/include/llvm/ADT/PointerUnion.h
+++ b/linux-x64/clang/include/llvm/ADT/PointerUnion.h
@@ -54,21 +54,14 @@
};
namespace pointer_union_detail {
- constexpr int constexprMin(int a, int b) { return a < b ? a : b; }
/// Determine the number of bits required to store integers with values < n.
/// This is ceil(log2(n)).
constexpr int bitsRequired(unsigned n) {
return n > 1 ? 1 + bitsRequired((n + 1) / 2) : 0;
}
- // FIXME: In C++14, replace this with
- // std::min({PointerLikeTypeTraits<Ts>::NumLowBitsAvailable...})
- template <typename T> constexpr int lowBitsAvailable() {
- return PointerLikeTypeTraits<T>::NumLowBitsAvailable;
- }
- template <typename T1, typename T2, typename... Ts>
- constexpr int lowBitsAvailable() {
- return constexprMin(lowBitsAvailable<T1>(), lowBitsAvailable<T2, Ts...>());
+ template <typename... Ts> constexpr int lowBitsAvailable() {
+ return std::min<int>({PointerLikeTypeTraits<Ts>::NumLowBitsAvailable...});
}
/// Find the index of a type in a list of types. TypeIndex<T, Us...>::Index
@@ -100,13 +93,6 @@
static constexpr int NumLowBitsAvailable = lowBitsAvailable<PTs...>();
};
- /// Implement assigment in terms of construction.
- template <typename Derived, typename T> struct AssignableFrom {
- Derived &operator=(T t) {
- return static_cast<Derived &>(*this) = Derived(t);
- }
- };
-
template <typename Derived, typename ValTy, int I, typename ...Types>
class PointerUnionMembers;
@@ -167,10 +153,11 @@
void *, pointer_union_detail::bitsRequired(sizeof...(PTs)), int,
pointer_union_detail::PointerUnionUIntTraits<PTs...>>,
0, PTs...> {
- // The first type is special in some ways, but we don't want PointerUnion to
- // be a 'template <typename First, typename ...Rest>' because it's much more
- // convenient to have a name for the whole pack. So split off the first type
- // here.
+ // The first type is special because we want to directly cast a pointer to a
+ // default-initialized union to a pointer to the first type. But we don't
+ // want PointerUnion to be a 'template <typename First, typename ...Rest>'
+ // because it's much more convenient to have a name for the whole pack. So
+ // split off the first type here.
using First = typename pointer_union_detail::GetFirstType<PTs...>::type;
using Base = typename PointerUnion::PointerUnionMembers;
@@ -182,17 +169,12 @@
/// Test if the pointer held in the union is null, regardless of
/// which type it is.
- bool isNull() const {
- // Convert from the void* to one of the pointer types, to make sure that
- // we recursively strip off low bits if we have a nested PointerUnion.
- return !PointerLikeTypeTraits<First>::getFromVoidPointer(
- this->Val.getPointer());
- }
+ bool isNull() const { return !this->Val.getPointer(); }
explicit operator bool() const { return !isNull(); }
/// Test if the Union currently holds the type matching T.
- template <typename T> int is() const {
+ template <typename T> bool is() const {
constexpr int Index = pointer_union_detail::TypeIndex<T, PTs...>::Index;
static_assert(Index < sizeof...(PTs),
"PointerUnion::is<T> given type not in the union");
@@ -208,7 +190,7 @@
}
/// Returns the current pointer if it is of the specified pointer type,
- /// otherwises returns null.
+ /// otherwise returns null.
template <typename T> T dyn_cast() const {
if (is<T>())
return get<T>();
@@ -226,7 +208,8 @@
First *getAddrOfPtr1() {
assert(is<First>() && "Val is not the first pointer");
assert(
- get<First>() == this->Val.getPointer() &&
+ PointerLikeTypeTraits<First>::getAsVoidPointer(get<First>()) ==
+ this->Val.getPointer() &&
"Can't get the address because PointerLikeTypeTraits changes the ptr");
return const_cast<First *>(
reinterpret_cast<const First *>(this->Val.getAddrOfPointer()));
@@ -282,16 +265,6 @@
PointerUnion<PTs...>::Val)>::NumLowBitsAvailable;
};
-/// A pointer union of three pointer types. See documentation for PointerUnion
-/// for usage.
-template <typename PT1, typename PT2, typename PT3>
-using PointerUnion3 = PointerUnion<PT1, PT2, PT3>;
-
-/// A pointer union of four pointer types. See documentation for PointerUnion
-/// for usage.
-template <typename PT1, typename PT2, typename PT3, typename PT4>
-using PointerUnion4 = PointerUnion<PT1, PT2, PT3, PT4>;
-
// Teach DenseMap how to use PointerUnions as keys.
template <typename ...PTs> struct DenseMapInfo<PointerUnion<PTs...>> {
using Union = PointerUnion<PTs...>;
diff --git a/linux-x64/clang/include/llvm/ADT/PostOrderIterator.h b/linux-x64/clang/include/llvm/ADT/PostOrderIterator.h
index 2fe7447..bb413a9 100644
--- a/linux-x64/clang/include/llvm/ADT/PostOrderIterator.h
+++ b/linux-x64/clang/include/llvm/ADT/PostOrderIterator.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include <iterator>
#include <set>
@@ -101,7 +102,7 @@
// VisitStack - Used to maintain the ordering. Top = current block
// First element is basic block pointer, second is the 'next child' to visit
- std::vector<std::pair<NodeRef, ChildItTy>> VisitStack;
+ SmallVector<std::pair<NodeRef, ChildItTy>, 8> VisitStack;
po_iterator(NodeRef BB) {
this->insertEdge(Optional<NodeRef>(), BB);
diff --git a/linux-x64/clang/include/llvm/ADT/PriorityWorklist.h b/linux-x64/clang/include/llvm/ADT/PriorityWorklist.h
index 96d22c8..01dd59a 100644
--- a/linux-x64/clang/include/llvm/ADT/PriorityWorklist.h
+++ b/linux-x64/clang/include/llvm/ADT/PriorityWorklist.h
@@ -110,7 +110,7 @@
/// Insert a sequence of new elements into the PriorityWorklist.
template <typename SequenceT>
- typename std::enable_if<!std::is_convertible<SequenceT, T>::value>::type
+ std::enable_if_t<!std::is_convertible<SequenceT, T>::value>
insert(SequenceT &&Input) {
if (std::begin(Input) == std::end(Input))
// Nothing to do for an empty input sequence.
diff --git a/linux-x64/clang/include/llvm/ADT/SCCIterator.h b/linux-x64/clang/include/llvm/ADT/SCCIterator.h
index eb1a5d0..8a7c0a7 100644
--- a/linux-x64/clang/include/llvm/ADT/SCCIterator.h
+++ b/linux-x64/clang/include/llvm/ADT/SCCIterator.h
@@ -124,17 +124,20 @@
return CurrentSCC;
}
- /// Test if the current SCC has a loop.
+ /// Test if the current SCC has a cycle.
///
/// If the SCC has more than one node, this is trivially true. If not, it may
- /// still contain a loop if the node has an edge back to itself.
- bool hasLoop() const;
+ /// still contain a cycle if the node has an edge back to itself.
+ bool hasCycle() const;
/// This informs the \c scc_iterator that the specified \c Old node
/// has been deleted, and \c New is to be used in its place.
void ReplaceNode(NodeRef Old, NodeRef New) {
assert(nodeVisitNumbers.count(Old) && "Old not in scc_iterator?");
- nodeVisitNumbers[New] = nodeVisitNumbers[Old];
+ // Do the assignment in two steps, in case 'New' is not yet in the map, and
+ // inserting it causes the map to grow.
+ auto tempVal = nodeVisitNumbers[Old];
+ nodeVisitNumbers[New] = tempVal;
nodeVisitNumbers.erase(Old);
}
};
@@ -209,7 +212,7 @@
}
template <class GraphT, class GT>
-bool scc_iterator<GraphT, GT>::hasLoop() const {
+bool scc_iterator<GraphT, GT>::hasCycle() const {
assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
if (CurrentSCC.size() > 1)
return true;
diff --git a/linux-x64/clang/include/llvm/ADT/STLExtras.h b/linux-x64/clang/include/llvm/ADT/STLExtras.h
index 81dce01..c8c1aff 100644
--- a/linux-x64/clang/include/llvm/ADT/STLExtras.h
+++ b/linux-x64/clang/include/llvm/ADT/STLExtras.h
@@ -17,7 +17,6 @@
#define LLVM_ADT_STLEXTRAS_H
#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Config/abi-breaking.h"
@@ -80,6 +79,79 @@
typename std::add_const<T>::type>::type;
};
+/// Utilities for detecting if a given trait holds for some set of arguments
+/// 'Args'. For example, the given trait could be used to detect if a given type
+/// has a copy assignment operator:
+/// template<class T>
+/// using has_copy_assign_t = decltype(std::declval<T&>()
+/// = std::declval<const T&>());
+/// bool fooHasCopyAssign = is_detected<has_copy_assign_t, FooClass>::value;
+namespace detail {
+template <typename...> using void_t = void;
+template <class, template <class...> class Op, class... Args> struct detector {
+ using value_t = std::false_type;
+};
+template <template <class...> class Op, class... Args>
+struct detector<void_t<Op<Args...>>, Op, Args...> {
+ using value_t = std::true_type;
+};
+} // end namespace detail
+
+template <template <class...> class Op, class... Args>
+using is_detected = typename detail::detector<void, Op, Args...>::value_t;
+
+/// Check if a Callable type can be invoked with the given set of arg types.
+namespace detail {
+template <typename Callable, typename... Args>
+using is_invocable =
+ decltype(std::declval<Callable &>()(std::declval<Args>()...));
+} // namespace detail
+
+template <typename Callable, typename... Args>
+using is_invocable = is_detected<detail::is_invocable, Callable, Args...>;
+
+/// This class provides various trait information about a callable object.
+/// * To access the number of arguments: Traits::num_args
+/// * To access the type of an argument: Traits::arg_t<Index>
+/// * To access the type of the result: Traits::result_t
+template <typename T, bool isClass = std::is_class<T>::value>
+struct function_traits : public function_traits<decltype(&T::operator())> {};
+
+/// Overload for class function types.
+template <typename ClassType, typename ReturnType, typename... Args>
+struct function_traits<ReturnType (ClassType::*)(Args...) const, false> {
+ /// The number of arguments to this function.
+ enum { num_args = sizeof...(Args) };
+
+ /// The result type of this function.
+ using result_t = ReturnType;
+
+ /// The type of an argument to this function.
+ template <size_t Index>
+ using arg_t = typename std::tuple_element<Index, std::tuple<Args...>>::type;
+};
+/// Overload for class function types.
+template <typename ClassType, typename ReturnType, typename... Args>
+struct function_traits<ReturnType (ClassType::*)(Args...), false>
+ : function_traits<ReturnType (ClassType::*)(Args...) const> {};
+/// Overload for non-class function types.
+template <typename ReturnType, typename... Args>
+struct function_traits<ReturnType (*)(Args...), false> {
+ /// The number of arguments to this function.
+ enum { num_args = sizeof...(Args) };
+
+ /// The result type of this function.
+ using result_t = ReturnType;
+
+ /// The type of an argument to this function.
+ template <size_t i>
+ using arg_t = typename std::tuple_element<i, std::tuple<Args...>>::type;
+};
+/// Overload for non-class function type references.
+template <typename ReturnType, typename... Args>
+struct function_traits<ReturnType (&)(Args...), false>
+ : public function_traits<ReturnType (*)(Args...)> {};
+
//===----------------------------------------------------------------------===//
// Extra additions to <functional>
//===----------------------------------------------------------------------===//
@@ -95,18 +167,6 @@
}
};
-template <class Ty> struct less_ptr {
- bool operator()(const Ty* left, const Ty* right) const {
- return *left < *right;
- }
-};
-
-template <class Ty> struct greater_ptr {
- bool operator()(const Ty* left, const Ty* right) const {
- return *right < *left;
- }
-};
-
/// An efficient, type-erasing, non-owning reference to a callable. This is
/// intended for use as the type of a function parameter that is not used
/// after the function in question returns.
@@ -131,10 +191,17 @@
function_ref(std::nullptr_t) {}
template <typename Callable>
- function_ref(Callable &&callable,
- typename std::enable_if<
- !std::is_same<typename std::remove_reference<Callable>::type,
- function_ref>::value>::type * = nullptr)
+ function_ref(
+ Callable &&callable,
+ // This is not the copy-constructor.
+ std::enable_if_t<
+ !std::is_same<std::remove_cv_t<std::remove_reference_t<Callable>>,
+ function_ref>::value> * = nullptr,
+ // Functor must be callable and return a suitable type.
+ std::enable_if_t<std::is_void<Ret>::value ||
+ std::is_convertible<decltype(std::declval<Callable>()(
+ std::declval<Params>()...)),
+ Ret>::value> * = nullptr)
: callback(callback_fn<typename std::remove_reference<Callable>::type>),
callable(reinterpret_cast<intptr_t>(&callable)) {}
@@ -142,18 +209,9 @@
return callback(callable, std::forward<Params>(params)...);
}
- operator bool() const { return callback; }
+ explicit operator bool() const { return callback; }
};
-// deleter - Very very very simple method that is used to invoke operator
-// delete on something. It is used like this:
-//
-// for_each(V.begin(), B.end(), deleter<Interval>);
-template <class T>
-inline void deleter(T *Ptr) {
- delete Ptr;
-}
-
//===----------------------------------------------------------------------===//
// Extra additions to <iterator>
//===----------------------------------------------------------------------===//
@@ -163,16 +221,14 @@
using std::begin;
template <typename ContainerTy>
-auto adl_begin(ContainerTy &&container)
- -> decltype(begin(std::forward<ContainerTy>(container))) {
+decltype(auto) adl_begin(ContainerTy &&container) {
return begin(std::forward<ContainerTy>(container));
}
using std::end;
template <typename ContainerTy>
-auto adl_end(ContainerTy &&container)
- -> decltype(end(std::forward<ContainerTy>(container))) {
+decltype(auto) adl_end(ContainerTy &&container) {
return end(std::forward<ContainerTy>(container));
}
@@ -187,14 +243,12 @@
} // end namespace adl_detail
template <typename ContainerTy>
-auto adl_begin(ContainerTy &&container)
- -> decltype(adl_detail::adl_begin(std::forward<ContainerTy>(container))) {
+decltype(auto) adl_begin(ContainerTy &&container) {
return adl_detail::adl_begin(std::forward<ContainerTy>(container));
}
template <typename ContainerTy>
-auto adl_end(ContainerTy &&container)
- -> decltype(adl_detail::adl_end(std::forward<ContainerTy>(container))) {
+decltype(auto) adl_end(ContainerTy &&container) {
return adl_detail::adl_end(std::forward<ContainerTy>(container));
}
@@ -210,6 +264,19 @@
return adl_begin(RangeOrContainer) == adl_end(RangeOrContainer);
}
+/// Returns true if the given container only contains a single element.
+template <typename ContainerTy> bool hasSingleElement(ContainerTy &&C) {
+ auto B = std::begin(C), E = std::end(C);
+ return B != E && std::next(B) == E;
+}
+
+/// Return a range covering \p RangeOrContainer with the first N elements
+/// excluded.
+template <typename T> auto drop_begin(T &&RangeOrContainer, size_t N) {
+ return make_range(std::next(adl_begin(RangeOrContainer), N),
+ adl_end(RangeOrContainer));
+}
+
// mapped_iterator - This is a simple iterator adapter that causes a function to
// be applied whenever operator* is invoked on the iterator.
@@ -227,7 +294,7 @@
ItTy getCurrent() { return this->I; }
- FuncReturnTy operator*() { return F(*this->I); }
+ FuncReturnTy operator*() const { return F(*this->I); }
private:
FuncTy F;
@@ -241,9 +308,7 @@
}
template <class ContainerTy, class FuncTy>
-auto map_range(ContainerTy &&C, FuncTy F)
- -> decltype(make_range(map_iterator(C.begin(), F),
- map_iterator(C.end(), F))) {
+auto map_range(ContainerTy &&C, FuncTy F) {
return make_range(map_iterator(C.begin(), F), map_iterator(C.end(), F));
}
@@ -271,8 +336,7 @@
// Note that the container must have rbegin()/rend() methods for this to work.
template <typename ContainerTy>
auto reverse(ContainerTy &&C,
- typename std::enable_if<has_rbegin<ContainerTy>::value>::type * =
- nullptr) -> decltype(make_range(C.rbegin(), C.rend())) {
+ std::enable_if_t<has_rbegin<ContainerTy>::value> * = nullptr) {
return make_range(C.rbegin(), C.rend());
}
@@ -286,11 +350,8 @@
// Note that the container must have begin()/end() methods which return
// bidirectional iterators for this to work.
template <typename ContainerTy>
-auto reverse(
- ContainerTy &&C,
- typename std::enable_if<!has_rbegin<ContainerTy>::value>::type * = nullptr)
- -> decltype(make_range(llvm::make_reverse_iterator(std::end(C)),
- llvm::make_reverse_iterator(std::begin(C)))) {
+auto reverse(ContainerTy &&C,
+ std::enable_if_t<!has_rbegin<ContainerTy>::value> * = nullptr) {
return make_range(llvm::make_reverse_iterator(std::end(C)),
llvm::make_reverse_iterator(std::begin(C)));
}
@@ -477,7 +538,7 @@
early_inc_iterator_impl(WrappedIteratorT I) : BaseT(I) {}
using BaseT::operator*;
- typename BaseT::reference operator*() {
+ decltype(*std::declval<WrappedIteratorT>()) operator*() {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
assert(!IsEarlyIncremented && "Cannot dereference twice!");
IsEarlyIncremented = true;
@@ -494,12 +555,12 @@
return *this;
}
- using BaseT::operator==;
- bool operator==(const early_inc_iterator_impl &RHS) const {
+ friend bool operator==(const early_inc_iterator_impl &LHS,
+ const early_inc_iterator_impl &RHS) {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
- assert(!IsEarlyIncremented && "Cannot compare after dereferencing!");
+ assert(!LHS.IsEarlyIncremented && "Cannot compare after dereferencing!");
#endif
- return BaseT::operator==(RHS);
+ return (const BaseT &)LHS == (const BaseT &)RHS;
}
};
@@ -530,10 +591,6 @@
template <typename R, typename UnaryPredicate>
bool any_of(R &&range, UnaryPredicate P);
-template <size_t... I> struct index_sequence;
-
-template <class... Ts> struct index_sequence_for;
-
namespace detail {
using std::declval;
@@ -568,38 +625,38 @@
std::tuple<Iters...> iterators;
protected:
- template <size_t... Ns> value_type deref(index_sequence<Ns...>) const {
+ template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
return value_type(*std::get<Ns>(iterators)...);
}
template <size_t... Ns>
- decltype(iterators) tup_inc(index_sequence<Ns...>) const {
+ decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
return std::tuple<Iters...>(std::next(std::get<Ns>(iterators))...);
}
template <size_t... Ns>
- decltype(iterators) tup_dec(index_sequence<Ns...>) const {
+ decltype(iterators) tup_dec(std::index_sequence<Ns...>) const {
return std::tuple<Iters...>(std::prev(std::get<Ns>(iterators))...);
}
public:
zip_common(Iters &&... ts) : iterators(std::forward<Iters>(ts)...) {}
- value_type operator*() { return deref(index_sequence_for<Iters...>{}); }
+ value_type operator*() { return deref(std::index_sequence_for<Iters...>{}); }
const value_type operator*() const {
- return deref(index_sequence_for<Iters...>{});
+ return deref(std::index_sequence_for<Iters...>{});
}
ZipType &operator++() {
- iterators = tup_inc(index_sequence_for<Iters...>{});
+ iterators = tup_inc(std::index_sequence_for<Iters...>{});
return *reinterpret_cast<ZipType *>(this);
}
ZipType &operator--() {
static_assert(Base::IsBidirectional,
"All inner iterators must be at least bidirectional.");
- iterators = tup_dec(index_sequence_for<Iters...>{});
+ iterators = tup_dec(std::index_sequence_for<Iters...>{});
return *reinterpret_cast<ZipType *>(this);
}
};
@@ -618,7 +675,8 @@
template <typename... Iters>
class zip_shortest : public zip_common<zip_shortest<Iters...>, Iters...> {
template <size_t... Ns>
- bool test(const zip_shortest<Iters...> &other, index_sequence<Ns...>) const {
+ bool test(const zip_shortest<Iters...> &other,
+ std::index_sequence<Ns...>) const {
return all_of(std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
std::get<Ns>(other.iterators)...},
identity<bool>{});
@@ -630,7 +688,7 @@
zip_shortest(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
bool operator==(const zip_shortest<Iters...> &other) const {
- return !test(other, index_sequence_for<Iters...>{});
+ return !test(other, std::index_sequence_for<Iters...>{});
}
};
@@ -646,18 +704,21 @@
private:
std::tuple<Args...> ts;
- template <size_t... Ns> iterator begin_impl(index_sequence<Ns...>) const {
+ template <size_t... Ns>
+ iterator begin_impl(std::index_sequence<Ns...>) const {
return iterator(std::begin(std::get<Ns>(ts))...);
}
- template <size_t... Ns> iterator end_impl(index_sequence<Ns...>) const {
+ template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
return iterator(std::end(std::get<Ns>(ts))...);
}
public:
zippy(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
- iterator begin() const { return begin_impl(index_sequence_for<Args...>{}); }
- iterator end() const { return end_impl(index_sequence_for<Args...>{}); }
+ iterator begin() const {
+ return begin_impl(std::index_sequence_for<Args...>{});
+ }
+ iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
};
} // end namespace detail
@@ -681,16 +742,15 @@
namespace detail {
template <typename Iter>
-static Iter next_or_end(const Iter &I, const Iter &End) {
+Iter next_or_end(const Iter &I, const Iter &End) {
if (I == End)
return End;
return std::next(I);
}
template <typename Iter>
-static auto deref_or_none(const Iter &I, const Iter &End)
- -> llvm::Optional<typename std::remove_const<
- typename std::remove_reference<decltype(*I)>::type>::type> {
+auto deref_or_none(const Iter &I, const Iter &End) -> llvm::Optional<
+ std::remove_const_t<std::remove_reference_t<decltype(*I)>>> {
if (I == End)
return None;
return *I;
@@ -727,20 +787,20 @@
template <size_t... Ns>
bool test(const zip_longest_iterator<Iters...> &other,
- index_sequence<Ns...>) const {
+ std::index_sequence<Ns...>) const {
return llvm::any_of(
std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
std::get<Ns>(other.iterators)...},
identity<bool>{});
}
- template <size_t... Ns> value_type deref(index_sequence<Ns...>) const {
+ template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
return value_type(
deref_or_none(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
}
template <size_t... Ns>
- decltype(iterators) tup_inc(index_sequence<Ns...>) const {
+ decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
return std::tuple<Iters...>(
next_or_end(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
}
@@ -750,17 +810,19 @@
: iterators(std::forward<Iters>(ts.first)...),
end_iterators(std::forward<Iters>(ts.second)...) {}
- value_type operator*() { return deref(index_sequence_for<Iters...>{}); }
+ value_type operator*() { return deref(std::index_sequence_for<Iters...>{}); }
- value_type operator*() const { return deref(index_sequence_for<Iters...>{}); }
+ value_type operator*() const {
+ return deref(std::index_sequence_for<Iters...>{});
+ }
zip_longest_iterator<Iters...> &operator++() {
- iterators = tup_inc(index_sequence_for<Iters...>{});
+ iterators = tup_inc(std::index_sequence_for<Iters...>{});
return *this;
}
bool operator==(const zip_longest_iterator<Iters...> &other) const {
- return !test(other, index_sequence_for<Iters...>{});
+ return !test(other, std::index_sequence_for<Iters...>{});
}
};
@@ -777,12 +839,13 @@
private:
std::tuple<Args...> ts;
- template <size_t... Ns> iterator begin_impl(index_sequence<Ns...>) const {
+ template <size_t... Ns>
+ iterator begin_impl(std::index_sequence<Ns...>) const {
return iterator(std::make_pair(adl_begin(std::get<Ns>(ts)),
adl_end(std::get<Ns>(ts)))...);
}
- template <size_t... Ns> iterator end_impl(index_sequence<Ns...>) const {
+ template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
return iterator(std::make_pair(adl_end(std::get<Ns>(ts)),
adl_end(std::get<Ns>(ts)))...);
}
@@ -790,8 +853,10 @@
public:
zip_longest_range(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
- iterator begin() const { return begin_impl(index_sequence_for<Args...>{}); }
- iterator end() const { return end_impl(index_sequence_for<Args...>{}); }
+ iterator begin() const {
+ return begin_impl(std::index_sequence_for<Args...>{});
+ }
+ iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
};
} // namespace detail
@@ -847,7 +912,7 @@
/// Increments the first non-end iterator.
///
/// It is an error to call this with all iterators at the end.
- template <size_t... Ns> void increment(index_sequence<Ns...>) {
+ template <size_t... Ns> void increment(std::index_sequence<Ns...>) {
// Build a sequence of functions to increment each iterator if possible.
bool (concat_iterator::*IncrementHelperFns[])() = {
&concat_iterator::incrementHelper<Ns>...};
@@ -876,7 +941,7 @@
/// reference.
///
/// It is an error to call this with all iterators at the end.
- template <size_t... Ns> ValueT &get(index_sequence<Ns...>) const {
+ template <size_t... Ns> ValueT &get(std::index_sequence<Ns...>) const {
// Build a sequence of functions to get from iterator if possible.
ValueT *(concat_iterator::*GetHelperFns[])() const = {
&concat_iterator::getHelper<Ns>...};
@@ -890,7 +955,7 @@
}
public:
- /// Constructs an iterator from a squence of ranges.
+ /// Constructs an iterator from a sequence of ranges.
///
/// We need the full range to know how to switch between each of the
/// iterators.
@@ -901,11 +966,13 @@
using BaseT::operator++;
concat_iterator &operator++() {
- increment(index_sequence_for<IterTs...>());
+ increment(std::index_sequence_for<IterTs...>());
return *this;
}
- ValueT &operator*() const { return get(index_sequence_for<IterTs...>()); }
+ ValueT &operator*() const {
+ return get(std::index_sequence_for<IterTs...>());
+ }
bool operator==(const concat_iterator &RHS) const {
return Begins == RHS.Begins && Ends == RHS.Ends;
@@ -928,10 +995,10 @@
private:
std::tuple<RangeTs...> Ranges;
- template <size_t... Ns> iterator begin_impl(index_sequence<Ns...>) {
+ template <size_t... Ns> iterator begin_impl(std::index_sequence<Ns...>) {
return iterator(std::get<Ns>(Ranges)...);
}
- template <size_t... Ns> iterator end_impl(index_sequence<Ns...>) {
+ template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) {
return iterator(make_range(std::end(std::get<Ns>(Ranges)),
std::end(std::get<Ns>(Ranges)))...);
}
@@ -940,8 +1007,8 @@
concat_range(RangeTs &&... Ranges)
: Ranges(std::forward<RangeTs>(Ranges)...) {}
- iterator begin() { return begin_impl(index_sequence_for<RangeTs...>{}); }
- iterator end() { return end_impl(index_sequence_for<RangeTs...>{}); }
+ iterator begin() { return begin_impl(std::index_sequence_for<RangeTs...>{}); }
+ iterator end() { return end_impl(std::index_sequence_for<RangeTs...>{}); }
};
} // end namespace detail
@@ -957,6 +1024,243 @@
std::forward<RangeTs>(Ranges)...);
}
+/// A utility class used to implement an iterator that contains some base object
+/// and an index. The iterator moves the index but keeps the base constant.
+template <typename DerivedT, typename BaseT, typename T,
+ typename PointerT = T *, typename ReferenceT = T &>
+class indexed_accessor_iterator
+ : public llvm::iterator_facade_base<DerivedT,
+ std::random_access_iterator_tag, T,
+ std::ptrdiff_t, PointerT, ReferenceT> {
+public:
+ ptrdiff_t operator-(const indexed_accessor_iterator &rhs) const {
+ assert(base == rhs.base && "incompatible iterators");
+ return index - rhs.index;
+ }
+ bool operator==(const indexed_accessor_iterator &rhs) const {
+ return base == rhs.base && index == rhs.index;
+ }
+ bool operator<(const indexed_accessor_iterator &rhs) const {
+ assert(base == rhs.base && "incompatible iterators");
+ return index < rhs.index;
+ }
+
+ DerivedT &operator+=(ptrdiff_t offset) {
+ this->index += offset;
+ return static_cast<DerivedT &>(*this);
+ }
+ DerivedT &operator-=(ptrdiff_t offset) {
+ this->index -= offset;
+ return static_cast<DerivedT &>(*this);
+ }
+
+ /// Returns the current index of the iterator.
+ ptrdiff_t getIndex() const { return index; }
+
+ /// Returns the current base of the iterator.
+ const BaseT &getBase() const { return base; }
+
+protected:
+ indexed_accessor_iterator(BaseT base, ptrdiff_t index)
+ : base(base), index(index) {}
+ BaseT base;
+ ptrdiff_t index;
+};
+
+namespace detail {
+/// The class represents the base of a range of indexed_accessor_iterators. It
+/// provides support for many different range functionalities, e.g.
+/// drop_front/slice/etc.. Derived range classes must implement the following
+/// static methods:
+/// * ReferenceT dereference_iterator(const BaseT &base, ptrdiff_t index)
+/// - Dereference an iterator pointing to the base object at the given
+/// index.
+/// * BaseT offset_base(const BaseT &base, ptrdiff_t index)
+/// - Return a new base that is offset from the provide base by 'index'
+/// elements.
+template <typename DerivedT, typename BaseT, typename T,
+ typename PointerT = T *, typename ReferenceT = T &>
+class indexed_accessor_range_base {
+public:
+ using RangeBaseT =
+ indexed_accessor_range_base<DerivedT, BaseT, T, PointerT, ReferenceT>;
+
+ /// An iterator element of this range.
+ class iterator : public indexed_accessor_iterator<iterator, BaseT, T,
+ PointerT, ReferenceT> {
+ public:
+ // Index into this iterator, invoking a static method on the derived type.
+ ReferenceT operator*() const {
+ return DerivedT::dereference_iterator(this->getBase(), this->getIndex());
+ }
+
+ private:
+ iterator(BaseT owner, ptrdiff_t curIndex)
+ : indexed_accessor_iterator<iterator, BaseT, T, PointerT, ReferenceT>(
+ owner, curIndex) {}
+
+ /// Allow access to the constructor.
+ friend indexed_accessor_range_base<DerivedT, BaseT, T, PointerT,
+ ReferenceT>;
+ };
+
+ indexed_accessor_range_base(iterator begin, iterator end)
+ : base(offset_base(begin.getBase(), begin.getIndex())),
+ count(end.getIndex() - begin.getIndex()) {}
+ indexed_accessor_range_base(const iterator_range<iterator> &range)
+ : indexed_accessor_range_base(range.begin(), range.end()) {}
+ indexed_accessor_range_base(BaseT base, ptrdiff_t count)
+ : base(base), count(count) {}
+
+ iterator begin() const { return iterator(base, 0); }
+ iterator end() const { return iterator(base, count); }
+ ReferenceT operator[](unsigned index) const {
+ assert(index < size() && "invalid index for value range");
+ return DerivedT::dereference_iterator(base, index);
+ }
+ ReferenceT front() const {
+ assert(!empty() && "expected non-empty range");
+ return (*this)[0];
+ }
+ ReferenceT back() const {
+ assert(!empty() && "expected non-empty range");
+ return (*this)[size() - 1];
+ }
+
+ /// Compare this range with another.
+ template <typename OtherT> bool operator==(const OtherT &other) const {
+ return size() ==
+ static_cast<size_t>(std::distance(other.begin(), other.end())) &&
+ std::equal(begin(), end(), other.begin());
+ }
+ template <typename OtherT> bool operator!=(const OtherT &other) const {
+ return !(*this == other);
+ }
+
+ /// Return the size of this range.
+ size_t size() const { return count; }
+
+ /// Return if the range is empty.
+ bool empty() const { return size() == 0; }
+
+ /// Drop the first N elements, and keep M elements.
+ DerivedT slice(size_t n, size_t m) const {
+ assert(n + m <= size() && "invalid size specifiers");
+ return DerivedT(offset_base(base, n), m);
+ }
+
+ /// Drop the first n elements.
+ DerivedT drop_front(size_t n = 1) const {
+ assert(size() >= n && "Dropping more elements than exist");
+ return slice(n, size() - n);
+ }
+ /// Drop the last n elements.
+ DerivedT drop_back(size_t n = 1) const {
+ assert(size() >= n && "Dropping more elements than exist");
+ return DerivedT(base, size() - n);
+ }
+
+ /// Take the first n elements.
+ DerivedT take_front(size_t n = 1) const {
+ return n < size() ? drop_back(size() - n)
+ : static_cast<const DerivedT &>(*this);
+ }
+
+ /// Take the last n elements.
+ DerivedT take_back(size_t n = 1) const {
+ return n < size() ? drop_front(size() - n)
+ : static_cast<const DerivedT &>(*this);
+ }
+
+ /// Allow conversion to any type accepting an iterator_range.
+ template <typename RangeT, typename = std::enable_if_t<std::is_constructible<
+ RangeT, iterator_range<iterator>>::value>>
+ operator RangeT() const {
+ return RangeT(iterator_range<iterator>(*this));
+ }
+
+ /// Returns the base of this range.
+ const BaseT &getBase() const { return base; }
+
+private:
+ /// Offset the given base by the given amount.
+ static BaseT offset_base(const BaseT &base, size_t n) {
+ return n == 0 ? base : DerivedT::offset_base(base, n);
+ }
+
+protected:
+ indexed_accessor_range_base(const indexed_accessor_range_base &) = default;
+ indexed_accessor_range_base(indexed_accessor_range_base &&) = default;
+ indexed_accessor_range_base &
+ operator=(const indexed_accessor_range_base &) = default;
+
+ /// The base that owns the provided range of values.
+ BaseT base;
+ /// The size from the owning range.
+ ptrdiff_t count;
+};
+} // end namespace detail
+
+/// This class provides an implementation of a range of
+/// indexed_accessor_iterators where the base is not indexable. Ranges with
+/// bases that are offsetable should derive from indexed_accessor_range_base
+/// instead. Derived range classes are expected to implement the following
+/// static method:
+/// * ReferenceT dereference(const BaseT &base, ptrdiff_t index)
+/// - Dereference an iterator pointing to a parent base at the given index.
+template <typename DerivedT, typename BaseT, typename T,
+ typename PointerT = T *, typename ReferenceT = T &>
+class indexed_accessor_range
+ : public detail::indexed_accessor_range_base<
+ DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT> {
+public:
+ indexed_accessor_range(BaseT base, ptrdiff_t startIndex, ptrdiff_t count)
+ : detail::indexed_accessor_range_base<
+ DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT>(
+ std::make_pair(base, startIndex), count) {}
+ using detail::indexed_accessor_range_base<
+ DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT,
+ ReferenceT>::indexed_accessor_range_base;
+
+ /// Returns the current base of the range.
+ const BaseT &getBase() const { return this->base.first; }
+
+ /// Returns the current start index of the range.
+ ptrdiff_t getStartIndex() const { return this->base.second; }
+
+ /// See `detail::indexed_accessor_range_base` for details.
+ static std::pair<BaseT, ptrdiff_t>
+ offset_base(const std::pair<BaseT, ptrdiff_t> &base, ptrdiff_t index) {
+ // We encode the internal base as a pair of the derived base and a start
+ // index into the derived base.
+ return std::make_pair(base.first, base.second + index);
+ }
+ /// See `detail::indexed_accessor_range_base` for details.
+ static ReferenceT
+ dereference_iterator(const std::pair<BaseT, ptrdiff_t> &base,
+ ptrdiff_t index) {
+ return DerivedT::dereference(base.first, base.second + index);
+ }
+};
+
+/// Given a container of pairs, return a range over the first elements.
+template <typename ContainerTy> auto make_first_range(ContainerTy &&c) {
+ return llvm::map_range(
+ std::forward<ContainerTy>(c),
+ [](decltype((*std::begin(c))) elt) -> decltype((elt.first)) {
+ return elt.first;
+ });
+}
+
+/// Given a container of pairs, return a range over the second elements.
+template <typename ContainerTy> auto make_second_range(ContainerTy &&c) {
+ return llvm::map_range(
+ std::forward<ContainerTy>(c),
+ [](decltype((*std::begin(c))) elt) -> decltype((elt.second)) {
+ return elt.second;
+ });
+}
+
//===----------------------------------------------------------------------===//
// Extra additions to <utility>
//===----------------------------------------------------------------------===//
@@ -984,34 +1288,11 @@
FuncTy func;
template <typename T>
- auto operator()(const T &lhs, const T &rhs) const
- -> decltype(func(lhs.first, rhs.first)) {
+ decltype(auto) operator()(const T &lhs, const T &rhs) const {
return func(lhs.first, rhs.first);
}
};
-// A subset of N3658. More stuff can be added as-needed.
-
-/// Represents a compile-time sequence of integers.
-template <class T, T... I> struct integer_sequence {
- using value_type = T;
-
- static constexpr size_t size() { return sizeof...(I); }
-};
-
-/// Alias for the common case of a sequence of size_ts.
-template <size_t... I>
-struct index_sequence : integer_sequence<std::size_t, I...> {};
-
-template <std::size_t N, std::size_t... I>
-struct build_index_impl : build_index_impl<N - 1, N - 1, I...> {};
-template <std::size_t... I>
-struct build_index_impl<0, I...> : index_sequence<I...> {};
-
-/// Creates a compile-time integer sequence for a parameter pack.
-template <class... Ts>
-struct index_sequence_for : build_index_impl<sizeof...(Ts)> {};
-
/// Utility type to build an inheritance chain that makes it easy to rank
/// overload candidates.
template <int N> struct rank : rank<N - 1> {};
@@ -1045,6 +1326,16 @@
// Extra additions for arrays
//===----------------------------------------------------------------------===//
+// We have a copy here so that LLVM behaves the same when using different
+// standard libraries.
+template <class Iterator, class RNG>
+void shuffle(Iterator first, Iterator last, RNG &&g) {
+ // It would be better to use a std::uniform_int_distribution,
+ // but that would be stdlib dependent.
+ for (auto size = last - first; size > 1; ++first, (void)--size)
+ std::iter_swap(first, first + g() % size);
+}
+
/// Find the length of an array.
template <class T, std::size_t N>
constexpr inline size_t array_lengthof(T (&)[N]) {
@@ -1071,6 +1362,23 @@
return array_pod_sort_comparator<T>;
}
+#ifdef EXPENSIVE_CHECKS
+namespace detail {
+
+inline unsigned presortShuffleEntropy() {
+ static unsigned Result(std::random_device{}());
+ return Result;
+}
+
+template <class IteratorTy>
+inline void presortShuffle(IteratorTy Start, IteratorTy End) {
+ std::mt19937 Generator(presortShuffleEntropy());
+ std::shuffle(Start, End, Generator);
+}
+
+} // end namespace detail
+#endif
+
/// array_pod_sort - This sorts an array with the specified start and end
/// extent. This is just like std::sort, except that it calls qsort instead of
/// using an inlined template. qsort is slightly slower than std::sort, but
@@ -1092,8 +1400,7 @@
auto NElts = End - Start;
if (NElts <= 1) return;
#ifdef EXPENSIVE_CHECKS
- std::mt19937 Generator(std::random_device{}());
- std::shuffle(Start, End, Generator);
+ detail::presortShuffle<IteratorTy>(Start, End);
#endif
qsort(&*Start, NElts, sizeof(*Start), get_array_pod_sort_comparator(*Start));
}
@@ -1109,24 +1416,42 @@
auto NElts = End - Start;
if (NElts <= 1) return;
#ifdef EXPENSIVE_CHECKS
- std::mt19937 Generator(std::random_device{}());
- std::shuffle(Start, End, Generator);
+ detail::presortShuffle<IteratorTy>(Start, End);
#endif
qsort(&*Start, NElts, sizeof(*Start),
reinterpret_cast<int (*)(const void *, const void *)>(Compare));
}
+namespace detail {
+template <typename T>
+// We can use qsort if the iterator type is a pointer and the underlying value
+// is trivially copyable.
+using sort_trivially_copyable = conjunction<
+ std::is_pointer<T>,
+ std::is_trivially_copyable<typename std::iterator_traits<T>::value_type>>;
+} // namespace detail
+
// Provide wrappers to std::sort which shuffle the elements before sorting
// to help uncover non-deterministic behavior (PR35135).
-template <typename IteratorTy>
+template <typename IteratorTy,
+ std::enable_if_t<!detail::sort_trivially_copyable<IteratorTy>::value,
+ int> = 0>
inline void sort(IteratorTy Start, IteratorTy End) {
#ifdef EXPENSIVE_CHECKS
- std::mt19937 Generator(std::random_device{}());
- std::shuffle(Start, End, Generator);
+ detail::presortShuffle<IteratorTy>(Start, End);
#endif
std::sort(Start, End);
}
+// Forward trivially copyable types to array_pod_sort. This avoids a large
+// amount of code bloat for a minor performance hit.
+template <typename IteratorTy,
+ std::enable_if_t<detail::sort_trivially_copyable<IteratorTy>::value,
+ int> = 0>
+inline void sort(IteratorTy Start, IteratorTy End) {
+ array_pod_sort(Start, End);
+}
+
template <typename Container> inline void sort(Container &&C) {
llvm::sort(adl_begin(C), adl_end(C));
}
@@ -1134,8 +1459,7 @@
template <typename IteratorTy, typename Compare>
inline void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
#ifdef EXPENSIVE_CHECKS
- std::mt19937 Generator(std::random_device{}());
- std::shuffle(Start, End, Generator);
+ detail::presortShuffle<IteratorTy>(Start, End);
#endif
std::sort(Start, End, Comp);
}
@@ -1149,41 +1473,23 @@
// Extra additions to <algorithm>
//===----------------------------------------------------------------------===//
-/// For a container of pointers, deletes the pointers and then clears the
-/// container.
-template<typename Container>
-void DeleteContainerPointers(Container &C) {
- for (auto V : C)
- delete V;
- C.clear();
-}
-
-/// In a container of pairs (usually a map) whose second element is a pointer,
-/// deletes the second elements and then clears the container.
-template<typename Container>
-void DeleteContainerSeconds(Container &C) {
- for (auto &V : C)
- delete V.second;
- C.clear();
-}
-
/// Get the size of a range. This is a wrapper function around std::distance
/// which is only enabled when the operation is O(1).
template <typename R>
-auto size(R &&Range, typename std::enable_if<
- std::is_same<typename std::iterator_traits<decltype(
- Range.begin())>::iterator_category,
- std::random_access_iterator_tag>::value,
- void>::type * = nullptr)
- -> decltype(std::distance(Range.begin(), Range.end())) {
+auto size(R &&Range,
+ std::enable_if_t<
+ std::is_base_of<std::random_access_iterator_tag,
+ typename std::iterator_traits<decltype(
+ Range.begin())>::iterator_category>::value,
+ void> * = nullptr) {
return std::distance(Range.begin(), Range.end());
}
/// Provide wrappers to std::for_each which take ranges instead of having to
/// pass begin/end explicitly.
-template <typename R, typename UnaryPredicate>
-UnaryPredicate for_each(R &&Range, UnaryPredicate P) {
- return std::for_each(adl_begin(Range), adl_end(Range), P);
+template <typename R, typename UnaryFunction>
+UnaryFunction for_each(R &&Range, UnaryFunction F) {
+ return std::for_each(adl_begin(Range), adl_end(Range), F);
}
/// Provide wrappers to std::all_of which take ranges instead of having to pass
@@ -1209,27 +1515,26 @@
/// Provide wrappers to std::find which take ranges instead of having to pass
/// begin/end explicitly.
-template <typename R, typename T>
-auto find(R &&Range, const T &Val) -> decltype(adl_begin(Range)) {
+template <typename R, typename T> auto find(R &&Range, const T &Val) {
return std::find(adl_begin(Range), adl_end(Range), Val);
}
/// Provide wrappers to std::find_if which take ranges instead of having to pass
/// begin/end explicitly.
template <typename R, typename UnaryPredicate>
-auto find_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) {
+auto find_if(R &&Range, UnaryPredicate P) {
return std::find_if(adl_begin(Range), adl_end(Range), P);
}
template <typename R, typename UnaryPredicate>
-auto find_if_not(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) {
+auto find_if_not(R &&Range, UnaryPredicate P) {
return std::find_if_not(adl_begin(Range), adl_end(Range), P);
}
/// Provide wrappers to std::remove_if which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename UnaryPredicate>
-auto remove_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) {
+auto remove_if(R &&Range, UnaryPredicate P) {
return std::remove_if(adl_begin(Range), adl_end(Range), P);
}
@@ -1245,6 +1550,13 @@
return std::copy(adl_begin(Range), adl_end(Range), Out);
}
+/// Provide wrappers to std::move which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename OutputIt>
+OutputIt move(R &&Range, OutputIt Out) {
+ return std::move(adl_begin(Range), adl_end(Range), Out);
+}
+
/// Wrapper function around std::find to detect if an element exists
/// in a container.
template <typename R, typename E>
@@ -1252,62 +1564,67 @@
return std::find(adl_begin(Range), adl_end(Range), Element) != adl_end(Range);
}
+/// Wrapper function around std::is_sorted to check if elements in a range \p R
+/// are sorted with respect to a comparator \p C.
+template <typename R, typename Compare> bool is_sorted(R &&Range, Compare C) {
+ return std::is_sorted(adl_begin(Range), adl_end(Range), C);
+}
+
+/// Wrapper function around std::is_sorted to check if elements in a range \p R
+/// are sorted in non-descending order.
+template <typename R> bool is_sorted(R &&Range) {
+ return std::is_sorted(adl_begin(Range), adl_end(Range));
+}
+
/// Wrapper function around std::count to count the number of times an element
/// \p Element occurs in the given range \p Range.
-template <typename R, typename E>
-auto count(R &&Range, const E &Element) ->
- typename std::iterator_traits<decltype(adl_begin(Range))>::difference_type {
+template <typename R, typename E> auto count(R &&Range, const E &Element) {
return std::count(adl_begin(Range), adl_end(Range), Element);
}
/// Wrapper function around std::count_if to count the number of times an
/// element satisfying a given predicate occurs in a range.
template <typename R, typename UnaryPredicate>
-auto count_if(R &&Range, UnaryPredicate P) ->
- typename std::iterator_traits<decltype(adl_begin(Range))>::difference_type {
+auto count_if(R &&Range, UnaryPredicate P) {
return std::count_if(adl_begin(Range), adl_end(Range), P);
}
/// Wrapper function around std::transform to apply a function to a range and
/// store the result elsewhere.
-template <typename R, typename OutputIt, typename UnaryPredicate>
-OutputIt transform(R &&Range, OutputIt d_first, UnaryPredicate P) {
- return std::transform(adl_begin(Range), adl_end(Range), d_first, P);
+template <typename R, typename OutputIt, typename UnaryFunction>
+OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F) {
+ return std::transform(adl_begin(Range), adl_end(Range), d_first, F);
}
/// Provide wrappers to std::partition which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename UnaryPredicate>
-auto partition(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) {
+auto partition(R &&Range, UnaryPredicate P) {
return std::partition(adl_begin(Range), adl_end(Range), P);
}
/// Provide wrappers to std::lower_bound which take ranges instead of having to
/// pass begin/end explicitly.
-template <typename R, typename T>
-auto lower_bound(R &&Range, T &&Value) -> decltype(adl_begin(Range)) {
+template <typename R, typename T> auto lower_bound(R &&Range, T &&Value) {
return std::lower_bound(adl_begin(Range), adl_end(Range),
std::forward<T>(Value));
}
template <typename R, typename T, typename Compare>
-auto lower_bound(R &&Range, T &&Value, Compare C)
- -> decltype(adl_begin(Range)) {
+auto lower_bound(R &&Range, T &&Value, Compare C) {
return std::lower_bound(adl_begin(Range), adl_end(Range),
std::forward<T>(Value), C);
}
/// Provide wrappers to std::upper_bound which take ranges instead of having to
/// pass begin/end explicitly.
-template <typename R, typename T>
-auto upper_bound(R &&Range, T &&Value) -> decltype(adl_begin(Range)) {
+template <typename R, typename T> auto upper_bound(R &&Range, T &&Value) {
return std::upper_bound(adl_begin(Range), adl_end(Range),
std::forward<T>(Value));
}
template <typename R, typename T, typename Compare>
-auto upper_bound(R &&Range, T &&Value, Compare C)
- -> decltype(adl_begin(Range)) {
+auto upper_bound(R &&Range, T &&Value, Compare C) {
return std::upper_bound(adl_begin(Range), adl_end(Range),
std::forward<T>(Value), C);
}
@@ -1326,7 +1643,7 @@
/// Requires that C is always true below some limit, and always false above it.
template <typename R, typename Predicate,
typename Val = decltype(*adl_begin(std::declval<R>()))>
-auto partition_point(R &&Range, Predicate P) -> decltype(adl_begin(Range)) {
+auto partition_point(R &&Range, Predicate P) {
return std::partition_point(adl_begin(Range), adl_end(Range), P);
}
@@ -1339,15 +1656,6 @@
std::equal(adl_begin(Range) + 1, adl_end(Range), adl_begin(Range)));
}
-/// Given a range of type R, iterate the entire range and return a
-/// SmallVector with elements of the vector. This is useful, for example,
-/// when you want to iterate a range and then sort the results.
-template <unsigned Size, typename R>
-SmallVector<typename std::remove_const<detail::ValueOfRange<R>>::type, Size>
-to_vector(R &&Range) {
- return {adl_begin(Range), adl_end(Range)};
-}
-
/// Provide a container algorithm similar to C++ Library Fundamentals v2's
/// `erase_if` which is equivalent to:
///
@@ -1360,6 +1668,22 @@
C.erase(remove_if(C, P), C.end());
}
+/// Wrapper function to remove a value from a container:
+///
+/// C.erase(remove(C.begin(), C.end(), V), C.end());
+template <typename Container, typename ValueType>
+void erase_value(Container &C, ValueType V) {
+ C.erase(std::remove(C.begin(), C.end(), V), C.end());
+}
+
+/// Wrapper function to append a range to a container.
+///
+/// C.insert(C.end(), R.begin(), R.end());
+template <typename Container, typename Range>
+inline void append_range(Container &C, Range &&R) {
+ C.insert(C.end(), R.begin(), R.end());
+}
+
/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
/// the range [ValIt, ValEnd) (which is not from the same container).
template<typename Container, typename RandomAccessIterator>
@@ -1387,45 +1711,73 @@
replace(Cont, ContIt, ContEnd, R.begin(), R.end());
}
+/// An STL-style algorithm similar to std::for_each that applies a second
+/// functor between every pair of elements.
+///
+/// This provides the control flow logic to, for example, print a
+/// comma-separated list:
+/// \code
+/// interleave(names.begin(), names.end(),
+/// [&](StringRef name) { os << name; },
+/// [&] { os << ", "; });
+/// \endcode
+template <typename ForwardIterator, typename UnaryFunctor,
+ typename NullaryFunctor,
+ typename = typename std::enable_if<
+ !std::is_constructible<StringRef, UnaryFunctor>::value &&
+ !std::is_constructible<StringRef, NullaryFunctor>::value>::type>
+inline void interleave(ForwardIterator begin, ForwardIterator end,
+ UnaryFunctor each_fn, NullaryFunctor between_fn) {
+ if (begin == end)
+ return;
+ each_fn(*begin);
+ ++begin;
+ for (; begin != end; ++begin) {
+ between_fn();
+ each_fn(*begin);
+ }
+}
+
+template <typename Container, typename UnaryFunctor, typename NullaryFunctor,
+ typename = typename std::enable_if<
+ !std::is_constructible<StringRef, UnaryFunctor>::value &&
+ !std::is_constructible<StringRef, NullaryFunctor>::value>::type>
+inline void interleave(const Container &c, UnaryFunctor each_fn,
+ NullaryFunctor between_fn) {
+ interleave(c.begin(), c.end(), each_fn, between_fn);
+}
+
+/// Overload of interleave for the common case of string separator.
+template <typename Container, typename UnaryFunctor, typename StreamT,
+ typename T = detail::ValueOfRange<Container>>
+inline void interleave(const Container &c, StreamT &os, UnaryFunctor each_fn,
+ const StringRef &separator) {
+ interleave(c.begin(), c.end(), each_fn, [&] { os << separator; });
+}
+template <typename Container, typename StreamT,
+ typename T = detail::ValueOfRange<Container>>
+inline void interleave(const Container &c, StreamT &os,
+ const StringRef &separator) {
+ interleave(
+ c, os, [&](const T &a) { os << a; }, separator);
+}
+
+template <typename Container, typename UnaryFunctor, typename StreamT,
+ typename T = detail::ValueOfRange<Container>>
+inline void interleaveComma(const Container &c, StreamT &os,
+ UnaryFunctor each_fn) {
+ interleave(c, os, each_fn, ", ");
+}
+template <typename Container, typename StreamT,
+ typename T = detail::ValueOfRange<Container>>
+inline void interleaveComma(const Container &c, StreamT &os) {
+ interleaveComma(c, os, [&](const T &a) { os << a; });
+}
+
//===----------------------------------------------------------------------===//
// Extra additions to <memory>
//===----------------------------------------------------------------------===//
-// Implement make_unique according to N3656.
-
-/// Constructs a `new T()` with the given args and returns a
-/// `unique_ptr<T>` which owns the object.
-///
-/// Example:
-///
-/// auto p = make_unique<int>();
-/// auto p = make_unique<std::tuple<int, int>>(0, 1);
-template <class T, class... Args>
-typename std::enable_if<!std::is_array<T>::value, std::unique_ptr<T>>::type
-make_unique(Args &&... args) {
- return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
-}
-
-/// Constructs a `new T[n]` with the given args and returns a
-/// `unique_ptr<T[]>` which owns the object.
-///
-/// \param n size of the new array.
-///
-/// Example:
-///
-/// auto p = make_unique<int[]>(2); // value-initializes the array with 0's.
-template <class T>
-typename std::enable_if<std::is_array<T>::value && std::extent<T>::value == 0,
- std::unique_ptr<T>>::type
-make_unique(size_t n) {
- return std::unique_ptr<T>(new typename std::remove_extent<T>::type[n]());
-}
-
-/// This function isn't used and is only here to provide better compile errors.
-template <class T, class... Args>
-typename std::enable_if<std::extent<T>::value != 0>::type
-make_unique(Args &&...) = delete;
-
struct FreeDeleter {
void operator()(void* v) {
::free(v);
@@ -1439,20 +1791,6 @@
}
};
-/// A functor like C++14's std::less<void> in its absence.
-struct less {
- template <typename A, typename B> bool operator()(A &&a, B &&b) const {
- return std::forward<A>(a) < std::forward<B>(b);
- }
-};
-
-/// A functor like C++14's std::equal<void> in its absence.
-struct equal {
- template <typename A, typename B> bool operator()(A &&a, B &&b) const {
- return std::forward<A>(a) == std::forward<B>(b);
- }
-};
-
/// Binary functor that adapts to any other binary functor after dereferencing
/// operands.
template <typename T> struct deref {
@@ -1461,8 +1799,7 @@
// Could be further improved to cope with non-derivable functors and
// non-binary functors (should be a variadic template member function
// operator()).
- template <typename A, typename B>
- auto operator()(A &lhs, B &rhs) const -> decltype(func(*lhs, *rhs)) {
+ template <typename A, typename B> auto operator()(A &lhs, B &rhs) const {
assert(lhs);
assert(rhs);
return func(*lhs, *rhs);
@@ -1483,6 +1820,8 @@
result_pair(std::size_t Index, IterOfRange<R> Iter)
: Index(Index), Iter(Iter) {}
+ result_pair<R>(const result_pair<R> &Other)
+ : Index(Other.Index), Iter(Other.Iter) {}
result_pair<R> &operator=(const result_pair<R> &Other) {
Index = Other.Index;
Iter = Other.Iter;
@@ -1531,6 +1870,7 @@
return Result.Iter == RHS.Result.Iter;
}
+ enumerator_iter<R>(const enumerator_iter<R> &Other) : Result(Other.Result) {}
enumerator_iter<R> &operator=(const enumerator_iter<R> &Other) {
Result = Other.Result;
return *this;
@@ -1580,8 +1920,7 @@
namespace detail {
template <typename F, typename Tuple, std::size_t... I>
-auto apply_tuple_impl(F &&f, Tuple &&t, index_sequence<I...>)
- -> decltype(std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...)) {
+decltype(auto) apply_tuple_impl(F &&f, Tuple &&t, std::index_sequence<I...>) {
return std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...);
}
@@ -1591,11 +1930,8 @@
/// tuple variadically to f as if by calling f(a1, a2, ..., an) and
/// return the result.
template <typename F, typename Tuple>
-auto apply_tuple(F &&f, Tuple &&t) -> decltype(detail::apply_tuple_impl(
- std::forward<F>(f), std::forward<Tuple>(t),
- build_index_impl<
- std::tuple_size<typename std::decay<Tuple>::type>::value>{})) {
- using Indices = build_index_impl<
+decltype(auto) apply_tuple(F &&f, Tuple &&t) {
+ using Indices = std::make_index_sequence<
std::tuple_size<typename std::decay<Tuple>::type>::value>;
return detail::apply_tuple_impl(std::forward<F>(f), std::forward<Tuple>(t),
@@ -1604,49 +1940,89 @@
/// Return true if the sequence [Begin, End) has exactly N items. Runs in O(N)
/// time. Not meant for use with random-access iterators.
-template <typename IterTy>
+/// Can optionally take a predicate to filter lazily some items.
+template <typename IterTy,
+ typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
bool hasNItems(
IterTy &&Begin, IterTy &&End, unsigned N,
- typename std::enable_if<
- !std::is_same<
- typename std::iterator_traits<typename std::remove_reference<
- decltype(Begin)>::type>::iterator_category,
- std::random_access_iterator_tag>::value,
- void>::type * = nullptr) {
- for (; N; --N, ++Begin)
+ Pred &&ShouldBeCounted =
+ [](const decltype(*std::declval<IterTy>()) &) { return true; },
+ std::enable_if_t<
+ !std::is_base_of<std::random_access_iterator_tag,
+ typename std::iterator_traits<std::remove_reference_t<
+ decltype(Begin)>>::iterator_category>::value,
+ void> * = nullptr) {
+ for (; N; ++Begin) {
if (Begin == End)
return false; // Too few.
- return Begin == End;
+ N -= ShouldBeCounted(*Begin);
+ }
+ for (; Begin != End; ++Begin)
+ if (ShouldBeCounted(*Begin))
+ return false; // Too many.
+ return true;
}
/// Return true if the sequence [Begin, End) has N or more items. Runs in O(N)
/// time. Not meant for use with random-access iterators.
-template <typename IterTy>
+/// Can optionally take a predicate to lazily filter some items.
+template <typename IterTy,
+ typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
bool hasNItemsOrMore(
IterTy &&Begin, IterTy &&End, unsigned N,
- typename std::enable_if<
- !std::is_same<
- typename std::iterator_traits<typename std::remove_reference<
- decltype(Begin)>::type>::iterator_category,
- std::random_access_iterator_tag>::value,
- void>::type * = nullptr) {
- for (; N; --N, ++Begin)
+ Pred &&ShouldBeCounted =
+ [](const decltype(*std::declval<IterTy>()) &) { return true; },
+ std::enable_if_t<
+ !std::is_base_of<std::random_access_iterator_tag,
+ typename std::iterator_traits<std::remove_reference_t<
+ decltype(Begin)>>::iterator_category>::value,
+ void> * = nullptr) {
+ for (; N; ++Begin) {
if (Begin == End)
return false; // Too few.
+ N -= ShouldBeCounted(*Begin);
+ }
return true;
}
+/// Returns true if the sequence [Begin, End) has N or less items. Can
+/// optionally take a predicate to lazily filter some items.
+template <typename IterTy,
+ typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
+bool hasNItemsOrLess(
+ IterTy &&Begin, IterTy &&End, unsigned N,
+ Pred &&ShouldBeCounted = [](const decltype(*std::declval<IterTy>()) &) {
+ return true;
+ }) {
+ assert(N != std::numeric_limits<unsigned>::max());
+ return !hasNItemsOrMore(Begin, End, N + 1, ShouldBeCounted);
+}
+
+/// Returns true if the given container has exactly N items
+template <typename ContainerTy> bool hasNItems(ContainerTy &&C, unsigned N) {
+ return hasNItems(std::begin(C), std::end(C), N);
+}
+
+/// Returns true if the given container has N or more items
+template <typename ContainerTy>
+bool hasNItemsOrMore(ContainerTy &&C, unsigned N) {
+ return hasNItemsOrMore(std::begin(C), std::end(C), N);
+}
+
+/// Returns true if the given container has N or less items
+template <typename ContainerTy>
+bool hasNItemsOrLess(ContainerTy &&C, unsigned N) {
+ return hasNItemsOrLess(std::begin(C), std::end(C), N);
+}
+
/// Returns a raw pointer that represents the same address as the argument.
///
-/// The late bound return should be removed once we move to C++14 to better
-/// align with the C++20 declaration. Also, this implementation can be removed
-/// once we move to C++20 where it's defined as std::to_addres()
+/// This implementation can be removed once we move to C++20 where it's defined
+/// as std::to_address().
///
/// The std::pointer_traits<>::to_address(p) variations of these overloads has
/// not been implemented.
-template <class Ptr> auto to_address(const Ptr &P) -> decltype(P.operator->()) {
- return P.operator->();
-}
+template <class Ptr> auto to_address(const Ptr &P) { return P.operator->(); }
template <class T> constexpr T *to_address(T *P) { return P; }
} // end namespace llvm
diff --git a/linux-x64/clang/include/llvm/ADT/ScopedHashTable.h b/linux-x64/clang/include/llvm/ADT/ScopedHashTable.h
index 40c49eb..a5e57c6 100644
--- a/linux-x64/clang/include/llvm/ADT/ScopedHashTable.h
+++ b/linux-x64/clang/include/llvm/ADT/ScopedHashTable.h
@@ -32,7 +32,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
-#include "llvm/Support/Allocator.h"
+#include "llvm/Support/AllocatorBase.h"
#include <cassert>
#include <new>
diff --git a/linux-x64/clang/include/llvm/ADT/Sequence.h b/linux-x64/clang/include/llvm/ADT/Sequence.h
index 8c505f2..8a695d7 100644
--- a/linux-x64/clang/include/llvm/ADT/Sequence.h
+++ b/linux-x64/clang/include/llvm/ADT/Sequence.h
@@ -42,6 +42,10 @@
value_sequence_iterator(const value_sequence_iterator &) = default;
value_sequence_iterator(value_sequence_iterator &&Arg)
: Value(std::move(Arg.Value)) {}
+ value_sequence_iterator &operator=(const value_sequence_iterator &Arg) {
+ Value = Arg.Value;
+ return *this;
+ }
template <typename U, typename Enabler = decltype(ValueT(std::declval<U>()))>
value_sequence_iterator(U &&Value) : Value(std::forward<U>(Value)) {}
diff --git a/linux-x64/clang/include/llvm/ADT/SetOperations.h b/linux-x64/clang/include/llvm/ADT/SetOperations.h
index 037256a..6087f47 100644
--- a/linux-x64/clang/include/llvm/ADT/SetOperations.h
+++ b/linux-x64/clang/include/llvm/ADT/SetOperations.h
@@ -65,6 +65,27 @@
S1.erase(*SI);
}
+/// set_is_subset(A, B) - Return true iff A in B
+///
+template <class S1Ty, class S2Ty>
+bool set_is_subset(const S1Ty &S1, const S2Ty &S2) {
+ if (S1.size() > S2.size())
+ return false;
+ for (auto &It : S1)
+ if (!S2.count(It))
+ return false;
+ return true;
+}
+
+/// set_is_strict_subset(A, B) - Return true iff A in B and and A != B
+///
+template <class S1Ty, class S2Ty>
+bool set_is_strict_subset(const S1Ty &S1, const S2Ty &S2) {
+ if (S1.size() >= S2.size())
+ return false;
+ return set_is_subset(S1, S2);
+}
+
} // End llvm namespace
#endif
diff --git a/linux-x64/clang/include/llvm/ADT/SetVector.h b/linux-x64/clang/include/llvm/ADT/SetVector.h
index d0a0d28..32bcd50 100644
--- a/linux-x64/clang/include/llvm/ADT/SetVector.h
+++ b/linux-x64/clang/include/llvm/ADT/SetVector.h
@@ -174,7 +174,7 @@
set_.erase(V);
// FIXME: No need to use the non-const iterator when built with
- // std:vector.erase(const_iterator) as defined in C++11. This is for
+ // std::vector.erase(const_iterator) as defined in C++11. This is for
// compatibility with non-standard libstdc++ up to 4.8 (fixed in 4.9).
auto NI = vector_.begin();
std::advance(NI, std::distance<iterator>(NI, I));
@@ -205,6 +205,11 @@
return true;
}
+ /// Check if the SetVector contains the given key.
+ bool contains(const key_type &key) const {
+ return set_.find(key) != set_.end();
+ }
+
/// Count the number of elements of a given key in the SetVector.
/// \returns 0 if the element is not in the SetVector, 1 if it is.
size_type count(const key_type &key) const {
@@ -263,6 +268,11 @@
remove(*SI);
}
+ void swap(SetVector<T, Vector, Set> &RHS) {
+ set_.swap(RHS.set_);
+ vector_.swap(RHS.vector_);
+ }
+
private:
/// A wrapper predicate designed for use with std::remove_if.
///
@@ -308,4 +318,22 @@
} // end namespace llvm
+namespace std {
+
+/// Implement std::swap in terms of SetVector swap.
+template<typename T, typename V, typename S>
+inline void
+swap(llvm::SetVector<T, V, S> &LHS, llvm::SetVector<T, V, S> &RHS) {
+ LHS.swap(RHS);
+}
+
+/// Implement std::swap in terms of SmallSetVector swap.
+template<typename T, unsigned N>
+inline void
+swap(llvm::SmallSetVector<T, N> &LHS, llvm::SmallSetVector<T, N> &RHS) {
+ LHS.swap(RHS);
+}
+
+} // end namespace std
+
#endif // LLVM_ADT_SETVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/SmallBitVector.h b/linux-x64/clang/include/llvm/ADT/SmallBitVector.h
index 742450e..f570bac 100644
--- a/linux-x64/clang/include/llvm/ADT/SmallBitVector.h
+++ b/linux-x64/clang/include/llvm/ADT/SmallBitVector.h
@@ -287,11 +287,11 @@
/// Returns -1 if the next unset bit is not found.
int find_next_unset(unsigned Prev) const {
if (isSmall()) {
- ++Prev;
uintptr_t Bits = getSmallBits();
// Mask in previous bits.
- uintptr_t Mask = (1 << Prev) - 1;
- Bits |= Mask;
+ Bits |= (uintptr_t(1) << (Prev + 1)) - 1;
+ // Mask in unused bits.
+ Bits |= ~uintptr_t(0) << getSmallSize();
if (Bits == ~uintptr_t(0) || Prev + 1 >= getSmallSize())
return -1;
@@ -662,6 +662,19 @@
getPointer()->clearBitsNotInMask(Mask, MaskWords);
}
+ void invalid() {
+ assert(empty());
+ X = (uintptr_t)-1;
+ }
+ bool isInvalid() const { return X == (uintptr_t)-1; }
+
+ ArrayRef<uintptr_t> getData(uintptr_t &Store) const {
+ if (!isSmall())
+ return getPointer()->getData();
+ Store = getSmallBits();
+ return makeArrayRef(Store);
+ }
+
private:
template <bool AddBits, bool InvertMask>
void applyMask(const uint32_t *Mask, unsigned MaskWords) {
@@ -699,6 +712,24 @@
return Result;
}
+template <> struct DenseMapInfo<SmallBitVector> {
+ static inline SmallBitVector getEmptyKey() { return SmallBitVector(); }
+ static inline SmallBitVector getTombstoneKey() {
+ SmallBitVector V;
+ V.invalid();
+ return V;
+ }
+ static unsigned getHashValue(const SmallBitVector &V) {
+ uintptr_t Store;
+ return DenseMapInfo<std::pair<unsigned, ArrayRef<uintptr_t>>>::getHashValue(
+ std::make_pair(V.size(), V.getData(Store)));
+ }
+ static bool isEqual(const SmallBitVector &LHS, const SmallBitVector &RHS) {
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return LHS.isInvalid() == RHS.isInvalid();
+ return LHS == RHS;
+ }
+};
} // end namespace llvm
namespace std {
diff --git a/linux-x64/clang/include/llvm/ADT/SmallPtrSet.h b/linux-x64/clang/include/llvm/ADT/SmallPtrSet.h
index 9135182..57dd8f6 100644
--- a/linux-x64/clang/include/llvm/ADT/SmallPtrSet.h
+++ b/linux-x64/clang/include/llvm/ADT/SmallPtrSet.h
@@ -278,7 +278,7 @@
const DebugEpochBase &Epoch)
: SmallPtrSetIteratorImpl(BP, E), DebugEpochBase::HandleBase(&Epoch) {}
- // Most methods provided by baseclass.
+ // Most methods are provided by the base class.
const PtrTy operator*() const {
assert(isHandleInSync() && "invalid iterator access!");
@@ -346,14 +346,8 @@
using ConstPtrTraits = PointerLikeTypeTraits<ConstPtrType>;
protected:
- // Constructors that forward to the base.
- SmallPtrSetImpl(const void **SmallStorage, const SmallPtrSetImpl &that)
- : SmallPtrSetImplBase(SmallStorage, that) {}
- SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize,
- SmallPtrSetImpl &&that)
- : SmallPtrSetImplBase(SmallStorage, SmallSize, std::move(that)) {}
- explicit SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize)
- : SmallPtrSetImplBase(SmallStorage, SmallSize) {}
+ // Forward constructors to the base.
+ using SmallPtrSetImplBase::SmallPtrSetImplBase;
public:
using iterator = SmallPtrSetIterator<PtrType>;
@@ -378,10 +372,15 @@
return erase_imp(PtrTraits::getAsVoidPointer(Ptr));
}
/// count - Return 1 if the specified pointer is in the set, 0 otherwise.
- size_type count(ConstPtrType Ptr) const { return find(Ptr) != end() ? 1 : 0; }
+ size_type count(ConstPtrType Ptr) const {
+ return find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)) != EndPointer();
+ }
iterator find(ConstPtrType Ptr) const {
return makeIterator(find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)));
}
+ bool contains(ConstPtrType Ptr) const {
+ return find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)) != EndPointer();
+ }
template <typename IterT>
void insert(IterT I, IterT E) {
@@ -409,6 +408,32 @@
}
};
+/// Equality comparison for SmallPtrSet.
+///
+/// Iterates over elements of LHS confirming that each value from LHS is also in
+/// RHS, and that no additional values are in RHS.
+template <typename PtrType>
+bool operator==(const SmallPtrSetImpl<PtrType> &LHS,
+ const SmallPtrSetImpl<PtrType> &RHS) {
+ if (LHS.size() != RHS.size())
+ return false;
+
+ for (const auto *KV : LHS)
+ if (!RHS.count(KV))
+ return false;
+
+ return true;
+}
+
+/// Inequality comparison for SmallPtrSet.
+///
+/// Equivalent to !(LHS == RHS).
+template <typename PtrType>
+bool operator!=(const SmallPtrSetImpl<PtrType> &LHS,
+ const SmallPtrSetImpl<PtrType> &RHS) {
+ return !(LHS == RHS);
+}
+
/// SmallPtrSet - This class implements a set which is optimized for holding
/// SmallSize or less elements. This internally rounds up SmallSize to the next
/// power of two if it is not already a power of two. See the comments above
diff --git a/linux-x64/clang/include/llvm/ADT/SmallSet.h b/linux-x64/clang/include/llvm/ADT/SmallSet.h
index 6b128c2..0600e52 100644
--- a/linux-x64/clang/include/llvm/ADT/SmallSet.h
+++ b/linux-x64/clang/include/llvm/ADT/SmallSet.h
@@ -232,6 +232,13 @@
return {Set.end()};
}
+ /// Check if the SmallSet contains the given element.
+ bool contains(const T &V) const {
+ if (isSmall())
+ return vfind(V) != Vector.end();
+ return Set.find(V) != Set.end();
+ }
+
private:
bool isSmall() const { return Set.empty(); }
@@ -248,6 +255,31 @@
template <typename PointeeType, unsigned N>
class SmallSet<PointeeType*, N> : public SmallPtrSet<PointeeType*, N> {};
+/// Equality comparison for SmallSet.
+///
+/// Iterates over elements of LHS confirming that each element is also a member
+/// of RHS, and that RHS contains no additional values.
+/// Equivalent to N calls to RHS.count.
+/// For small-set mode amortized complexity is O(N^2)
+/// For large-set mode amortized complexity is linear, worst case is O(N^2) (if
+/// every hash collides).
+template <typename T, unsigned LN, unsigned RN, typename C>
+bool operator==(const SmallSet<T, LN, C> &LHS, const SmallSet<T, RN, C> &RHS) {
+ if (LHS.size() != RHS.size())
+ return false;
+
+ // All elements in LHS must also be in RHS
+ return all_of(LHS, [&RHS](const T &E) { return RHS.count(E); });
+}
+
+/// Inequality comparison for SmallSet.
+///
+/// Equivalent to !(LHS == RHS). See operator== for performance notes.
+template <typename T, unsigned LN, unsigned RN, typename C>
+bool operator!=(const SmallSet<T, LN, C> &LHS, const SmallSet<T, RN, C> &RHS) {
+ return !(LHS == RHS);
+}
+
} // end namespace llvm
#endif // LLVM_ADT_SMALLSET_H
diff --git a/linux-x64/clang/include/llvm/ADT/SmallString.h b/linux-x64/clang/include/llvm/ADT/SmallString.h
index 898be80..c0e8fcd 100644
--- a/linux-x64/clang/include/llvm/ADT/SmallString.h
+++ b/linux-x64/clang/include/llvm/ADT/SmallString.h
@@ -30,6 +30,12 @@
/// Initialize from a StringRef.
SmallString(StringRef S) : SmallVector<char, InternalLen>(S.begin(), S.end()) {}
+ /// Initialize by concatenating a list of StringRefs.
+ SmallString(std::initializer_list<StringRef> Refs)
+ : SmallVector<char, InternalLen>() {
+ this->append(Refs);
+ }
+
/// Initialize with a range.
template<typename ItTy>
SmallString(ItTy S, ItTy E) : SmallVector<char, InternalLen>(S, E) {}
@@ -65,6 +71,12 @@
SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
}
+ /// Assign from a list of StringRefs.
+ void assign(std::initializer_list<StringRef> Refs) {
+ this->clear();
+ append(Refs);
+ }
+
/// @}
/// @name String Concatenation
/// @{
@@ -89,6 +101,20 @@
SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
}
+ /// Append from a list of StringRefs.
+ void append(std::initializer_list<StringRef> Refs) {
+ size_t SizeNeeded = this->size();
+ for (const StringRef &Ref : Refs)
+ SizeNeeded += Ref.size();
+ this->reserve(SizeNeeded);
+ auto CurEnd = this->end();
+ for (const StringRef &Ref : Refs) {
+ this->uninitialized_copy(Ref.begin(), Ref.end(), CurEnd);
+ CurEnd += Ref.size();
+ }
+ this->set_size(SizeNeeded);
+ }
+
/// @}
/// @name String Comparison
/// @{
@@ -263,7 +289,7 @@
// Extra methods.
/// Explicit conversion to StringRef.
- StringRef str() const { return StringRef(this->begin(), this->size()); }
+ StringRef str() const { return StringRef(this->data(), this->size()); }
// TODO: Make this const, if it's safe...
const char* c_str() {
@@ -275,10 +301,14 @@
/// Implicit conversion to StringRef.
operator StringRef() const { return str(); }
+ explicit operator std::string() const {
+ return std::string(this->data(), this->size());
+ }
+
// Extra operators.
- const SmallString &operator=(StringRef RHS) {
- this->clear();
- return *this += RHS;
+ SmallString &operator=(StringRef RHS) {
+ this->assign(RHS);
+ return *this;
}
SmallString &operator+=(StringRef RHS) {
diff --git a/linux-x64/clang/include/llvm/ADT/SmallVector.h b/linux-x64/clang/include/llvm/ADT/SmallVector.h
index 1758690..2e47846 100644
--- a/linux-x64/clang/include/llvm/ADT/SmallVector.h
+++ b/linux-x64/clang/include/llvm/ADT/SmallVector.h
@@ -14,12 +14,11 @@
#define LLVM_ADT_SMALLVECTOR_H
#include "llvm/ADT/iterator_range.h"
-#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemAlloc.h"
#include "llvm/Support/type_traits.h"
-#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -27,6 +26,7 @@
#include <cstring>
#include <initializer_list>
#include <iterator>
+#include <limits>
#include <memory>
#include <new>
#include <type_traits>
@@ -34,11 +34,23 @@
namespace llvm {
-/// This is all the non-templated stuff common to all SmallVectors.
-class SmallVectorBase {
+/// This is all the stuff common to all SmallVectors.
+///
+/// The template parameter specifies the type which should be used to hold the
+/// Size and Capacity of the SmallVector, so it can be adjusted.
+/// Using 32 bit size is desirable to shrink the size of the SmallVector.
+/// Using 64 bit size is desirable for cases like SmallVector<char>, where a
+/// 32 bit size would limit the vector to ~4GB. SmallVectors are used for
+/// buffering bitcode output - which can exceed 4GB.
+template <class Size_T> class SmallVectorBase {
protected:
void *BeginX;
- unsigned Size = 0, Capacity;
+ Size_T Size = 0, Capacity;
+
+ /// The maximum value of the Size_T used.
+ static constexpr size_t SizeTypeMax() {
+ return std::numeric_limits<Size_T>::max();
+ }
SmallVectorBase() = delete;
SmallVectorBase(void *FirstEl, size_t TotalCapacity)
@@ -46,7 +58,15 @@
/// This is an implementation of the grow() method which only works
/// on POD-like data types and is out of line to reduce code duplication.
- void grow_pod(void *FirstEl, size_t MinCapacity, size_t TSize);
+ /// This function will report a fatal error if it cannot increase capacity.
+ void grow_pod(void *FirstEl, size_t MinSize, size_t TSize);
+
+ /// Report that MinSize doesn't fit into this vector's size type. Throws
+ /// std::length_error or calls report_fatal_error.
+ LLVM_ATTRIBUTE_NORETURN static void report_size_overflow(size_t MinSize);
+ /// Report that this vector is already at maximum capacity. Throws
+ /// std::length_error or calls report_fatal_error.
+ LLVM_ATTRIBUTE_NORETURN static void report_at_maximum_capacity();
public:
size_t size() const { return Size; }
@@ -69,17 +89,26 @@
}
};
+template <class T>
+using SmallVectorSizeType =
+ typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t,
+ uint32_t>::type;
+
/// Figure out the offset of the first element.
template <class T, typename = void> struct SmallVectorAlignmentAndSize {
- AlignedCharArrayUnion<SmallVectorBase> Base;
- AlignedCharArrayUnion<T> FirstEl;
+ alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof(
+ SmallVectorBase<SmallVectorSizeType<T>>)];
+ alignas(T) char FirstEl[sizeof(T)];
};
/// This is the part of SmallVectorTemplateBase which does not depend on whether
/// the type T is a POD. The extra dummy template argument is used by ArrayRef
/// to avoid unnecessarily requiring T to be complete.
template <typename T, typename = void>
-class SmallVectorTemplateCommon : public SmallVectorBase {
+class SmallVectorTemplateCommon
+ : public SmallVectorBase<SmallVectorSizeType<T>> {
+ using Base = SmallVectorBase<SmallVectorSizeType<T>>;
+
/// Find the address of the first element. For this pointer math to be valid
/// with small-size of 0 for T with lots of alignment, it's important that
/// SmallVectorStorage is properly-aligned even for small-size of 0.
@@ -91,21 +120,125 @@
// Space after 'FirstEl' is clobbered, do not add any instance vars after it.
protected:
- SmallVectorTemplateCommon(size_t Size)
- : SmallVectorBase(getFirstEl(), Size) {}
+ SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {}
- void grow_pod(size_t MinCapacity, size_t TSize) {
- SmallVectorBase::grow_pod(getFirstEl(), MinCapacity, TSize);
+ void grow_pod(size_t MinSize, size_t TSize) {
+ Base::grow_pod(getFirstEl(), MinSize, TSize);
}
/// Return true if this is a smallvector which has not had dynamic
/// memory allocated for it.
- bool isSmall() const { return BeginX == getFirstEl(); }
+ bool isSmall() const { return this->BeginX == getFirstEl(); }
/// Put this vector in a state of being small.
void resetToSmall() {
- BeginX = getFirstEl();
- Size = Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
+ this->BeginX = getFirstEl();
+ this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
+ }
+
+ /// Return true if V is an internal reference to the given range.
+ bool isReferenceToRange(const void *V, const void *First, const void *Last) const {
+ // Use std::less to avoid UB.
+ std::less<> LessThan;
+ return !LessThan(V, First) && LessThan(V, Last);
+ }
+
+ /// Return true if V is an internal reference to this vector.
+ bool isReferenceToStorage(const void *V) const {
+ return isReferenceToRange(V, this->begin(), this->end());
+ }
+
+ /// Return true if First and Last form a valid (possibly empty) range in this
+ /// vector's storage.
+ bool isRangeInStorage(const void *First, const void *Last) const {
+ // Use std::less to avoid UB.
+ std::less<> LessThan;
+ return !LessThan(First, this->begin()) && !LessThan(Last, First) &&
+ !LessThan(this->end(), Last);
+ }
+
+ /// Return true unless Elt will be invalidated by resizing the vector to
+ /// NewSize.
+ bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
+ // Past the end.
+ if (LLVM_LIKELY(!isReferenceToStorage(Elt)))
+ return true;
+
+ // Return false if Elt will be destroyed by shrinking.
+ if (NewSize <= this->size())
+ return Elt < this->begin() + NewSize;
+
+ // Return false if we need to grow.
+ return NewSize <= this->capacity();
+ }
+
+ /// Check whether Elt will be invalidated by resizing the vector to NewSize.
+ void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
+ assert(isSafeToReferenceAfterResize(Elt, NewSize) &&
+ "Attempting to reference an element of the vector in an operation "
+ "that invalidates it");
+ }
+
+ /// Check whether Elt will be invalidated by increasing the size of the
+ /// vector by N.
+ void assertSafeToAdd(const void *Elt, size_t N = 1) {
+ this->assertSafeToReferenceAfterResize(Elt, this->size() + N);
+ }
+
+ /// Check whether any part of the range will be invalidated by clearing.
+ void assertSafeToReferenceAfterClear(const T *From, const T *To) {
+ if (From == To)
+ return;
+ this->assertSafeToReferenceAfterResize(From, 0);
+ this->assertSafeToReferenceAfterResize(To - 1, 0);
+ }
+ template <
+ class ItTy,
+ std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
+ bool> = false>
+ void assertSafeToReferenceAfterClear(ItTy, ItTy) {}
+
+ /// Check whether any part of the range will be invalidated by growing.
+ void assertSafeToAddRange(const T *From, const T *To) {
+ if (From == To)
+ return;
+ this->assertSafeToAdd(From, To - From);
+ this->assertSafeToAdd(To - 1, To - From);
+ }
+ template <
+ class ItTy,
+ std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
+ bool> = false>
+ void assertSafeToAddRange(ItTy, ItTy) {}
+
+ /// Check whether any argument will be invalidated by growing for
+ /// emplace_back.
+ template <class ArgType1, class... ArgTypes>
+ void assertSafeToEmplace(ArgType1 &Arg1, ArgTypes &... Args) {
+ this->assertSafeToAdd(&Arg1);
+ this->assertSafeToEmplace(Args...);
+ }
+ void assertSafeToEmplace() {}
+
+ /// Reserve enough space to add one element, and return the updated element
+ /// pointer in case it was a reference to the storage.
+ template <class U>
+ static const T *reserveForParamAndGetAddressImpl(U *This, const T &Elt,
+ size_t N) {
+ size_t NewSize = This->size() + N;
+ if (LLVM_LIKELY(NewSize <= This->capacity()))
+ return &Elt;
+
+ bool ReferencesStorage = false;
+ int64_t Index = -1;
+ if (!U::TakesParamByValue) {
+ if (LLVM_UNLIKELY(This->isReferenceToStorage(&Elt))) {
+ ReferencesStorage = true;
+ Index = &Elt - This->begin();
+ }
+ }
+ This->grow(NewSize);
+ return ReferencesStorage ? This->begin() + Index : &Elt;
}
public:
@@ -123,6 +256,10 @@
using pointer = T *;
using const_pointer = const T *;
+ using Base::capacity;
+ using Base::empty;
+ using Base::size;
+
// forward iterator creation methods.
iterator begin() { return (iterator)this->BeginX; }
const_iterator begin() const { return (const_iterator)this->BeginX; }
@@ -136,7 +273,9 @@
const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
size_type size_in_bytes() const { return size() * sizeof(T); }
- size_type max_size() const { return size_type(-1) / sizeof(T); }
+ size_type max_size() const {
+ return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T));
+ }
size_t capacity_in_bytes() const { return capacity() * sizeof(T); }
@@ -173,11 +312,24 @@
}
};
-/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put method
-/// implementations that are designed to work with non-POD-like T's.
-template <typename T, bool = is_trivially_copyable<T>::value>
+/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put
+/// method implementations that are designed to work with non-trivial T's.
+///
+/// We approximate is_trivially_copyable with trivial move/copy construction and
+/// trivial destruction. While the standard doesn't specify that you're allowed
+/// copy these types with memcpy, there is no way for the type to observe this.
+/// This catches the important case of std::pair<POD, POD>, which is not
+/// trivially assignable.
+template <typename T, bool = (is_trivially_copy_constructible<T>::value) &&
+ (is_trivially_move_constructible<T>::value) &&
+ std::is_trivially_destructible<T>::value>
class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
+ friend class SmallVectorTemplateCommon<T>;
+
protected:
+ static constexpr bool TakesParamByValue = false;
+ using ValueParamT = const T &;
+
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
static void destroy_range(T *S, T *E) {
@@ -207,18 +359,32 @@
/// element, or MinSize more elements if specified.
void grow(size_t MinSize = 0);
+ /// Reserve enough space to add one element, and return the updated element
+ /// pointer in case it was a reference to the storage.
+ const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
+ return this->reserveForParamAndGetAddressImpl(this, Elt, N);
+ }
+
+ /// Reserve enough space to add one element, and return the updated element
+ /// pointer in case it was a reference to the storage.
+ T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
+ return const_cast<T *>(
+ this->reserveForParamAndGetAddressImpl(this, Elt, N));
+ }
+
+ static T &&forward_value_param(T &&V) { return std::move(V); }
+ static const T &forward_value_param(const T &V) { return V; }
+
public:
void push_back(const T &Elt) {
- if (LLVM_UNLIKELY(this->size() >= this->capacity()))
- this->grow();
- ::new ((void*) this->end()) T(Elt);
+ const T *EltPtr = reserveForParamAndGetAddress(Elt);
+ ::new ((void *)this->end()) T(*EltPtr);
this->set_size(this->size() + 1);
}
void push_back(T &&Elt) {
- if (LLVM_UNLIKELY(this->size() >= this->capacity()))
- this->grow();
- ::new ((void*) this->end()) T(::std::move(Elt));
+ T *EltPtr = reserveForParamAndGetAddress(Elt);
+ ::new ((void *)this->end()) T(::std::move(*EltPtr));
this->set_size(this->size() + 1);
}
@@ -231,12 +397,21 @@
// Define this out-of-line to dissuade the C++ compiler from inlining it.
template <typename T, bool TriviallyCopyable>
void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
- if (MinSize > UINT32_MAX)
- report_bad_alloc_error("SmallVector capacity overflow during allocation");
+ // Ensure we can fit the new capacity.
+ // This is only going to be applicable when the capacity is 32 bit.
+ if (MinSize > this->SizeTypeMax())
+ this->report_size_overflow(MinSize);
+
+ // Ensure we can meet the guarantee of space for at least one more element.
+ // The above check alone will not catch the case where grow is called with a
+ // default MinSize of 0, but the current capacity cannot be increased.
+ // This is only going to be applicable when the capacity is 32 bit.
+ if (this->capacity() == this->SizeTypeMax())
+ this->report_at_maximum_capacity();
// Always grow, even from zero.
size_t NewCapacity = size_t(NextPowerOf2(this->capacity() + 2));
- NewCapacity = std::min(std::max(NewCapacity, MinSize), size_t(UINT32_MAX));
+ NewCapacity = std::min(std::max(NewCapacity, MinSize), this->SizeTypeMax());
T *NewElts = static_cast<T*>(llvm::safe_malloc(NewCapacity*sizeof(T)));
// Move the elements over.
@@ -254,10 +429,23 @@
}
/// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put
-/// method implementations that are designed to work with POD-like T's.
+/// method implementations that are designed to work with trivially copyable
+/// T's. This allows using memcpy in place of copy/move construction and
+/// skipping destruction.
template <typename T>
class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
+ friend class SmallVectorTemplateCommon<T>;
+
protected:
+ /// True if it's cheap enough to take parameters by value. Doing so avoids
+ /// overhead related to mitigations for reference invalidation.
+ static constexpr bool TakesParamByValue = sizeof(T) <= 2 * sizeof(void *);
+
+ /// Either const T& or T, depending on whether it's cheap enough to take
+ /// parameters by value.
+ using ValueParamT =
+ typename std::conditional<TakesParamByValue, T, const T &>::type;
+
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
// No need to do a destroy loop for POD's.
@@ -284,8 +472,8 @@
template <typename T1, typename T2>
static void uninitialized_copy(
T1 *I, T1 *E, T2 *Dest,
- typename std::enable_if<std::is_same<typename std::remove_const<T1>::type,
- T2>::value>::type * = nullptr) {
+ std::enable_if_t<std::is_same<typename std::remove_const<T1>::type,
+ T2>::value> * = nullptr) {
// Use memcpy for PODs iterated by pointers (which includes SmallVector
// iterators): std::uninitialized_copy optimizes to memmove, but we can
// use memcpy here. Note that I and E are iterators and thus might be
@@ -298,11 +486,26 @@
/// least one more element or MinSize if specified.
void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); }
+ /// Reserve enough space to add one element, and return the updated element
+ /// pointer in case it was a reference to the storage.
+ const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
+ return this->reserveForParamAndGetAddressImpl(this, Elt, N);
+ }
+
+ /// Reserve enough space to add one element, and return the updated element
+ /// pointer in case it was a reference to the storage.
+ T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
+ return const_cast<T *>(
+ this->reserveForParamAndGetAddressImpl(this, Elt, N));
+ }
+
+ /// Copy \p V or return a reference, depending on \a ValueParamT.
+ static ValueParamT forward_value_param(ValueParamT V) { return V; }
+
public:
- void push_back(const T &Elt) {
- if (LLVM_UNLIKELY(this->size() >= this->capacity()))
- this->grow();
- memcpy(reinterpret_cast<void *>(this->end()), &Elt, sizeof(T));
+ void push_back(ValueParamT Elt) {
+ const T *EltPtr = reserveForParamAndGetAddress(Elt);
+ memcpy(reinterpret_cast<void *>(this->end()), EltPtr, sizeof(T));
this->set_size(this->size() + 1);
}
@@ -322,6 +525,9 @@
using size_type = typename SuperClass::size_type;
protected:
+ using SmallVectorTemplateBase<T>::TakesParamByValue;
+ using ValueParamT = typename SuperClass::ValueParamT;
+
// Default ctor - Initialize to empty.
explicit SmallVectorImpl(unsigned N)
: SmallVectorTemplateBase<T>(N) {}
@@ -341,29 +547,38 @@
this->Size = 0;
}
- void resize(size_type N) {
+private:
+ template <bool ForOverwrite> void resizeImpl(size_type N) {
if (N < this->size()) {
- this->destroy_range(this->begin()+N, this->end());
- this->set_size(N);
+ this->pop_back_n(this->size() - N);
} else if (N > this->size()) {
- if (this->capacity() < N)
- this->grow(N);
+ this->reserve(N);
for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
- new (&*I) T();
+ if (ForOverwrite)
+ new (&*I) T;
+ else
+ new (&*I) T();
this->set_size(N);
}
}
- void resize(size_type N, const T &NV) {
+public:
+ void resize(size_type N) { resizeImpl<false>(N); }
+
+ /// Like resize, but \ref T is POD, the new values won't be initialized.
+ void resize_for_overwrite(size_type N) { resizeImpl<true>(N); }
+
+ void resize(size_type N, ValueParamT NV) {
+ if (N == this->size())
+ return;
+
if (N < this->size()) {
- this->destroy_range(this->begin()+N, this->end());
- this->set_size(N);
- } else if (N > this->size()) {
- if (this->capacity() < N)
- this->grow(N);
- std::uninitialized_fill(this->end(), this->begin()+N, NV);
- this->set_size(N);
+ this->pop_back_n(this->size() - N);
+ return;
}
+
+ // N > this->size(). Defer to append.
+ this->append(N - this->size(), NV);
}
void reserve(size_type N) {
@@ -371,6 +586,12 @@
this->grow(N);
}
+ void pop_back_n(size_type NumItems) {
+ assert(this->size() >= NumItems);
+ this->destroy_range(this->end() - NumItems, this->end());
+ this->set_size(this->size() - NumItems);
+ }
+
LLVM_NODISCARD T pop_back_val() {
T Result = ::std::move(this->back());
this->pop_back();
@@ -381,24 +602,21 @@
/// Add the specified range to the end of the SmallVector.
template <typename in_iter,
- typename = typename std::enable_if<std::is_convertible<
+ typename = std::enable_if_t<std::is_convertible<
typename std::iterator_traits<in_iter>::iterator_category,
- std::input_iterator_tag>::value>::type>
+ std::input_iterator_tag>::value>>
void append(in_iter in_start, in_iter in_end) {
+ this->assertSafeToAddRange(in_start, in_end);
size_type NumInputs = std::distance(in_start, in_end);
- if (NumInputs > this->capacity() - this->size())
- this->grow(this->size()+NumInputs);
-
+ this->reserve(this->size() + NumInputs);
this->uninitialized_copy(in_start, in_end, this->end());
this->set_size(this->size() + NumInputs);
}
/// Append \p NumInputs copies of \p Elt to the end.
- void append(size_type NumInputs, const T &Elt) {
- if (NumInputs > this->capacity() - this->size())
- this->grow(this->size()+NumInputs);
-
- std::uninitialized_fill_n(this->end(), NumInputs, Elt);
+ void append(size_type NumInputs, ValueParamT Elt) {
+ const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs);
+ std::uninitialized_fill_n(this->end(), NumInputs, *EltPtr);
this->set_size(this->size() + NumInputs);
}
@@ -410,18 +628,19 @@
// re-initializing them - for all assign(...) variants.
void assign(size_type NumElts, const T &Elt) {
+ this->assertSafeToReferenceAfterResize(&Elt, 0);
clear();
- if (this->capacity() < NumElts)
- this->grow(NumElts);
+ this->reserve(NumElts);
this->set_size(NumElts);
std::uninitialized_fill(this->begin(), this->end(), Elt);
}
template <typename in_iter,
- typename = typename std::enable_if<std::is_convertible<
+ typename = std::enable_if_t<std::is_convertible<
typename std::iterator_traits<in_iter>::iterator_category,
- std::input_iterator_tag>::value>::type>
+ std::input_iterator_tag>::value>>
void assign(in_iter in_start, in_iter in_end) {
+ this->assertSafeToReferenceAfterClear(in_start, in_end);
clear();
append(in_start, in_end);
}
@@ -435,8 +654,7 @@
// Just cast away constness because this is a non-const member function.
iterator I = const_cast<iterator>(CI);
- assert(I >= this->begin() && "Iterator to erase is out of bounds.");
- assert(I < this->end() && "Erasing at past-the-end iterator.");
+ assert(this->isReferenceToStorage(CI) && "Iterator to erase is out of bounds.");
iterator N = I;
// Shift all elts down one.
@@ -451,9 +669,7 @@
iterator S = const_cast<iterator>(CS);
iterator E = const_cast<iterator>(CE);
- assert(S >= this->begin() && "Range to erase is out of bounds.");
- assert(S <= E && "Trying to erase invalid range.");
- assert(E <= this->end() && "Trying to erase past the end.");
+ assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds.");
iterator N = S;
// Shift all elts down.
@@ -464,20 +680,26 @@
return(N);
}
- iterator insert(iterator I, T &&Elt) {
+private:
+ template <class ArgType> iterator insert_one_impl(iterator I, ArgType &&Elt) {
+ // Callers ensure that ArgType is derived from T.
+ static_assert(
+ std::is_same<std::remove_const_t<std::remove_reference_t<ArgType>>,
+ T>::value,
+ "ArgType must be derived from T!");
+
if (I == this->end()) { // Important special case for empty vector.
- this->push_back(::std::move(Elt));
+ this->push_back(::std::forward<ArgType>(Elt));
return this->end()-1;
}
- assert(I >= this->begin() && "Insertion iterator is out of bounds.");
- assert(I <= this->end() && "Inserting past the end of the vector.");
+ assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
- if (this->size() >= this->capacity()) {
- size_t EltNo = I-this->begin();
- this->grow();
- I = this->begin()+EltNo;
- }
+ // Grow if necessary.
+ size_t Index = I - this->begin();
+ std::remove_reference_t<ArgType> *EltPtr =
+ this->reserveForParamAndGetAddress(Elt);
+ I = this->begin() + Index;
::new ((void*) this->end()) T(::std::move(this->back()));
// Push everything else over.
@@ -485,45 +707,26 @@
this->set_size(this->size() + 1);
// If we just moved the element we're inserting, be sure to update
- // the reference.
- T *EltPtr = &Elt;
- if (I <= EltPtr && EltPtr < this->end())
+ // the reference (never happens if TakesParamByValue).
+ static_assert(!TakesParamByValue || std::is_same<ArgType, T>::value,
+ "ArgType must be 'T' when taking by value!");
+ if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end()))
++EltPtr;
- *I = ::std::move(*EltPtr);
+ *I = ::std::forward<ArgType>(*EltPtr);
return I;
}
+public:
+ iterator insert(iterator I, T &&Elt) {
+ return insert_one_impl(I, this->forward_value_param(std::move(Elt)));
+ }
+
iterator insert(iterator I, const T &Elt) {
- if (I == this->end()) { // Important special case for empty vector.
- this->push_back(Elt);
- return this->end()-1;
- }
-
- assert(I >= this->begin() && "Insertion iterator is out of bounds.");
- assert(I <= this->end() && "Inserting past the end of the vector.");
-
- if (this->size() >= this->capacity()) {
- size_t EltNo = I-this->begin();
- this->grow();
- I = this->begin()+EltNo;
- }
- ::new ((void*) this->end()) T(std::move(this->back()));
- // Push everything else over.
- std::move_backward(I, this->end()-1, this->end());
- this->set_size(this->size() + 1);
-
- // If we just moved the element we're inserting, be sure to update
- // the reference.
- const T *EltPtr = &Elt;
- if (I <= EltPtr && EltPtr < this->end())
- ++EltPtr;
-
- *I = *EltPtr;
- return I;
+ return insert_one_impl(I, this->forward_value_param(Elt));
}
- iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
+ iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt) {
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
@@ -532,11 +735,11 @@
return this->begin()+InsertElt;
}
- assert(I >= this->begin() && "Insertion iterator is out of bounds.");
- assert(I <= this->end() && "Inserting past the end of the vector.");
+ assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
- // Ensure there is enough space.
- reserve(this->size() + NumToInsert);
+ // Ensure there is enough space, and get the (maybe updated) address of
+ // Elt.
+ const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert);
// Uninvalidate the iterator.
I = this->begin()+InsertElt;
@@ -553,7 +756,12 @@
// Copy the existing elements that get replaced.
std::move_backward(I, OldEnd-NumToInsert, OldEnd);
- std::fill_n(I, NumToInsert, Elt);
+ // If we just moved the element we're inserting, be sure to update
+ // the reference (never happens if TakesParamByValue).
+ if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
+ EltPtr += NumToInsert;
+
+ std::fill_n(I, NumToInsert, *EltPtr);
return I;
}
@@ -566,18 +774,23 @@
size_t NumOverwritten = OldEnd-I;
this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
+ // If we just moved the element we're inserting, be sure to update
+ // the reference (never happens if TakesParamByValue).
+ if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
+ EltPtr += NumToInsert;
+
// Replace the overwritten part.
- std::fill_n(I, NumOverwritten, Elt);
+ std::fill_n(I, NumOverwritten, *EltPtr);
// Insert the non-overwritten middle part.
- std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
+ std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr);
return I;
}
template <typename ItTy,
- typename = typename std::enable_if<std::is_convertible<
+ typename = std::enable_if_t<std::is_convertible<
typename std::iterator_traits<ItTy>::iterator_category,
- std::input_iterator_tag>::value>::type>
+ std::input_iterator_tag>::value>>
iterator insert(iterator I, ItTy From, ItTy To) {
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
@@ -587,8 +800,10 @@
return this->begin()+InsertElt;
}
- assert(I >= this->begin() && "Insertion iterator is out of bounds.");
- assert(I <= this->end() && "Inserting past the end of the vector.");
+ assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
+
+ // Check that the reserve that follows doesn't invalidate the iterators.
+ this->assertSafeToAddRange(From, To);
size_t NumToInsert = std::distance(From, To);
@@ -639,6 +854,7 @@
}
template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) {
+ this->assertSafeToEmplace(Args...);
if (LLVM_UNLIKELY(this->size() >= this->capacity()))
this->grow();
::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
@@ -675,10 +891,8 @@
std::swap(this->Capacity, RHS.Capacity);
return;
}
- if (RHS.size() > this->capacity())
- this->grow(RHS.size());
- if (this->size() > RHS.capacity())
- RHS.grow(this->size());
+ this->reserve(RHS.size());
+ RHS.reserve(this->size());
// Swap the shared elements.
size_t NumShared = this->size();
@@ -733,8 +947,7 @@
// FIXME: don't do this if they're efficiently moveable.
if (this->capacity() < RHSSize) {
// Destroy current elements.
- this->destroy_range(this->begin(), this->end());
- this->set_size(0);
+ this->clear();
CurSize = 0;
this->grow(RHSSize);
} else if (CurSize) {
@@ -793,8 +1006,7 @@
// elements.
if (this->capacity() < RHSSize) {
// Destroy current elements.
- this->destroy_range(this->begin(), this->end());
- this->set_size(0);
+ this->clear();
CurSize = 0;
this->grow(RHSSize);
} else if (CurSize) {
@@ -817,13 +1029,71 @@
/// to avoid allocating unnecessary storage.
template <typename T, unsigned N>
struct SmallVectorStorage {
- AlignedCharArrayUnion<T> InlineElts[N];
+ alignas(T) char InlineElts[N * sizeof(T)];
};
/// We need the storage to be properly aligned even for small-size of 0 so that
/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
/// well-defined.
-template <typename T> struct alignas(alignof(T)) SmallVectorStorage<T, 0> {};
+template <typename T> struct alignas(T) SmallVectorStorage<T, 0> {};
+
+/// Forward declaration of SmallVector so that
+/// calculateSmallVectorDefaultInlinedElements can reference
+/// `sizeof(SmallVector<T, 0>)`.
+template <typename T, unsigned N> class LLVM_GSL_OWNER SmallVector;
+
+/// Helper class for calculating the default number of inline elements for
+/// `SmallVector<T>`.
+///
+/// This should be migrated to a constexpr function when our minimum
+/// compiler support is enough for multi-statement constexpr functions.
+template <typename T> struct CalculateSmallVectorDefaultInlinedElements {
+ // Parameter controlling the default number of inlined elements
+ // for `SmallVector<T>`.
+ //
+ // The default number of inlined elements ensures that
+ // 1. There is at least one inlined element.
+ // 2. `sizeof(SmallVector<T>) <= kPreferredSmallVectorSizeof` unless
+ // it contradicts 1.
+ static constexpr size_t kPreferredSmallVectorSizeof = 64;
+
+ // static_assert that sizeof(T) is not "too big".
+ //
+ // Because our policy guarantees at least one inlined element, it is possible
+ // for an arbitrarily large inlined element to allocate an arbitrarily large
+ // amount of inline storage. We generally consider it an antipattern for a
+ // SmallVector to allocate an excessive amount of inline storage, so we want
+ // to call attention to these cases and make sure that users are making an
+ // intentional decision if they request a lot of inline storage.
+ //
+ // We want this assertion to trigger in pathological cases, but otherwise
+ // not be too easy to hit. To accomplish that, the cutoff is actually somewhat
+ // larger than kPreferredSmallVectorSizeof (otherwise,
+ // `SmallVector<SmallVector<T>>` would be one easy way to trip it, and that
+ // pattern seems useful in practice).
+ //
+ // One wrinkle is that this assertion is in theory non-portable, since
+ // sizeof(T) is in general platform-dependent. However, we don't expect this
+ // to be much of an issue, because most LLVM development happens on 64-bit
+ // hosts, and therefore sizeof(T) is expected to *decrease* when compiled for
+ // 32-bit hosts, dodging the issue. The reverse situation, where development
+ // happens on a 32-bit host and then fails due to sizeof(T) *increasing* on a
+ // 64-bit host, is expected to be very rare.
+ static_assert(
+ sizeof(T) <= 256,
+ "You are trying to use a default number of inlined elements for "
+ "`SmallVector<T>` but `sizeof(T)` is really big! Please use an "
+ "explicit number of inlined elements with `SmallVector<T, N>` to make "
+ "sure you really want that much inline storage.");
+
+ // Discount the size of the header itself when calculating the maximum inline
+ // bytes.
+ static constexpr size_t PreferredInlineBytes =
+ kPreferredSmallVectorSizeof - sizeof(SmallVector<T, 0>);
+ static constexpr size_t NumElementsThatFit = PreferredInlineBytes / sizeof(T);
+ static constexpr size_t value =
+ NumElementsThatFit == 0 ? 1 : NumElementsThatFit;
+};
/// This is a 'vector' (really, a variable-sized array), optimized
/// for the case when the array is small. It contains some number of elements
@@ -831,10 +1101,20 @@
/// elements is below that threshold. This allows normal "small" cases to be
/// fast without losing generality for large inputs.
///
-/// Note that this does not attempt to be exception safe.
+/// \note
+/// In the absence of a well-motivated choice for the number of inlined
+/// elements \p N, it is recommended to use \c SmallVector<T> (that is,
+/// omitting the \p N). This will choose a default number of inlined elements
+/// reasonable for allocation on the stack (for example, trying to keep \c
+/// sizeof(SmallVector<T>) around 64 bytes).
///
-template <typename T, unsigned N>
-class SmallVector : public SmallVectorImpl<T>, SmallVectorStorage<T, N> {
+/// \warning This does not attempt to be exception safe.
+///
+/// \see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h
+template <typename T,
+ unsigned N = CalculateSmallVectorDefaultInlinedElements<T>::value>
+class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>,
+ SmallVectorStorage<T, N> {
public:
SmallVector() : SmallVectorImpl<T>(N) {}
@@ -849,9 +1129,9 @@
}
template <typename ItTy,
- typename = typename std::enable_if<std::is_convertible<
+ typename = std::enable_if_t<std::is_convertible<
typename std::iterator_traits<ItTy>::iterator_category,
- std::input_iterator_tag>::value>::type>
+ std::input_iterator_tag>::value>>
SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
this->append(S, E);
}
@@ -871,7 +1151,7 @@
SmallVectorImpl<T>::operator=(RHS);
}
- const SmallVector &operator=(const SmallVector &RHS) {
+ SmallVector &operator=(const SmallVector &RHS) {
SmallVectorImpl<T>::operator=(RHS);
return *this;
}
@@ -886,17 +1166,17 @@
SmallVectorImpl<T>::operator=(::std::move(RHS));
}
- const SmallVector &operator=(SmallVector &&RHS) {
+ SmallVector &operator=(SmallVector &&RHS) {
SmallVectorImpl<T>::operator=(::std::move(RHS));
return *this;
}
- const SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
+ SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
SmallVectorImpl<T>::operator=(::std::move(RHS));
return *this;
}
- const SmallVector &operator=(std::initializer_list<T> IL) {
+ SmallVector &operator=(std::initializer_list<T> IL) {
this->assign(IL);
return *this;
}
@@ -907,6 +1187,17 @@
return X.capacity_in_bytes();
}
+/// Given a range of type R, iterate the entire range and return a
+/// SmallVector with elements of the vector. This is useful, for example,
+/// when you want to iterate a range and then sort the results.
+template <unsigned Size, typename R>
+SmallVector<typename std::remove_const<typename std::remove_reference<
+ decltype(*std::begin(std::declval<R &>()))>::type>::type,
+ Size>
+to_vector(R &&Range) {
+ return {std::begin(Range), std::end(Range)};
+}
+
} // end namespace llvm
namespace std {
diff --git a/linux-x64/clang/include/llvm/ADT/SparseMultiSet.h b/linux-x64/clang/include/llvm/ADT/SparseMultiSet.h
index d9d3ff4..307d2c3 100644
--- a/linux-x64/clang/include/llvm/ADT/SparseMultiSet.h
+++ b/linux-x64/clang/include/llvm/ADT/SparseMultiSet.h
@@ -94,7 +94,7 @@
/// tombstones, in which case they are actually nodes in a single-linked
/// freelist of recyclable slots.
struct SMSNode {
- static const unsigned INVALID = ~0U;
+ static constexpr unsigned INVALID = ~0U;
ValueT Data;
unsigned Prev;
diff --git a/linux-x64/clang/include/llvm/ADT/SparseSet.h b/linux-x64/clang/include/llvm/ADT/SparseSet.h
index a6eb9b9..d8acf1e 100644
--- a/linux-x64/clang/include/llvm/ADT/SparseSet.h
+++ b/linux-x64/clang/include/llvm/ADT/SparseSet.h
@@ -21,7 +21,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/Allocator.h"
+#include "llvm/Support/AllocatorBase.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
@@ -79,7 +79,7 @@
}
};
-/// SparseSet - Fast set implmentation for objects that can be identified by
+/// SparseSet - Fast set implementation for objects that can be identified by
/// small unsigned keys.
///
/// SparseSet allocates memory proportional to the size of the key universe, so
@@ -229,12 +229,15 @@
return const_cast<SparseSet*>(this)->findIndex(KeyIndexOf(Key));
}
+ /// Check if the set contains the given \c Key.
+ ///
+ /// @param Key A valid key to find.
+ bool contains(const KeyT &Key) const { return find(Key) == end() ? 0 : 1; }
+
/// count - Returns 1 if this set contains an element identified by Key,
/// 0 otherwise.
///
- size_type count(const KeyT &Key) const {
- return find(Key) == end() ? 0 : 1;
- }
+ size_type count(const KeyT &Key) const { return contains(Key) ? 1 : 0; }
/// insert - Attempts to insert a new element.
///
diff --git a/linux-x64/clang/include/llvm/ADT/Statistic.h b/linux-x64/clang/include/llvm/ADT/Statistic.h
index 2ac59da..aa338cc 100644
--- a/linux-x64/clang/include/llvm/ADT/Statistic.h
+++ b/linux-x64/clang/include/llvm/ADT/Statistic.h
@@ -36,6 +36,8 @@
// configure time.
#if !defined(NDEBUG) || LLVM_FORCE_ENABLE_STATS
#define LLVM_ENABLE_STATS 1
+#else
+#define LLVM_ENABLE_STATS 0
#endif
namespace llvm {
@@ -44,38 +46,39 @@
class raw_fd_ostream;
class StringRef;
-class Statistic {
+class StatisticBase {
public:
const char *DebugType;
const char *Name;
const char *Desc;
- std::atomic<unsigned> Value;
- std::atomic<bool> Initialized;
- unsigned getValue() const { return Value.load(std::memory_order_relaxed); }
+ StatisticBase(const char *DebugType, const char *Name, const char *Desc)
+ : DebugType(DebugType), Name(Name), Desc(Desc) {}
+
const char *getDebugType() const { return DebugType; }
const char *getName() const { return Name; }
const char *getDesc() const { return Desc; }
+};
- /// construct - This should only be called for non-global statistics.
- void construct(const char *debugtype, const char *name, const char *desc) {
- DebugType = debugtype;
- Name = name;
- Desc = desc;
- Value = 0;
- Initialized = false;
- }
+class TrackingStatistic : public StatisticBase {
+public:
+ std::atomic<unsigned> Value;
+ std::atomic<bool> Initialized;
+
+ TrackingStatistic(const char *DebugType, const char *Name, const char *Desc)
+ : StatisticBase(DebugType, Name, Desc), Value(0), Initialized(false) {}
+
+ unsigned getValue() const { return Value.load(std::memory_order_relaxed); }
// Allow use of this class as the value itself.
operator unsigned() const { return getValue(); }
-#if LLVM_ENABLE_STATS
- const Statistic &operator=(unsigned Val) {
+ const TrackingStatistic &operator=(unsigned Val) {
Value.store(Val, std::memory_order_relaxed);
return init();
}
- const Statistic &operator++() {
+ const TrackingStatistic &operator++() {
Value.fetch_add(1, std::memory_order_relaxed);
return init();
}
@@ -85,7 +88,7 @@
return Value.fetch_add(1, std::memory_order_relaxed);
}
- const Statistic &operator--() {
+ const TrackingStatistic &operator--() {
Value.fetch_sub(1, std::memory_order_relaxed);
return init();
}
@@ -95,14 +98,14 @@
return Value.fetch_sub(1, std::memory_order_relaxed);
}
- const Statistic &operator+=(unsigned V) {
+ const TrackingStatistic &operator+=(unsigned V) {
if (V == 0)
return *this;
Value.fetch_add(V, std::memory_order_relaxed);
return init();
}
- const Statistic &operator-=(unsigned V) {
+ const TrackingStatistic &operator-=(unsigned V) {
if (V == 0)
return *this;
Value.fetch_sub(V, std::memory_order_relaxed);
@@ -119,42 +122,8 @@
init();
}
-#else // Statistics are disabled in release builds.
-
- const Statistic &operator=(unsigned Val) {
- return *this;
- }
-
- const Statistic &operator++() {
- return *this;
- }
-
- unsigned operator++(int) {
- return 0;
- }
-
- const Statistic &operator--() {
- return *this;
- }
-
- unsigned operator--(int) {
- return 0;
- }
-
- const Statistic &operator+=(const unsigned &V) {
- return *this;
- }
-
- const Statistic &operator-=(const unsigned &V) {
- return *this;
- }
-
- void updateMax(unsigned V) {}
-
-#endif // LLVM_ENABLE_STATS
-
protected:
- Statistic &init() {
+ TrackingStatistic &init() {
if (!Initialized.load(std::memory_order_acquire))
RegisterStatistic();
return *this;
@@ -163,13 +132,50 @@
void RegisterStatistic();
};
+class NoopStatistic : public StatisticBase {
+public:
+ using StatisticBase::StatisticBase;
+
+ unsigned getValue() const { return 0; }
+
+ // Allow use of this class as the value itself.
+ operator unsigned() const { return 0; }
+
+ const NoopStatistic &operator=(unsigned Val) { return *this; }
+
+ const NoopStatistic &operator++() { return *this; }
+
+ unsigned operator++(int) { return 0; }
+
+ const NoopStatistic &operator--() { return *this; }
+
+ unsigned operator--(int) { return 0; }
+
+ const NoopStatistic &operator+=(const unsigned &V) { return *this; }
+
+ const NoopStatistic &operator-=(const unsigned &V) { return *this; }
+
+ void updateMax(unsigned V) {}
+};
+
+#if LLVM_ENABLE_STATS
+using Statistic = TrackingStatistic;
+#else
+using Statistic = NoopStatistic;
+#endif
+
// STATISTIC - A macro to make definition of statistics really simple. This
// automatically passes the DEBUG_TYPE of the file into the statistic.
#define STATISTIC(VARNAME, DESC) \
- static llvm::Statistic VARNAME = {DEBUG_TYPE, #VARNAME, DESC, {0}, {false}}
+ static llvm::Statistic VARNAME = {DEBUG_TYPE, #VARNAME, DESC}
+
+// ALWAYS_ENABLED_STATISTIC - A macro to define a statistic like STATISTIC but
+// it is enabled even if LLVM_ENABLE_STATS is off.
+#define ALWAYS_ENABLED_STATISTIC(VARNAME, DESC) \
+ static llvm::TrackingStatistic VARNAME = {DEBUG_TYPE, #VARNAME, DESC}
/// Enable the collection and printing of statistics.
-void EnableStatistics(bool PrintOnExit = true);
+void EnableStatistics(bool DoPrintOnExit = true);
/// Check if statistics are enabled.
bool AreStatisticsEnabled();
diff --git a/linux-x64/clang/include/llvm/ADT/StringExtras.h b/linux-x64/clang/include/llvm/ADT/StringExtras.h
index 16ac90b..10596cf 100644
--- a/linux-x64/clang/include/llvm/ADT/StringExtras.h
+++ b/linux-x64/clang/include/llvm/ADT/StringExtras.h
@@ -66,17 +66,29 @@
///
/// If \p C is not a valid hex digit, -1U is returned.
inline unsigned hexDigitValue(char C) {
- if (C >= '0' && C <= '9') return C-'0';
- if (C >= 'a' && C <= 'f') return C-'a'+10U;
- if (C >= 'A' && C <= 'F') return C-'A'+10U;
- return -1U;
+ struct HexTable {
+ unsigned LUT[255] = {};
+ constexpr HexTable() {
+ // Default initialize everything to invalid.
+ for (int i = 0; i < 255; ++i)
+ LUT[i] = ~0U;
+ // Initialize `0`-`9`.
+ for (int i = 0; i < 10; ++i)
+ LUT['0' + i] = i;
+ // Initialize `A`-`F` and `a`-`f`.
+ for (int i = 0; i < 6; ++i)
+ LUT['A' + i] = LUT['a' + i] = 10 + i;
+ }
+ };
+ constexpr HexTable Table;
+ return Table.LUT[static_cast<unsigned char>(C)];
}
/// Checks if character \p C is one of the 10 decimal digits.
inline bool isDigit(char C) { return C >= '0' && C <= '9'; }
/// Checks if character \p C is a hexadecimal numeric character.
-inline bool isHexDigit(char C) { return hexDigitValue(C) != -1U; }
+inline bool isHexDigit(char C) { return hexDigitValue(C) != ~0U; }
/// Checks if character \p C is a valid letter as classified by "C" locale.
inline bool isAlpha(char C) {
@@ -107,6 +119,14 @@
return (0x20 <= UC) && (UC <= 0x7E);
}
+/// Checks whether character \p C is whitespace in the "C" locale.
+///
+/// Locale-independent version of the C standard library isspace.
+inline bool isSpace(char C) {
+ return C == ' ' || C == '\f' || C == '\n' || C == '\r' || C == '\t' ||
+ C == '\v';
+}
+
/// Returns the corresponding lowercase character if \p x is uppercase.
inline char toLower(char x) {
if (x >= 'A' && x <= 'Z')
@@ -157,34 +177,68 @@
return toHex(toStringRef(Input), LowerCase);
}
-inline uint8_t hexFromNibbles(char MSB, char LSB) {
+/// Store the binary representation of the two provided values, \p MSB and
+/// \p LSB, that make up the nibbles of a hexadecimal digit. If \p MSB or \p LSB
+/// do not correspond to proper nibbles of a hexadecimal digit, this method
+/// returns false. Otherwise, returns true.
+inline bool tryGetHexFromNibbles(char MSB, char LSB, uint8_t &Hex) {
unsigned U1 = hexDigitValue(MSB);
unsigned U2 = hexDigitValue(LSB);
- assert(U1 != -1U && U2 != -1U);
+ if (U1 == ~0U || U2 == ~0U)
+ return false;
- return static_cast<uint8_t>((U1 << 4) | U2);
+ Hex = static_cast<uint8_t>((U1 << 4) | U2);
+ return true;
}
-/// Convert hexadecimal string \p Input to its binary representation.
-/// The return string is half the size of \p Input.
-inline std::string fromHex(StringRef Input) {
- if (Input.empty())
- return std::string();
+/// Return the binary representation of the two provided values, \p MSB and
+/// \p LSB, that make up the nibbles of a hexadecimal digit.
+inline uint8_t hexFromNibbles(char MSB, char LSB) {
+ uint8_t Hex = 0;
+ bool GotHex = tryGetHexFromNibbles(MSB, LSB, Hex);
+ (void)GotHex;
+ assert(GotHex && "MSB and/or LSB do not correspond to hex digits");
+ return Hex;
+}
- std::string Output;
+/// Convert hexadecimal string \p Input to its binary representation and store
+/// the result in \p Output. Returns true if the binary representation could be
+/// converted from the hexadecimal string. Returns false if \p Input contains
+/// non-hexadecimal digits. The output string is half the size of \p Input.
+inline bool tryGetFromHex(StringRef Input, std::string &Output) {
+ if (Input.empty())
+ return true;
+
Output.reserve((Input.size() + 1) / 2);
if (Input.size() % 2 == 1) {
- Output.push_back(hexFromNibbles('0', Input.front()));
+ uint8_t Hex = 0;
+ if (!tryGetHexFromNibbles('0', Input.front(), Hex))
+ return false;
+
+ Output.push_back(Hex);
Input = Input.drop_front();
}
assert(Input.size() % 2 == 0);
while (!Input.empty()) {
- uint8_t Hex = hexFromNibbles(Input[0], Input[1]);
+ uint8_t Hex = 0;
+ if (!tryGetHexFromNibbles(Input[0], Input[1], Hex))
+ return false;
+
Output.push_back(Hex);
Input = Input.drop_front(2);
}
- return Output;
+ return true;
+}
+
+/// Convert hexadecimal string \p Input to its binary representation.
+/// The return string is half the size of \p Input.
+inline std::string fromHex(StringRef Input) {
+ std::string Hex;
+ bool GotHex = tryGetFromHex(Input, Hex);
+ (void)GotHex;
+ assert(GotHex && "Input contains non hex digits");
+ return Hex;
}
/// Convert the string \p S to an integer of the specified type using
@@ -237,7 +291,7 @@
inline std::string itostr(int64_t X) {
if (X < 0)
- return utostr(static_cast<uint64_t>(-X), true);
+ return utostr(static_cast<uint64_t>(1) + ~static_cast<uint64_t>(X), true);
else
return utostr(static_cast<uint64_t>(X));
}
@@ -292,6 +346,18 @@
/// printLowerCase - Print each character as lowercase if it is uppercase.
void printLowerCase(StringRef String, raw_ostream &Out);
+/// Converts a string from camel-case to snake-case by replacing all uppercase
+/// letters with '_' followed by the letter in lowercase, except if the
+/// uppercase letter is the first character of the string.
+std::string convertToSnakeFromCamelCase(StringRef input);
+
+/// Converts a string from snake-case to camel-case by replacing all occurrences
+/// of '_' followed by a lowercase letter with the letter in uppercase.
+/// Optionally allow capitalization of the first letter (if it is a lowercase
+/// letter)
+std::string convertToCamelFromSnakeCase(StringRef input,
+ bool capitalizeFirst = false);
+
namespace detail {
template <typename IteratorT>
@@ -318,13 +384,16 @@
size_t Len = (std::distance(Begin, End) - 1) * Separator.size();
for (IteratorT I = Begin; I != End; ++I)
- Len += (*Begin).size();
+ Len += (*I).size();
S.reserve(Len);
+ size_t PrevCapacity = S.capacity();
+ (void)PrevCapacity;
S += (*Begin);
while (++Begin != End) {
S += Separator;
S += (*Begin);
}
+ assert(PrevCapacity == S.capacity() && "String grew during building");
return S;
}
@@ -345,7 +414,7 @@
join_items_impl(Result, Separator, std::forward<Args>(Items)...);
}
-inline size_t join_one_item_size(char C) { return 1; }
+inline size_t join_one_item_size(char) { return 1; }
inline size_t join_one_item_size(const char *S) { return S ? ::strlen(S) : 0; }
template <typename T> inline size_t join_one_item_size(const T &Str) {
@@ -396,6 +465,30 @@
return Result;
}
+/// A helper class to return the specified delimiter string after the first
+/// invocation of operator StringRef(). Used to generate a comma-separated
+/// list from a loop like so:
+///
+/// \code
+/// ListSeparator SD;
+/// for (auto &I : C)
+/// OS << SD << I.getName();
+/// \end
+class ListSeparator {
+ bool First = true;
+ StringRef Separator;
+
+public:
+ ListSeparator(StringRef Separator = ", ") : Separator(Separator) {}
+ operator StringRef() {
+ if (First) {
+ First = false;
+ return {};
+ }
+ return Separator;
+ }
+};
+
} // end namespace llvm
#endif // LLVM_ADT_STRINGEXTRAS_H
diff --git a/linux-x64/clang/include/llvm/ADT/StringMap.h b/linux-x64/clang/include/llvm/ADT/StringMap.h
index 8a586fc..a82afc9 100644
--- a/linux-x64/clang/include/llvm/ADT/StringMap.h
+++ b/linux-x64/clang/include/llvm/ADT/StringMap.h
@@ -13,36 +13,17 @@
#ifndef LLVM_ADT_STRINGMAP_H
#define LLVM_ADT_STRINGMAP_H
-#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/iterator.h"
-#include "llvm/ADT/iterator_range.h"
-#include "llvm/Support/Allocator.h"
+#include "llvm/ADT/StringMapEntry.h"
+#include "llvm/Support/AllocatorBase.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
-#include "llvm/Support/ErrorHandling.h"
-#include <algorithm>
-#include <cassert>
-#include <cstdint>
-#include <cstdlib>
-#include <cstring>
#include <initializer_list>
#include <iterator>
-#include <utility>
namespace llvm {
-template<typename ValueTy> class StringMapConstIterator;
-template<typename ValueTy> class StringMapIterator;
-template<typename ValueTy> class StringMapKeyIterator;
-
-/// StringMapEntryBase - Shared base class of StringMapEntry instances.
-class StringMapEntryBase {
- size_t StrLen;
-
-public:
- explicit StringMapEntryBase(size_t Len) : StrLen(Len) {}
-
- size_t getKeyLength() const { return StrLen; }
-};
+template <typename ValueTy> class StringMapConstIterator;
+template <typename ValueTy> class StringMapIterator;
+template <typename ValueTy> class StringMapKeyIterator;
/// StringMapImpl - This is the base class of StringMap that is shared among
/// all of its instantiations.
@@ -58,8 +39,7 @@
unsigned ItemSize;
protected:
- explicit StringMapImpl(unsigned itemSize)
- : ItemSize(itemSize) {}
+ explicit StringMapImpl(unsigned itemSize) : ItemSize(itemSize) {}
StringMapImpl(StringMapImpl &&RHS)
: TheTable(RHS.TheTable), NumBuckets(RHS.NumBuckets),
NumItems(RHS.NumItems), NumTombstones(RHS.NumTombstones),
@@ -98,10 +78,12 @@
void init(unsigned Size);
public:
+ static constexpr uintptr_t TombstoneIntVal =
+ static_cast<uintptr_t>(-1)
+ << PointerLikeTypeTraits<StringMapEntryBase *>::NumLowBitsAvailable;
+
static StringMapEntryBase *getTombstoneVal() {
- uintptr_t Val = static_cast<uintptr_t>(-1);
- Val <<= PointerLikeTypeTraits<StringMapEntryBase *>::NumLowBitsAvailable;
- return reinterpret_cast<StringMapEntryBase *>(Val);
+ return reinterpret_cast<StringMapEntryBase *>(TombstoneIntVal);
}
unsigned getNumBuckets() const { return NumBuckets; }
@@ -118,104 +100,11 @@
}
};
-/// StringMapEntry - This is used to represent one value that is inserted into
-/// a StringMap. It contains the Value itself and the key: the string length
-/// and data.
-template<typename ValueTy>
-class StringMapEntry : public StringMapEntryBase {
-public:
- ValueTy second;
-
- explicit StringMapEntry(size_t strLen)
- : StringMapEntryBase(strLen), second() {}
- template <typename... InitTy>
- StringMapEntry(size_t strLen, InitTy &&... InitVals)
- : StringMapEntryBase(strLen), second(std::forward<InitTy>(InitVals)...) {}
- StringMapEntry(StringMapEntry &E) = delete;
-
- StringRef getKey() const {
- return StringRef(getKeyData(), getKeyLength());
- }
-
- const ValueTy &getValue() const { return second; }
- ValueTy &getValue() { return second; }
-
- void setValue(const ValueTy &V) { second = V; }
-
- /// getKeyData - Return the start of the string data that is the key for this
- /// value. The string data is always stored immediately after the
- /// StringMapEntry object.
- const char *getKeyData() const {return reinterpret_cast<const char*>(this+1);}
-
- StringRef first() const { return StringRef(getKeyData(), getKeyLength()); }
-
- /// Create a StringMapEntry for the specified key construct the value using
- /// \p InitiVals.
- template <typename AllocatorTy, typename... InitTy>
- static StringMapEntry *Create(StringRef Key, AllocatorTy &Allocator,
- InitTy &&... InitVals) {
- size_t KeyLength = Key.size();
-
- // Allocate a new item with space for the string at the end and a null
- // terminator.
- size_t AllocSize = sizeof(StringMapEntry) + KeyLength + 1;
- size_t Alignment = alignof(StringMapEntry);
-
- StringMapEntry *NewItem =
- static_cast<StringMapEntry*>(Allocator.Allocate(AllocSize,Alignment));
- assert(NewItem && "Unhandled out-of-memory");
-
- // Construct the value.
- new (NewItem) StringMapEntry(KeyLength, std::forward<InitTy>(InitVals)...);
-
- // Copy the string information.
- char *StrBuffer = const_cast<char*>(NewItem->getKeyData());
- if (KeyLength > 0)
- memcpy(StrBuffer, Key.data(), KeyLength);
- StrBuffer[KeyLength] = 0; // Null terminate for convenience of clients.
- return NewItem;
- }
-
- /// Create - Create a StringMapEntry with normal malloc/free.
- template <typename... InitType>
- static StringMapEntry *Create(StringRef Key, InitType &&... InitVal) {
- MallocAllocator A;
- return Create(Key, A, std::forward<InitType>(InitVal)...);
- }
-
- static StringMapEntry *Create(StringRef Key) {
- return Create(Key, ValueTy());
- }
-
- /// GetStringMapEntryFromKeyData - Given key data that is known to be embedded
- /// into a StringMapEntry, return the StringMapEntry itself.
- static StringMapEntry &GetStringMapEntryFromKeyData(const char *KeyData) {
- char *Ptr = const_cast<char*>(KeyData) - sizeof(StringMapEntry<ValueTy>);
- return *reinterpret_cast<StringMapEntry*>(Ptr);
- }
-
- /// Destroy - Destroy this StringMapEntry, releasing memory back to the
- /// specified allocator.
- template<typename AllocatorTy>
- void Destroy(AllocatorTy &Allocator) {
- // Free memory referenced by the item.
- size_t AllocSize = sizeof(StringMapEntry) + getKeyLength() + 1;
- this->~StringMapEntry();
- Allocator.Deallocate(static_cast<void *>(this), AllocSize);
- }
-
- /// Destroy this object, releasing memory back to the malloc allocator.
- void Destroy() {
- MallocAllocator A;
- Destroy(A);
- }
-};
-
/// StringMap - This is an unconventional map that is specialized for handling
/// keys that are "strings", which are basically ranges of bytes. This does some
/// funky memory allocation and hashing things to make it extremely efficient,
/// storing the string data *after* the value in the map.
-template<typename ValueTy, typename AllocatorTy = MallocAllocator>
+template <typename ValueTy, typename AllocatorTy = MallocAllocator>
class StringMap : public StringMapImpl {
AllocatorTy Allocator;
@@ -225,14 +114,15 @@
StringMap() : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {}
explicit StringMap(unsigned InitialSize)
- : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {}
+ : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {}
explicit StringMap(AllocatorTy A)
- : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), Allocator(A) {}
+ : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), Allocator(A) {
+ }
StringMap(unsigned InitialSize, AllocatorTy A)
- : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))),
- Allocator(A) {}
+ : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))),
+ Allocator(A) {}
StringMap(std::initializer_list<std::pair<StringRef, ValueTy>> List)
: StringMapImpl(List.size(), static_cast<unsigned>(sizeof(MapEntryTy))) {
@@ -244,9 +134,9 @@
StringMap(StringMap &&RHS)
: StringMapImpl(std::move(RHS)), Allocator(std::move(RHS.Allocator)) {}
- StringMap(const StringMap &RHS) :
- StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))),
- Allocator(RHS.Allocator) {
+ StringMap(const StringMap &RHS)
+ : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))),
+ Allocator(RHS.Allocator) {
if (RHS.empty())
return;
@@ -293,7 +183,7 @@
for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
StringMapEntryBase *Bucket = TheTable[I];
if (Bucket && Bucket != getTombstoneVal()) {
- static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator);
+ static_cast<MapEntryTy *>(Bucket)->Destroy(Allocator);
}
}
}
@@ -303,7 +193,7 @@
AllocatorTy &getAllocator() { return Allocator; }
const AllocatorTy &getAllocator() const { return Allocator; }
- using key_type = const char*;
+ using key_type = const char *;
using mapped_type = ValueTy;
using value_type = StringMapEntry<ValueTy>;
using size_type = size_t;
@@ -311,17 +201,13 @@
using const_iterator = StringMapConstIterator<ValueTy>;
using iterator = StringMapIterator<ValueTy>;
- iterator begin() {
- return iterator(TheTable, NumBuckets == 0);
- }
- iterator end() {
- return iterator(TheTable+NumBuckets, true);
- }
+ iterator begin() { return iterator(TheTable, NumBuckets == 0); }
+ iterator end() { return iterator(TheTable + NumBuckets, true); }
const_iterator begin() const {
return const_iterator(TheTable, NumBuckets == 0);
}
const_iterator end() const {
- return const_iterator(TheTable+NumBuckets, true);
+ return const_iterator(TheTable + NumBuckets, true);
}
iterator_range<StringMapKeyIterator<ValueTy>> keys() const {
@@ -331,14 +217,16 @@
iterator find(StringRef Key) {
int Bucket = FindKey(Key);
- if (Bucket == -1) return end();
- return iterator(TheTable+Bucket, true);
+ if (Bucket == -1)
+ return end();
+ return iterator(TheTable + Bucket, true);
}
const_iterator find(StringRef Key) const {
int Bucket = FindKey(Key);
- if (Bucket == -1) return end();
- return const_iterator(TheTable+Bucket, true);
+ if (Bucket == -1)
+ return end();
+ return const_iterator(TheTable + Bucket, true);
}
/// lookup - Return the entry for the specified key, or a default
@@ -355,15 +243,33 @@
ValueTy &operator[](StringRef Key) { return try_emplace(Key).first->second; }
/// count - Return 1 if the element is in the map, 0 otherwise.
- size_type count(StringRef Key) const {
- return find(Key) == end() ? 0 : 1;
- }
+ size_type count(StringRef Key) const { return find(Key) == end() ? 0 : 1; }
template <typename InputTy>
size_type count(const StringMapEntry<InputTy> &MapEntry) const {
return count(MapEntry.getKey());
}
+ /// equal - check whether both of the containers are equal.
+ bool operator==(const StringMap &RHS) const {
+ if (size() != RHS.size())
+ return false;
+
+ for (const auto &KeyValue : *this) {
+ auto FindInRHS = RHS.find(KeyValue.getKey());
+
+ if (FindInRHS == RHS.end())
+ return false;
+
+ if (!(KeyValue.getValue() == FindInRHS->getValue()))
+ return false;
+ }
+
+ return true;
+ }
+
+ bool operator!=(const StringMap &RHS) const { return !(*this == RHS); }
+
/// insert - Insert the specified key/value pair into the map. If the key
/// already exists in the map, return false and ignore the request, otherwise
/// insert it and return true.
@@ -371,7 +277,7 @@
unsigned BucketNo = LookupBucketFor(KeyValue->getKey());
StringMapEntryBase *&Bucket = TheTable[BucketNo];
if (Bucket && Bucket != getTombstoneVal())
- return false; // Already exists in map.
+ return false; // Already exists in map.
if (Bucket == getTombstoneVal())
--NumTombstones;
@@ -391,6 +297,16 @@
return try_emplace(KV.first, std::move(KV.second));
}
+ /// Inserts an element or assigns to the current element if the key already
+ /// exists. The return type is the same as try_emplace.
+ template <typename V>
+ std::pair<iterator, bool> insert_or_assign(StringRef Key, V &&Val) {
+ auto Ret = try_emplace(Key, std::forward<V>(Val));
+ if (!Ret.second)
+ Ret.first->second = std::forward<V>(Val);
+ return Ret;
+ }
+
/// Emplace a new element for the specified key into the map if the key isn't
/// already in the map. The bool component of the returned pair is true
/// if and only if the insertion takes place, and the iterator component of
@@ -415,14 +331,15 @@
// clear - Empties out the StringMap
void clear() {
- if (empty()) return;
+ if (empty())
+ return;
// Zap all values, resetting the keys back to non-present (not tombstone),
// which is safe because we're removing all elements.
for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
StringMapEntryBase *&Bucket = TheTable[I];
if (Bucket && Bucket != getTombstoneVal()) {
- static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator);
+ static_cast<MapEntryTy *>(Bucket)->Destroy(Allocator);
}
Bucket = nullptr;
}
@@ -433,9 +350,7 @@
/// remove - Remove the specified key/value pair from the map, but do not
/// erase it. This aborts if the key is not in the map.
- void remove(MapEntryTy *KeyValue) {
- RemoveKey(KeyValue);
- }
+ void remove(MapEntryTy *KeyValue) { RemoveKey(KeyValue); }
void erase(iterator I) {
MapEntryTy &V = *I;
@@ -445,7 +360,8 @@
bool erase(StringRef Key) {
iterator I = find(Key);
- if (I == end()) return false;
+ if (I == end())
+ return false;
erase(I);
return true;
}
@@ -464,7 +380,8 @@
explicit StringMapIterBase(StringMapEntryBase **Bucket,
bool NoAdvance = false)
: Ptr(Bucket) {
- if (!NoAdvance) AdvancePastEmptyBuckets();
+ if (!NoAdvance)
+ AdvancePastEmptyBuckets();
}
DerivedTy &operator=(const DerivedTy &Other) {
@@ -472,7 +389,9 @@
return static_cast<DerivedTy &>(*this);
}
- bool operator==(const DerivedTy &RHS) const { return Ptr == RHS.Ptr; }
+ friend bool operator==(const DerivedTy &LHS, const DerivedTy &RHS) {
+ return LHS.Ptr == RHS.Ptr;
+ }
DerivedTy &operator++() { // Preincrement
++Ptr;
diff --git a/linux-x64/clang/include/llvm/ADT/StringMapEntry.h b/linux-x64/clang/include/llvm/ADT/StringMapEntry.h
new file mode 100644
index 0000000..ea3aad6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/StringMapEntry.h
@@ -0,0 +1,135 @@
+//===- StringMapEntry.h - String Hash table map interface -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the StringMapEntry class - it is intended to be a low
+// dependency implementation detail of StringMap that is more suitable for
+// inclusion in public headers than StringMap.h itself is.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGMAPENTRY_H
+#define LLVM_ADT_STRINGMAPENTRY_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+/// StringMapEntryBase - Shared base class of StringMapEntry instances.
+class StringMapEntryBase {
+ size_t keyLength;
+
+public:
+ explicit StringMapEntryBase(size_t keyLength) : keyLength(keyLength) {}
+
+ size_t getKeyLength() const { return keyLength; }
+};
+
+/// StringMapEntryStorage - Holds the value in a StringMapEntry.
+///
+/// Factored out into a separate base class to make it easier to specialize.
+/// This is primarily intended to support StringSet, which doesn't need a value
+/// stored at all.
+template <typename ValueTy>
+class StringMapEntryStorage : public StringMapEntryBase {
+public:
+ ValueTy second;
+
+ explicit StringMapEntryStorage(size_t keyLength)
+ : StringMapEntryBase(keyLength), second() {}
+ template <typename... InitTy>
+ StringMapEntryStorage(size_t keyLength, InitTy &&... initVals)
+ : StringMapEntryBase(keyLength),
+ second(std::forward<InitTy>(initVals)...) {}
+ StringMapEntryStorage(StringMapEntryStorage &e) = delete;
+
+ const ValueTy &getValue() const { return second; }
+ ValueTy &getValue() { return second; }
+
+ void setValue(const ValueTy &V) { second = V; }
+};
+
+template <> class StringMapEntryStorage<NoneType> : public StringMapEntryBase {
+public:
+ explicit StringMapEntryStorage(size_t keyLength, NoneType none = None)
+ : StringMapEntryBase(keyLength) {}
+ StringMapEntryStorage(StringMapEntryStorage &entry) = delete;
+
+ NoneType getValue() const { return None; }
+};
+
+/// StringMapEntry - This is used to represent one value that is inserted into
+/// a StringMap. It contains the Value itself and the key: the string length
+/// and data.
+template <typename ValueTy>
+class StringMapEntry final : public StringMapEntryStorage<ValueTy> {
+public:
+ using StringMapEntryStorage<ValueTy>::StringMapEntryStorage;
+
+ StringRef getKey() const {
+ return StringRef(getKeyData(), this->getKeyLength());
+ }
+
+ /// getKeyData - Return the start of the string data that is the key for this
+ /// value. The string data is always stored immediately after the
+ /// StringMapEntry object.
+ const char *getKeyData() const {
+ return reinterpret_cast<const char *>(this + 1);
+ }
+
+ StringRef first() const {
+ return StringRef(getKeyData(), this->getKeyLength());
+ }
+
+ /// Create a StringMapEntry for the specified key construct the value using
+ /// \p InitiVals.
+ template <typename AllocatorTy, typename... InitTy>
+ static StringMapEntry *Create(StringRef key, AllocatorTy &allocator,
+ InitTy &&... initVals) {
+ size_t keyLength = key.size();
+
+ // Allocate a new item with space for the string at the end and a null
+ // terminator.
+ size_t allocSize = sizeof(StringMapEntry) + keyLength + 1;
+ size_t alignment = alignof(StringMapEntry);
+
+ StringMapEntry *newItem =
+ static_cast<StringMapEntry *>(allocator.Allocate(allocSize, alignment));
+ assert(newItem && "Unhandled out-of-memory");
+
+ // Construct the value.
+ new (newItem) StringMapEntry(keyLength, std::forward<InitTy>(initVals)...);
+
+ // Copy the string information.
+ char *strBuffer = const_cast<char *>(newItem->getKeyData());
+ if (keyLength > 0)
+ memcpy(strBuffer, key.data(), keyLength);
+ strBuffer[keyLength] = 0; // Null terminate for convenience of clients.
+ return newItem;
+ }
+
+ /// GetStringMapEntryFromKeyData - Given key data that is known to be embedded
+ /// into a StringMapEntry, return the StringMapEntry itself.
+ static StringMapEntry &GetStringMapEntryFromKeyData(const char *keyData) {
+ char *ptr = const_cast<char *>(keyData) - sizeof(StringMapEntry<ValueTy>);
+ return *reinterpret_cast<StringMapEntry *>(ptr);
+ }
+
+ /// Destroy - Destroy this StringMapEntry, releasing memory back to the
+ /// specified allocator.
+ template <typename AllocatorTy> void Destroy(AllocatorTy &allocator) {
+ // Free memory referenced by the item.
+ size_t AllocSize = sizeof(StringMapEntry) + this->getKeyLength() + 1;
+ this->~StringMapEntry();
+ allocator.Deallocate(static_cast<void *>(this), AllocSize,
+ alignof(StringMapEntry));
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGMAPENTRY_H
diff --git a/linux-x64/clang/include/llvm/ADT/StringRef.h b/linux-x64/clang/include/llvm/ADT/StringRef.h
index 4661b1e..98c120f 100644
--- a/linux-x64/clang/include/llvm/ADT/StringRef.h
+++ b/linux-x64/clang/include/llvm/ADT/StringRef.h
@@ -18,9 +18,18 @@
#include <cstring>
#include <limits>
#include <string>
+#if __cplusplus > 201402L
+#include <string_view>
+#endif
#include <type_traits>
#include <utility>
+// Declare the __builtin_strlen intrinsic for MSVC so it can be used in
+// constexpr context.
+#if defined(_MSC_VER)
+extern "C" size_t __builtin_strlen(const char *);
+#endif
+
namespace llvm {
class APInt;
@@ -45,9 +54,9 @@
/// situations where the character data resides in some other buffer, whose
/// lifetime extends past that of the StringRef. For this reason, it is not in
/// general safe to store a StringRef.
- class StringRef {
+ class LLVM_GSL_POINTER StringRef {
public:
- static const size_t npos = ~size_t(0);
+ static constexpr size_t npos = ~size_t(0);
using iterator = const char *;
using const_iterator = const char *;
@@ -67,6 +76,21 @@
return ::memcmp(Lhs,Rhs,Length);
}
+ // Constexpr version of std::strlen.
+ static constexpr size_t strLen(const char *Str) {
+#if __cplusplus > 201402L
+ return std::char_traits<char>::length(Str);
+#elif __has_builtin(__builtin_strlen) || defined(__GNUC__) || \
+ (defined(_MSC_VER) && _MSC_VER >= 1916)
+ return __builtin_strlen(Str);
+#else
+ const char *Begin = Str;
+ while (*Str != '\0')
+ ++Str;
+ return Str - Begin;
+#endif
+ }
+
public:
/// @name Constructors
/// @{
@@ -79,8 +103,8 @@
StringRef(std::nullptr_t) = delete;
/// Construct a string ref from a cstring.
- /*implicit*/ StringRef(const char *Str)
- : Data(Str), Length(Str ? ::strlen(Str) : 0) {}
+ /*implicit*/ constexpr StringRef(const char *Str)
+ : Data(Str), Length(Str ? strLen(Str) : 0) {}
/// Construct a string ref from a pointer and length.
/*implicit*/ constexpr StringRef(const char *data, size_t length)
@@ -90,6 +114,12 @@
/*implicit*/ StringRef(const std::string &Str)
: Data(Str.data()), Length(Str.length()) {}
+#if __cplusplus > 201402L
+ /// Construct a string ref from an std::string_view.
+ /*implicit*/ constexpr StringRef(std::string_view Str)
+ : Data(Str.data()), Length(Str.size()) {}
+#endif
+
static StringRef withNullAsEmpty(const char *data) {
return StringRef(data ? data : "");
}
@@ -235,17 +265,20 @@
/// The declaration here is extra complicated so that `stringRef = {}`
/// and `stringRef = "abc"` continue to select the move assignment operator.
template <typename T>
- typename std::enable_if<std::is_same<T, std::string>::value,
- StringRef>::type &
+ std::enable_if_t<std::is_same<T, std::string>::value, StringRef> &
operator=(T &&Str) = delete;
/// @}
/// @name Type Conversions
/// @{
- operator std::string() const {
- return str();
+ explicit operator std::string() const { return str(); }
+
+#if __cplusplus > 201402L
+ operator std::string_view() const {
+ return std::string_view(data(), size());
}
+#endif
/// @}
/// @name String Predicates
@@ -474,7 +507,7 @@
/// this returns true to signify the error. The string is considered
/// erroneous if empty or if it overflows T.
template <typename T>
- typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
+ std::enable_if_t<std::numeric_limits<T>::is_signed, bool>
getAsInteger(unsigned Radix, T &Result) const {
long long LLVal;
if (getAsSignedInteger(*this, Radix, LLVal) ||
@@ -485,7 +518,7 @@
}
template <typename T>
- typename std::enable_if<!std::numeric_limits<T>::is_signed, bool>::type
+ std::enable_if_t<!std::numeric_limits<T>::is_signed, bool>
getAsInteger(unsigned Radix, T &Result) const {
unsigned long long ULLVal;
// The additional cast to unsigned long long is required to avoid the
@@ -508,7 +541,7 @@
/// The portion of the string representing the discovered numeric value
/// is removed from the beginning of the string.
template <typename T>
- typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
+ std::enable_if_t<std::numeric_limits<T>::is_signed, bool>
consumeInteger(unsigned Radix, T &Result) {
long long LLVal;
if (consumeSignedInteger(*this, Radix, LLVal) ||
@@ -519,7 +552,7 @@
}
template <typename T>
- typename std::enable_if<!std::numeric_limits<T>::is_signed, bool>::type
+ std::enable_if_t<!std::numeric_limits<T>::is_signed, bool>
consumeInteger(unsigned Radix, T &Result) {
unsigned long long ULLVal;
if (consumeUnsignedInteger(*this, Radix, ULLVal) ||
@@ -546,7 +579,8 @@
///
/// If \p AllowInexact is false, the function will fail if the string
/// cannot be represented exactly. Otherwise, the function only fails
- /// in case of an overflow or underflow.
+ /// in case of an overflow or underflow, or an invalid floating point
+ /// representation.
bool getAsDouble(double &Result, bool AllowInexact = true) const;
/// @}
diff --git a/linux-x64/clang/include/llvm/ADT/StringSet.h b/linux-x64/clang/include/llvm/ADT/StringSet.h
index af3a44a..c424517 100644
--- a/linux-x64/clang/include/llvm/ADT/StringSet.h
+++ b/linux-x64/clang/include/llvm/ADT/StringSet.h
@@ -1,4 +1,4 @@
-//===- StringSet.h - The LLVM Compiler Driver -------------------*- C++ -*-===//
+//===- StringSet.h - An efficient set built on StringMap --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -14,44 +14,41 @@
#define LLVM_ADT_STRINGSET_H
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/Allocator.h"
-#include <cassert>
-#include <initializer_list>
-#include <utility>
namespace llvm {
- /// StringSet - A wrapper for StringMap that provides set-like functionality.
- template <class AllocatorTy = MallocAllocator>
- class StringSet : public StringMap<char, AllocatorTy> {
- using base = StringMap<char, AllocatorTy>;
+/// StringSet - A wrapper for StringMap that provides set-like functionality.
+template <class AllocatorTy = MallocAllocator>
+class StringSet : public StringMap<NoneType, AllocatorTy> {
+ using Base = StringMap<NoneType, AllocatorTy>;
- public:
- StringSet() = default;
- StringSet(std::initializer_list<StringRef> S) {
- for (StringRef X : S)
- insert(X);
- }
- explicit StringSet(AllocatorTy A) : base(A) {}
+public:
+ StringSet() = default;
+ StringSet(std::initializer_list<StringRef> initializer) {
+ for (StringRef str : initializer)
+ insert(str);
+ }
+ explicit StringSet(AllocatorTy a) : Base(a) {}
- std::pair<typename base::iterator, bool> insert(StringRef Key) {
- assert(!Key.empty());
- return base::insert(std::make_pair(Key, '\0'));
- }
+ std::pair<typename Base::iterator, bool> insert(StringRef key) {
+ return Base::try_emplace(key);
+ }
- template <typename InputIt>
- void insert(const InputIt &Begin, const InputIt &End) {
- for (auto It = Begin; It != End; ++It)
- base::insert(std::make_pair(*It, '\0'));
- }
+ template <typename InputIt>
+ void insert(const InputIt &begin, const InputIt &end) {
+ for (auto it = begin; it != end; ++it)
+ insert(*it);
+ }
- template <typename ValueTy>
- std::pair<typename base::iterator, bool>
- insert(const StringMapEntry<ValueTy> &MapEntry) {
- return insert(MapEntry.getKey());
- }
- };
+ template <typename ValueTy>
+ std::pair<typename Base::iterator, bool>
+ insert(const StringMapEntry<ValueTy> &mapEntry) {
+ return insert(mapEntry.getKey());
+ }
+
+ /// Check if the set contains the given \c key.
+ bool contains(StringRef key) const { return Base::FindKey(key) != -1; }
+};
} // end namespace llvm
diff --git a/linux-x64/clang/include/llvm/ADT/TinyPtrVector.h b/linux-x64/clang/include/llvm/ADT/TinyPtrVector.h
index ac82451..ed20a76 100644
--- a/linux-x64/clang/include/llvm/ADT/TinyPtrVector.h
+++ b/linux-x64/clang/include/llvm/ADT/TinyPtrVector.h
@@ -31,6 +31,10 @@
public:
using VecTy = SmallVector<EltTy, 4>;
using value_type = typename VecTy::value_type;
+ // EltTy must be the first pointer type so that is<EltTy> is true for the
+ // default-constructed PtrUnion. This allows an empty TinyPtrVector to
+ // naturally vend a begin/end iterator of type EltTy* without an additional
+ // check for the empty state.
using PtrUnion = PointerUnion<EltTy, VecTy *>;
private:
@@ -96,14 +100,14 @@
if (RHS.Val.template is<EltTy>()) {
V->clear();
V->push_back(RHS.front());
- RHS.Val = (EltTy)nullptr;
+ RHS.Val = EltTy();
return *this;
}
delete V;
}
Val = RHS.Val;
- RHS.Val = (EltTy)nullptr;
+ RHS.Val = EltTy();
return *this;
}
@@ -148,10 +152,10 @@
}
// Implicit conversion to ArrayRef<U> if EltTy* implicitly converts to U*.
- template<typename U,
- typename std::enable_if<
- std::is_convertible<ArrayRef<EltTy>, ArrayRef<U>>::value,
- bool>::type = false>
+ template <
+ typename U,
+ std::enable_if_t<std::is_convertible<ArrayRef<EltTy>, ArrayRef<U>>::value,
+ bool> = false>
operator ArrayRef<U>() const {
return operator ArrayRef<EltTy>();
}
@@ -213,9 +217,9 @@
EltTy operator[](unsigned i) const {
assert(!Val.isNull() && "can't index into an empty vector");
- if (EltTy V = Val.template dyn_cast<EltTy>()) {
+ if (Val.template is<EltTy>()) {
assert(i == 0 && "tinyvector index out of range");
- return V;
+ return Val.template get<EltTy>();
}
assert(i < Val.template get<VecTy*>()->size() &&
@@ -225,29 +229,29 @@
EltTy front() const {
assert(!empty() && "vector empty");
- if (EltTy V = Val.template dyn_cast<EltTy>())
- return V;
+ if (Val.template is<EltTy>())
+ return Val.template get<EltTy>();
return Val.template get<VecTy*>()->front();
}
EltTy back() const {
assert(!empty() && "vector empty");
- if (EltTy V = Val.template dyn_cast<EltTy>())
- return V;
+ if (Val.template is<EltTy>())
+ return Val.template get<EltTy>();
return Val.template get<VecTy*>()->back();
}
void push_back(EltTy NewVal) {
- assert(NewVal && "Can't add a null value");
-
// If we have nothing, add something.
if (Val.isNull()) {
Val = NewVal;
+ assert(!Val.isNull() && "Can't add a null value");
return;
}
// If we have a single value, convert to a vector.
- if (EltTy V = Val.template dyn_cast<EltTy>()) {
+ if (Val.template is<EltTy>()) {
+ EltTy V = Val.template get<EltTy>();
Val = new VecTy();
Val.template get<VecTy*>()->push_back(V);
}
@@ -267,7 +271,7 @@
void clear() {
// If we have a single value, convert to empty.
if (Val.template is<EltTy>()) {
- Val = (EltTy)nullptr;
+ Val = EltTy();
} else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
// If we have a vector form, just clear it.
Vec->clear();
@@ -282,7 +286,7 @@
// If we have a single value, convert to empty.
if (Val.template is<EltTy>()) {
if (I == begin())
- Val = (EltTy)nullptr;
+ Val = EltTy();
} else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
// multiple items in a vector; just do the erase, there is no
// benefit to collapsing back to a pointer
@@ -298,7 +302,7 @@
if (Val.template is<EltTy>()) {
if (S == begin() && S != E)
- Val = (EltTy)nullptr;
+ Val = EltTy();
} else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
return Vec->erase(S, E);
}
@@ -313,7 +317,8 @@
return std::prev(end());
}
assert(!Val.isNull() && "Null value with non-end insert iterator.");
- if (EltTy V = Val.template dyn_cast<EltTy>()) {
+ if (Val.template is<EltTy>()) {
+ EltTy V = Val.template get<EltTy>();
assert(I == begin());
Val = Elt;
push_back(V);
@@ -339,7 +344,8 @@
}
Val = new VecTy();
- } else if (EltTy V = Val.template dyn_cast<EltTy>()) {
+ } else if (Val.template is<EltTy>()) {
+ EltTy V = Val.template get<EltTy>();
Val = new VecTy();
Val.template get<VecTy*>()->push_back(V);
}
diff --git a/linux-x64/clang/include/llvm/ADT/Triple.h b/linux-x64/clang/include/llvm/ADT/Triple.h
index edeb31e..f6f0155 100644
--- a/linux-x64/clang/include/llvm/ADT/Triple.h
+++ b/linux-x64/clang/include/llvm/ADT/Triple.h
@@ -19,6 +19,8 @@
namespace llvm {
+class VersionTuple;
+
/// Triple - Helper class for working with autoconf configuration names. For
/// historical reasons, we also call these 'triples' (they used to contain
/// exactly three fields).
@@ -54,6 +56,7 @@
avr, // AVR: Atmel AVR microcontroller
bpfel, // eBPF or extended BPF or 64-bit BPF (little endian)
bpfeb, // eBPF or extended BPF or 64-bit BPF (big endian)
+ csky, // CSKY: csky
hexagon, // Hexagon: hexagon
mips, // MIPS: mips, mipsallegrex, mipsr6
mipsel, // MIPSEL: mipsel, mipsallegrexe, mipsr6el
@@ -61,6 +64,7 @@
mips64el, // MIPS64EL: mips64el, mips64r6el, mipsn32el, mipsn32r6el
msp430, // MSP430: msp430
ppc, // PPC: powerpc
+ ppcle, // PPCLE: powerpc (little endian)
ppc64, // PPC64: powerpc64, ppu
ppc64le, // PPC64LE: powerpc64le
r600, // R600: AMD GPUs HD2XXX - HD6XXX
@@ -95,11 +99,14 @@
wasm64, // WebAssembly with 64-bit pointers
renderscript32, // 32-bit RenderScript
renderscript64, // 64-bit RenderScript
- LastArchType = renderscript64
+ ve, // NEC SX-Aurora Vector Engine
+ LastArchType = ve
};
enum SubArchType {
NoSubArch,
+ ARMSubArch_v8_7a,
+ ARMSubArch_v8_6a,
ARMSubArch_v8_5a,
ARMSubArch_v8_4a,
ARMSubArch_v8_3a,
@@ -124,11 +131,15 @@
ARMSubArch_v5te,
ARMSubArch_v4t,
+ AArch64SubArch_arm64e,
+
KalimbaSubArch_v3,
KalimbaSubArch_v4,
KalimbaSubArch_v5,
- MipsSubArch_r6
+ MipsSubArch_r6,
+
+ PPCSubArch_spe
};
enum VendorType {
UnknownVendor,
@@ -136,8 +147,6 @@
Apple,
PC,
SCEI,
- BGP,
- BGQ,
Freescale,
IBM,
ImaginationTechnologies,
@@ -169,11 +178,11 @@
OpenBSD,
Solaris,
Win32,
+ ZOS,
Haiku,
Minix,
RTEMS,
NaCl, // Native Client
- CNK, // BG/P Compute-Node Kernel
AIX,
CUDA, // NVIDIA CUDA
NVCL, // NVIDIA OpenCL
@@ -203,8 +212,6 @@
CODE16,
EABI,
EABIHF,
- ELFv1,
- ELFv2,
Android,
Musl,
MuslEABI,
@@ -223,6 +230,7 @@
COFF,
ELF,
+ GOFF,
MachO,
Wasm,
XCOFF,
@@ -436,17 +444,7 @@
/// compatibility, which handles supporting skewed version numbering schemes
/// used by the "darwin" triples.
bool isMacOSXVersionLT(unsigned Major, unsigned Minor = 0,
- unsigned Micro = 0) const {
- assert(isMacOSX() && "Not an OS X triple!");
-
- // If this is OS X, expect a sane version number.
- if (getOS() == Triple::MacOSX)
- return isOSVersionLT(Major, Minor, Micro);
-
- // Otherwise, compare to the "Darwin" number.
- assert(Major == 10 && "Unexpected major version");
- return isOSVersionLT(Minor + 4, Micro, 0);
- }
+ unsigned Micro = 0) const;
/// isMacOSX - Is this a Mac OS X triple. For legacy reasons, we support both
/// "darwin" and "osx" as OS X triples.
@@ -477,7 +475,9 @@
return getSubArch() == Triple::ARMSubArch_v7k;
}
- /// isOSDarwin - Is this a "Darwin" OS (OS X, iOS, or watchOS).
+ bool isOSzOS() const { return getOS() == Triple::ZOS; }
+
+ /// isOSDarwin - Is this a "Darwin" OS (macOS, iOS, tvOS or watchOS).
bool isOSDarwin() const {
return isMacOSX() || isiOS() || isWatchOS();
}
@@ -490,6 +490,12 @@
return getEnvironment() == Triple::MacABI;
}
+ /// Returns true for targets that run on a macOS machine.
+ bool isTargetMachineMac() const {
+ return isMacOSX() || (isOSDarwin() && (isSimulatorEnvironment() ||
+ isMacCatalystEnvironment()));
+ }
+
bool isOSNetBSD() const {
return getOS() == Triple::NetBSD;
}
@@ -629,6 +635,9 @@
return getObjectFormat() == Triple::COFF;
}
+ /// Tests whether the OS uses the GOFF binary format.
+ bool isOSBinFormatGOFF() const { return getObjectFormat() == Triple::GOFF; }
+
/// Tests whether the environment is MachO.
bool isOSBinFormatMachO() const {
return getObjectFormat() == Triple::MachO;
@@ -690,6 +699,13 @@
return getArch() == Triple::nvptx || getArch() == Triple::nvptx64;
}
+ /// Tests whether the target is AMDGCN
+ bool isAMDGCN() const { return getArch() == Triple::amdgcn; }
+
+ bool isAMDGPU() const {
+ return getArch() == Triple::r600 || getArch() == Triple::amdgcn;
+ }
+
/// Tests whether the target is Thumb (little and big endian).
bool isThumb() const {
return getArch() == Triple::thumb || getArch() == Triple::thumbeb;
@@ -702,7 +718,17 @@
/// Tests whether the target is AArch64 (little and big endian).
bool isAArch64() const {
- return getArch() == Triple::aarch64 || getArch() == Triple::aarch64_be;
+ return getArch() == Triple::aarch64 || getArch() == Triple::aarch64_be ||
+ getArch() == Triple::aarch64_32;
+ }
+
+ /// Tests whether the target is AArch64 and pointers are the size specified by
+ /// \p PointerWidth.
+ bool isAArch64(int PointerWidth) const {
+ assert(PointerWidth == 64 || PointerWidth == 32);
+ if (!isAArch64())
+ return false;
+ return isArch64Bit() ? PointerWidth == 64 : PointerWidth == 32;
}
/// Tests whether the target is MIPS 32-bit (little and big endian).
@@ -720,6 +746,17 @@
return isMIPS32() || isMIPS64();
}
+ /// Tests whether the target is PowerPC (32- or 64-bit LE or BE).
+ bool isPPC() const {
+ return getArch() == Triple::ppc || getArch() == Triple::ppc64 ||
+ getArch() == Triple::ppcle || getArch() == Triple::ppc64le;
+ }
+
+ /// Tests whether the target is 32-bit PowerPC (little and big endian).
+ bool isPPC32() const {
+ return getArch() == Triple::ppc || getArch() == Triple::ppcle;
+ }
+
/// Tests whether the target is 64-bit PowerPC (little and big endian).
bool isPPC64() const {
return getArch() == Triple::ppc64 || getArch() == Triple::ppc64le;
@@ -730,9 +767,40 @@
return getArch() == Triple::riscv32 || getArch() == Triple::riscv64;
}
+ /// Tests whether the target is SystemZ.
+ bool isSystemZ() const {
+ return getArch() == Triple::systemz;
+ }
+
+ /// Tests whether the target is x86 (32- or 64-bit).
+ bool isX86() const {
+ return getArch() == Triple::x86 || getArch() == Triple::x86_64;
+ }
+
+ /// Tests whether the target is VE
+ bool isVE() const {
+ return getArch() == Triple::ve;
+ }
+
+ /// Tests whether the target is wasm (32- and 64-bit).
+ bool isWasm() const {
+ return getArch() == Triple::wasm32 || getArch() == Triple::wasm64;
+ }
+
+ // Tests whether the target is CSKY
+ bool isCSKY() const {
+ return getArch() == Triple::csky;
+ }
+
+ /// Tests whether the target is the Apple "arm64e" AArch64 subarch.
+ bool isArm64e() const {
+ return getArch() == Triple::aarch64 &&
+ getSubArch() == Triple::AArch64SubArch_arm64e;
+ }
+
/// Tests whether the target supports comdat
bool supportsCOMDAT() const {
- return !isOSBinFormatMachO();
+ return !(isOSBinFormatMachO() || isOSBinFormatXCOFF());
}
/// Tests whether the target uses emulated TLS as default.
@@ -740,6 +808,14 @@
return isAndroid() || isOSOpenBSD() || isWindowsCygwinEnvironment();
}
+ /// Tests whether the target uses -data-sections as default.
+ bool hasDefaultDataSections() const {
+ return isOSBinFormatXCOFF() || isWasm();
+ }
+
+ /// Tests if the environment supports dllimport/export annotations.
+ bool hasDLLImportExport() const { return isOSWindows() || isPS4CPU(); }
+
/// @}
/// @name Mutators
/// @{
@@ -839,6 +915,12 @@
/// Merge target triples.
std::string merge(const Triple &Other) const;
+ /// Some platforms have different minimum supported OS versions that
+ /// varies by the architecture specified in the triple. This function
+ /// returns the minimum supported OS version for this triple if one an exists,
+ /// or an invalid version tuple if this triple doesn't have one.
+ VersionTuple getMinimumSupportedOSVersion() const;
+
/// @}
/// @name Static helpers for IDs.
/// @{
@@ -873,6 +955,10 @@
static ArchType getArchTypeForLLVMName(StringRef Str);
/// @}
+
+ /// Returns a canonicalized OS version number for the specified OS.
+ static VersionTuple getCanonicalVersionForOS(OSType OSKind,
+ const VersionTuple &Version);
};
} // End llvm namespace
diff --git a/linux-x64/clang/include/llvm/ADT/TypeSwitch.h b/linux-x64/clang/include/llvm/ADT/TypeSwitch.h
new file mode 100644
index 0000000..bfcb206
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/TypeSwitch.h
@@ -0,0 +1,176 @@
+//===- TypeSwitch.h - Switch functionality for RTTI casting -*- C++ -*-----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TypeSwitch template, which mimics a switch()
+// statement whose cases are type names.
+//
+//===-----------------------------------------------------------------------===/
+
+#ifndef LLVM_ADT_TYPESWITCH_H
+#define LLVM_ADT_TYPESWITCH_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Casting.h"
+
+namespace llvm {
+namespace detail {
+
+template <typename DerivedT, typename T> class TypeSwitchBase {
+public:
+ TypeSwitchBase(const T &value) : value(value) {}
+ TypeSwitchBase(TypeSwitchBase &&other) : value(other.value) {}
+ ~TypeSwitchBase() = default;
+
+ /// TypeSwitchBase is not copyable.
+ TypeSwitchBase(const TypeSwitchBase &) = delete;
+ void operator=(const TypeSwitchBase &) = delete;
+ void operator=(TypeSwitchBase &&other) = delete;
+
+ /// Invoke a case on the derived class with multiple case types.
+ template <typename CaseT, typename CaseT2, typename... CaseTs,
+ typename CallableT>
+ DerivedT &Case(CallableT &&caseFn) {
+ DerivedT &derived = static_cast<DerivedT &>(*this);
+ return derived.template Case<CaseT>(caseFn)
+ .template Case<CaseT2, CaseTs...>(caseFn);
+ }
+
+ /// Invoke a case on the derived class, inferring the type of the Case from
+ /// the first input of the given callable.
+ /// Note: This inference rules for this overload are very simple: strip
+ /// pointers and references.
+ template <typename CallableT> DerivedT &Case(CallableT &&caseFn) {
+ using Traits = function_traits<std::decay_t<CallableT>>;
+ using CaseT = std::remove_cv_t<std::remove_pointer_t<
+ std::remove_reference_t<typename Traits::template arg_t<0>>>>;
+
+ DerivedT &derived = static_cast<DerivedT &>(*this);
+ return derived.template Case<CaseT>(std::forward<CallableT>(caseFn));
+ }
+
+protected:
+ /// Trait to check whether `ValueT` provides a 'dyn_cast' method with type
+ /// `CastT`.
+ template <typename ValueT, typename CastT>
+ using has_dyn_cast_t =
+ decltype(std::declval<ValueT &>().template dyn_cast<CastT>());
+
+ /// Attempt to dyn_cast the given `value` to `CastT`. This overload is
+ /// selected if `value` already has a suitable dyn_cast method.
+ template <typename CastT, typename ValueT>
+ static auto castValue(
+ ValueT value,
+ typename std::enable_if_t<
+ is_detected<has_dyn_cast_t, ValueT, CastT>::value> * = nullptr) {
+ return value.template dyn_cast<CastT>();
+ }
+
+ /// Attempt to dyn_cast the given `value` to `CastT`. This overload is
+ /// selected if llvm::dyn_cast should be used.
+ template <typename CastT, typename ValueT>
+ static auto castValue(
+ ValueT value,
+ typename std::enable_if_t<
+ !is_detected<has_dyn_cast_t, ValueT, CastT>::value> * = nullptr) {
+ return dyn_cast<CastT>(value);
+ }
+
+ /// The root value we are switching on.
+ const T value;
+};
+} // end namespace detail
+
+/// This class implements a switch-like dispatch statement for a value of 'T'
+/// using dyn_cast functionality. Each `Case<T>` takes a callable to be invoked
+/// if the root value isa<T>, the callable is invoked with the result of
+/// dyn_cast<T>() as a parameter.
+///
+/// Example:
+/// Operation *op = ...;
+/// LogicalResult result = TypeSwitch<Operation *, LogicalResult>(op)
+/// .Case<ConstantOp>([](ConstantOp op) { ... })
+/// .Default([](Operation *op) { ... });
+///
+template <typename T, typename ResultT = void>
+class TypeSwitch : public detail::TypeSwitchBase<TypeSwitch<T, ResultT>, T> {
+public:
+ using BaseT = detail::TypeSwitchBase<TypeSwitch<T, ResultT>, T>;
+ using BaseT::BaseT;
+ using BaseT::Case;
+ TypeSwitch(TypeSwitch &&other) = default;
+
+ /// Add a case on the given type.
+ template <typename CaseT, typename CallableT>
+ TypeSwitch<T, ResultT> &Case(CallableT &&caseFn) {
+ if (result)
+ return *this;
+
+ // Check to see if CaseT applies to 'value'.
+ if (auto caseValue = BaseT::template castValue<CaseT>(this->value))
+ result = caseFn(caseValue);
+ return *this;
+ }
+
+ /// As a default, invoke the given callable within the root value.
+ template <typename CallableT>
+ LLVM_NODISCARD ResultT Default(CallableT &&defaultFn) {
+ if (result)
+ return std::move(*result);
+ return defaultFn(this->value);
+ }
+
+ LLVM_NODISCARD
+ operator ResultT() {
+ assert(result && "Fell off the end of a type-switch");
+ return std::move(*result);
+ }
+
+private:
+ /// The pointer to the result of this switch statement, once known,
+ /// null before that.
+ Optional<ResultT> result;
+};
+
+/// Specialization of TypeSwitch for void returning callables.
+template <typename T>
+class TypeSwitch<T, void>
+ : public detail::TypeSwitchBase<TypeSwitch<T, void>, T> {
+public:
+ using BaseT = detail::TypeSwitchBase<TypeSwitch<T, void>, T>;
+ using BaseT::BaseT;
+ using BaseT::Case;
+ TypeSwitch(TypeSwitch &&other) = default;
+
+ /// Add a case on the given type.
+ template <typename CaseT, typename CallableT>
+ TypeSwitch<T, void> &Case(CallableT &&caseFn) {
+ if (foundMatch)
+ return *this;
+
+ // Check to see if any of the types apply to 'value'.
+ if (auto caseValue = BaseT::template castValue<CaseT>(this->value)) {
+ caseFn(caseValue);
+ foundMatch = true;
+ }
+ return *this;
+ }
+
+ /// As a default, invoke the given callable within the root value.
+ template <typename CallableT> void Default(CallableT &&defaultFn) {
+ if (!foundMatch)
+ defaultFn(this->value);
+ }
+
+private:
+ /// A flag detailing if we have already found a match.
+ bool foundMatch = false;
+};
+} // end namespace llvm
+
+#endif // LLVM_ADT_TYPESWITCH_H
diff --git a/linux-x64/clang/include/llvm/ADT/VariadicFunction.h b/linux-x64/clang/include/llvm/ADT/VariadicFunction.h
deleted file mode 100644
index 5aefb05..0000000
--- a/linux-x64/clang/include/llvm/ADT/VariadicFunction.h
+++ /dev/null
@@ -1,330 +0,0 @@
-//===- VariadicFunction.h - Variadic Functions ------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements compile-time type-safe variadic functions.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ADT_VARIADICFUNCTION_H
-#define LLVM_ADT_VARIADICFUNCTION_H
-
-#include "llvm/ADT/ArrayRef.h"
-
-namespace llvm {
-
-// Define macros to aid in expanding a comma separated series with the index of
-// the series pasted onto the last token.
-#define LLVM_COMMA_JOIN1(x) x ## 0
-#define LLVM_COMMA_JOIN2(x) LLVM_COMMA_JOIN1(x), x ## 1
-#define LLVM_COMMA_JOIN3(x) LLVM_COMMA_JOIN2(x), x ## 2
-#define LLVM_COMMA_JOIN4(x) LLVM_COMMA_JOIN3(x), x ## 3
-#define LLVM_COMMA_JOIN5(x) LLVM_COMMA_JOIN4(x), x ## 4
-#define LLVM_COMMA_JOIN6(x) LLVM_COMMA_JOIN5(x), x ## 5
-#define LLVM_COMMA_JOIN7(x) LLVM_COMMA_JOIN6(x), x ## 6
-#define LLVM_COMMA_JOIN8(x) LLVM_COMMA_JOIN7(x), x ## 7
-#define LLVM_COMMA_JOIN9(x) LLVM_COMMA_JOIN8(x), x ## 8
-#define LLVM_COMMA_JOIN10(x) LLVM_COMMA_JOIN9(x), x ## 9
-#define LLVM_COMMA_JOIN11(x) LLVM_COMMA_JOIN10(x), x ## 10
-#define LLVM_COMMA_JOIN12(x) LLVM_COMMA_JOIN11(x), x ## 11
-#define LLVM_COMMA_JOIN13(x) LLVM_COMMA_JOIN12(x), x ## 12
-#define LLVM_COMMA_JOIN14(x) LLVM_COMMA_JOIN13(x), x ## 13
-#define LLVM_COMMA_JOIN15(x) LLVM_COMMA_JOIN14(x), x ## 14
-#define LLVM_COMMA_JOIN16(x) LLVM_COMMA_JOIN15(x), x ## 15
-#define LLVM_COMMA_JOIN17(x) LLVM_COMMA_JOIN16(x), x ## 16
-#define LLVM_COMMA_JOIN18(x) LLVM_COMMA_JOIN17(x), x ## 17
-#define LLVM_COMMA_JOIN19(x) LLVM_COMMA_JOIN18(x), x ## 18
-#define LLVM_COMMA_JOIN20(x) LLVM_COMMA_JOIN19(x), x ## 19
-#define LLVM_COMMA_JOIN21(x) LLVM_COMMA_JOIN20(x), x ## 20
-#define LLVM_COMMA_JOIN22(x) LLVM_COMMA_JOIN21(x), x ## 21
-#define LLVM_COMMA_JOIN23(x) LLVM_COMMA_JOIN22(x), x ## 22
-#define LLVM_COMMA_JOIN24(x) LLVM_COMMA_JOIN23(x), x ## 23
-#define LLVM_COMMA_JOIN25(x) LLVM_COMMA_JOIN24(x), x ## 24
-#define LLVM_COMMA_JOIN26(x) LLVM_COMMA_JOIN25(x), x ## 25
-#define LLVM_COMMA_JOIN27(x) LLVM_COMMA_JOIN26(x), x ## 26
-#define LLVM_COMMA_JOIN28(x) LLVM_COMMA_JOIN27(x), x ## 27
-#define LLVM_COMMA_JOIN29(x) LLVM_COMMA_JOIN28(x), x ## 28
-#define LLVM_COMMA_JOIN30(x) LLVM_COMMA_JOIN29(x), x ## 29
-#define LLVM_COMMA_JOIN31(x) LLVM_COMMA_JOIN30(x), x ## 30
-#define LLVM_COMMA_JOIN32(x) LLVM_COMMA_JOIN31(x), x ## 31
-
-/// Class which can simulate a type-safe variadic function.
-///
-/// The VariadicFunction class template makes it easy to define
-/// type-safe variadic functions where all arguments have the same
-/// type.
-///
-/// Suppose we need a variadic function like this:
-///
-/// ResultT Foo(const ArgT &A_0, const ArgT &A_1, ..., const ArgT &A_N);
-///
-/// Instead of many overloads of Foo(), we only need to define a helper
-/// function that takes an array of arguments:
-///
-/// ResultT FooImpl(ArrayRef<const ArgT *> Args) {
-/// // 'Args[i]' is a pointer to the i-th argument passed to Foo().
-/// ...
-/// }
-///
-/// and then define Foo() like this:
-///
-/// const VariadicFunction<ResultT, ArgT, FooImpl> Foo;
-///
-/// VariadicFunction takes care of defining the overloads of Foo().
-///
-/// Actually, Foo is a function object (i.e. functor) instead of a plain
-/// function. This object is stateless and its constructor/destructor
-/// does nothing, so it's safe to create global objects and call Foo(...) at
-/// any time.
-///
-/// Sometimes we need a variadic function to have some fixed leading
-/// arguments whose types may be different from that of the optional
-/// arguments. For example:
-///
-/// bool FullMatch(const StringRef &S, const RE &Regex,
-/// const ArgT &A_0, ..., const ArgT &A_N);
-///
-/// VariadicFunctionN is for such cases, where N is the number of fixed
-/// arguments. It is like VariadicFunction, except that it takes N more
-/// template arguments for the types of the fixed arguments:
-///
-/// bool FullMatchImpl(const StringRef &S, const RE &Regex,
-/// ArrayRef<const ArgT *> Args) { ... }
-/// const VariadicFunction2<bool, const StringRef&,
-/// const RE&, ArgT, FullMatchImpl>
-/// FullMatch;
-///
-/// Currently VariadicFunction and friends support up-to 3
-/// fixed leading arguments and up-to 32 optional arguments.
-template <typename ResultT, typename ArgT,
- ResultT (*Func)(ArrayRef<const ArgT *>)>
-struct VariadicFunction {
- ResultT operator()() const {
- return Func(None);
- }
-
-#define LLVM_DEFINE_OVERLOAD(N) \
- ResultT operator()(LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
- const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
- return Func(makeArrayRef(Args)); \
- }
- LLVM_DEFINE_OVERLOAD(1)
- LLVM_DEFINE_OVERLOAD(2)
- LLVM_DEFINE_OVERLOAD(3)
- LLVM_DEFINE_OVERLOAD(4)
- LLVM_DEFINE_OVERLOAD(5)
- LLVM_DEFINE_OVERLOAD(6)
- LLVM_DEFINE_OVERLOAD(7)
- LLVM_DEFINE_OVERLOAD(8)
- LLVM_DEFINE_OVERLOAD(9)
- LLVM_DEFINE_OVERLOAD(10)
- LLVM_DEFINE_OVERLOAD(11)
- LLVM_DEFINE_OVERLOAD(12)
- LLVM_DEFINE_OVERLOAD(13)
- LLVM_DEFINE_OVERLOAD(14)
- LLVM_DEFINE_OVERLOAD(15)
- LLVM_DEFINE_OVERLOAD(16)
- LLVM_DEFINE_OVERLOAD(17)
- LLVM_DEFINE_OVERLOAD(18)
- LLVM_DEFINE_OVERLOAD(19)
- LLVM_DEFINE_OVERLOAD(20)
- LLVM_DEFINE_OVERLOAD(21)
- LLVM_DEFINE_OVERLOAD(22)
- LLVM_DEFINE_OVERLOAD(23)
- LLVM_DEFINE_OVERLOAD(24)
- LLVM_DEFINE_OVERLOAD(25)
- LLVM_DEFINE_OVERLOAD(26)
- LLVM_DEFINE_OVERLOAD(27)
- LLVM_DEFINE_OVERLOAD(28)
- LLVM_DEFINE_OVERLOAD(29)
- LLVM_DEFINE_OVERLOAD(30)
- LLVM_DEFINE_OVERLOAD(31)
- LLVM_DEFINE_OVERLOAD(32)
-#undef LLVM_DEFINE_OVERLOAD
-};
-
-template <typename ResultT, typename Param0T, typename ArgT,
- ResultT (*Func)(Param0T, ArrayRef<const ArgT *>)>
-struct VariadicFunction1 {
- ResultT operator()(Param0T P0) const {
- return Func(P0, None);
- }
-
-#define LLVM_DEFINE_OVERLOAD(N) \
- ResultT operator()(Param0T P0, LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
- const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
- return Func(P0, makeArrayRef(Args)); \
- }
- LLVM_DEFINE_OVERLOAD(1)
- LLVM_DEFINE_OVERLOAD(2)
- LLVM_DEFINE_OVERLOAD(3)
- LLVM_DEFINE_OVERLOAD(4)
- LLVM_DEFINE_OVERLOAD(5)
- LLVM_DEFINE_OVERLOAD(6)
- LLVM_DEFINE_OVERLOAD(7)
- LLVM_DEFINE_OVERLOAD(8)
- LLVM_DEFINE_OVERLOAD(9)
- LLVM_DEFINE_OVERLOAD(10)
- LLVM_DEFINE_OVERLOAD(11)
- LLVM_DEFINE_OVERLOAD(12)
- LLVM_DEFINE_OVERLOAD(13)
- LLVM_DEFINE_OVERLOAD(14)
- LLVM_DEFINE_OVERLOAD(15)
- LLVM_DEFINE_OVERLOAD(16)
- LLVM_DEFINE_OVERLOAD(17)
- LLVM_DEFINE_OVERLOAD(18)
- LLVM_DEFINE_OVERLOAD(19)
- LLVM_DEFINE_OVERLOAD(20)
- LLVM_DEFINE_OVERLOAD(21)
- LLVM_DEFINE_OVERLOAD(22)
- LLVM_DEFINE_OVERLOAD(23)
- LLVM_DEFINE_OVERLOAD(24)
- LLVM_DEFINE_OVERLOAD(25)
- LLVM_DEFINE_OVERLOAD(26)
- LLVM_DEFINE_OVERLOAD(27)
- LLVM_DEFINE_OVERLOAD(28)
- LLVM_DEFINE_OVERLOAD(29)
- LLVM_DEFINE_OVERLOAD(30)
- LLVM_DEFINE_OVERLOAD(31)
- LLVM_DEFINE_OVERLOAD(32)
-#undef LLVM_DEFINE_OVERLOAD
-};
-
-template <typename ResultT, typename Param0T, typename Param1T, typename ArgT,
- ResultT (*Func)(Param0T, Param1T, ArrayRef<const ArgT *>)>
-struct VariadicFunction2 {
- ResultT operator()(Param0T P0, Param1T P1) const {
- return Func(P0, P1, None);
- }
-
-#define LLVM_DEFINE_OVERLOAD(N) \
- ResultT operator()(Param0T P0, Param1T P1, \
- LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
- const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
- return Func(P0, P1, makeArrayRef(Args)); \
- }
- LLVM_DEFINE_OVERLOAD(1)
- LLVM_DEFINE_OVERLOAD(2)
- LLVM_DEFINE_OVERLOAD(3)
- LLVM_DEFINE_OVERLOAD(4)
- LLVM_DEFINE_OVERLOAD(5)
- LLVM_DEFINE_OVERLOAD(6)
- LLVM_DEFINE_OVERLOAD(7)
- LLVM_DEFINE_OVERLOAD(8)
- LLVM_DEFINE_OVERLOAD(9)
- LLVM_DEFINE_OVERLOAD(10)
- LLVM_DEFINE_OVERLOAD(11)
- LLVM_DEFINE_OVERLOAD(12)
- LLVM_DEFINE_OVERLOAD(13)
- LLVM_DEFINE_OVERLOAD(14)
- LLVM_DEFINE_OVERLOAD(15)
- LLVM_DEFINE_OVERLOAD(16)
- LLVM_DEFINE_OVERLOAD(17)
- LLVM_DEFINE_OVERLOAD(18)
- LLVM_DEFINE_OVERLOAD(19)
- LLVM_DEFINE_OVERLOAD(20)
- LLVM_DEFINE_OVERLOAD(21)
- LLVM_DEFINE_OVERLOAD(22)
- LLVM_DEFINE_OVERLOAD(23)
- LLVM_DEFINE_OVERLOAD(24)
- LLVM_DEFINE_OVERLOAD(25)
- LLVM_DEFINE_OVERLOAD(26)
- LLVM_DEFINE_OVERLOAD(27)
- LLVM_DEFINE_OVERLOAD(28)
- LLVM_DEFINE_OVERLOAD(29)
- LLVM_DEFINE_OVERLOAD(30)
- LLVM_DEFINE_OVERLOAD(31)
- LLVM_DEFINE_OVERLOAD(32)
-#undef LLVM_DEFINE_OVERLOAD
-};
-
-template <typename ResultT, typename Param0T, typename Param1T,
- typename Param2T, typename ArgT,
- ResultT (*Func)(Param0T, Param1T, Param2T, ArrayRef<const ArgT *>)>
-struct VariadicFunction3 {
- ResultT operator()(Param0T P0, Param1T P1, Param2T P2) const {
- return Func(P0, P1, P2, None);
- }
-
-#define LLVM_DEFINE_OVERLOAD(N) \
- ResultT operator()(Param0T P0, Param1T P1, Param2T P2, \
- LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
- const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
- return Func(P0, P1, P2, makeArrayRef(Args)); \
- }
- LLVM_DEFINE_OVERLOAD(1)
- LLVM_DEFINE_OVERLOAD(2)
- LLVM_DEFINE_OVERLOAD(3)
- LLVM_DEFINE_OVERLOAD(4)
- LLVM_DEFINE_OVERLOAD(5)
- LLVM_DEFINE_OVERLOAD(6)
- LLVM_DEFINE_OVERLOAD(7)
- LLVM_DEFINE_OVERLOAD(8)
- LLVM_DEFINE_OVERLOAD(9)
- LLVM_DEFINE_OVERLOAD(10)
- LLVM_DEFINE_OVERLOAD(11)
- LLVM_DEFINE_OVERLOAD(12)
- LLVM_DEFINE_OVERLOAD(13)
- LLVM_DEFINE_OVERLOAD(14)
- LLVM_DEFINE_OVERLOAD(15)
- LLVM_DEFINE_OVERLOAD(16)
- LLVM_DEFINE_OVERLOAD(17)
- LLVM_DEFINE_OVERLOAD(18)
- LLVM_DEFINE_OVERLOAD(19)
- LLVM_DEFINE_OVERLOAD(20)
- LLVM_DEFINE_OVERLOAD(21)
- LLVM_DEFINE_OVERLOAD(22)
- LLVM_DEFINE_OVERLOAD(23)
- LLVM_DEFINE_OVERLOAD(24)
- LLVM_DEFINE_OVERLOAD(25)
- LLVM_DEFINE_OVERLOAD(26)
- LLVM_DEFINE_OVERLOAD(27)
- LLVM_DEFINE_OVERLOAD(28)
- LLVM_DEFINE_OVERLOAD(29)
- LLVM_DEFINE_OVERLOAD(30)
- LLVM_DEFINE_OVERLOAD(31)
- LLVM_DEFINE_OVERLOAD(32)
-#undef LLVM_DEFINE_OVERLOAD
-};
-
-// Cleanup the macro namespace.
-#undef LLVM_COMMA_JOIN1
-#undef LLVM_COMMA_JOIN2
-#undef LLVM_COMMA_JOIN3
-#undef LLVM_COMMA_JOIN4
-#undef LLVM_COMMA_JOIN5
-#undef LLVM_COMMA_JOIN6
-#undef LLVM_COMMA_JOIN7
-#undef LLVM_COMMA_JOIN8
-#undef LLVM_COMMA_JOIN9
-#undef LLVM_COMMA_JOIN10
-#undef LLVM_COMMA_JOIN11
-#undef LLVM_COMMA_JOIN12
-#undef LLVM_COMMA_JOIN13
-#undef LLVM_COMMA_JOIN14
-#undef LLVM_COMMA_JOIN15
-#undef LLVM_COMMA_JOIN16
-#undef LLVM_COMMA_JOIN17
-#undef LLVM_COMMA_JOIN18
-#undef LLVM_COMMA_JOIN19
-#undef LLVM_COMMA_JOIN20
-#undef LLVM_COMMA_JOIN21
-#undef LLVM_COMMA_JOIN22
-#undef LLVM_COMMA_JOIN23
-#undef LLVM_COMMA_JOIN24
-#undef LLVM_COMMA_JOIN25
-#undef LLVM_COMMA_JOIN26
-#undef LLVM_COMMA_JOIN27
-#undef LLVM_COMMA_JOIN28
-#undef LLVM_COMMA_JOIN29
-#undef LLVM_COMMA_JOIN30
-#undef LLVM_COMMA_JOIN31
-#undef LLVM_COMMA_JOIN32
-
-} // end namespace llvm
-
-#endif // LLVM_ADT_VARIADICFUNCTION_H
diff --git a/linux-x64/clang/include/llvm/ADT/Waymarking.h b/linux-x64/clang/include/llvm/ADT/Waymarking.h
new file mode 100644
index 0000000..f00bc10
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/Waymarking.h
@@ -0,0 +1,325 @@
+//===- Waymarking.h - Array waymarking algorithm ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Utility to backtrace an array's head, from a pointer into it. For the
+// backtrace to work, we use "Waymarks", which are special tags embedded into
+// the array's elements.
+//
+// A Tag of n-bits (in size) is composed as follows:
+//
+// bits: | n-1 | n-2 ... 0 |
+// .---------.------------------------------------.
+// |Stop Mask|(2^(n-1))-ary numeric system - digit|
+// '---------'------------------------------------'
+//
+// Backtracing is done as follows:
+// Walk back (starting from a given pointer to an element into the array), until
+// a tag with a "Stop Mask" is reached. Then start calculating the "Offset" from
+// the array's head, by picking up digits along the way, until another stop is
+// reached. The "Offset" is then subtracted from the current pointer, and the
+// result is the array's head.
+// A special case - if we first encounter a Tag with a Stop and a zero digit,
+// then this is already the head.
+//
+// For example:
+// In case of 2 bits:
+//
+// Tags:
+// x0 - binary digit 0
+// x1 - binary digit 1
+// 1x - stop and calculate (s)
+//
+// Array:
+// .---.---.---.---.---.---.---.---.---.---.---.---.---.---.---.---.
+// head -> |s0 |s1 | 0 |s1 | 0 | 0 |s1 | 1 | 1 |s1 | 0 | 1 | 0 |s1 | 0 | 1 |
+// '---'---'---'---'---'---'---'---'---'---'---'---'---'---'---'---'
+// |-1 |-2 |-4 |-7 |-10 |-14
+// <_ | | | | | |
+// <_____ | | | | |
+// <_____________ | | | |
+// <_________________________ | | |
+// <_____________________________________ | |
+// <_____________________________________________________ |
+//
+//
+// In case of 3 bits:
+//
+// Tags:
+// x00 - quaternary digit 0
+// x01 - quaternary digit 1
+// x10 - quaternary digit 2
+// x11 - quaternary digit 3
+// 1xy - stop and calculate (s)
+//
+// Array:
+// .---.---.---.---.---.---.---.---.---.---.---.---.---.---.---.---.
+// head -> |s0 |s1 |s2 |s3 | 0 |s1 | 2 |s1 | 0 |s2 | 2 |s2 | 0 |s3 | 2 |s3 |
+// '---'---'---'---'---'---'---'---'---'---'---'---'---'---'---'---'
+// |-1 |-2 |-3 |-4 |-6 |-8 |-10 |-12 |-14 |-16
+// <_ | | | | | | | | | |
+// <_____ | | | | | | | | |
+// <_________ | | | | | | | |
+// <_____________ | | | | | | |
+// <_____________________ | | | | | |
+// <_____________________________ | | | | |
+// <_____________________________________ | | | |
+// <_____________________________________________ | | |
+// <_____________________________________________________ | |
+// <_____________________________________________________________ |
+//
+//
+// The API introduces 2 functions:
+// 1. fillWaymarks
+// 2. followWaymarks
+//
+// Example:
+// int N = 10;
+// int M = 5;
+// int **A = new int *[N + M]; // Define the array.
+// for (int I = 0; I < N + M; ++I)
+// A[I] = new int(I);
+//
+// fillWaymarks(A, A + N); // Set the waymarks for the first N elements
+// // of the array.
+// // Note that it must be done AFTER we fill
+// // the array's elements.
+//
+// ... // Elements which are not in the range
+// // [A, A+N) will not be marked, and we won't
+// // be able to call followWaymarks on them.
+//
+// ... // Elements which will be changed after the
+// // call to fillWaymarks, will have to be
+// // retagged.
+//
+// fillWaymarks(A + N, A + N + M, N); // Set the waymarks of the remaining M
+// // elements.
+// ...
+// int **It = A + N + 1;
+// int **B = followWaymarks(It); // Find the head of the array containing It.
+// assert(B == A);
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_WAYMARKING_H
+#define LLVM_ADT_WAYMARKING_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+
+namespace llvm {
+
+namespace detail {
+
+template <unsigned NumBits> struct WaymarkingTraits {
+ enum : unsigned {
+ // The number of bits of a Waymarking Tag.
+ NUM_BITS = NumBits,
+
+ // A Tag is composed from a Mark and a Stop mask.
+ MARK_SIZE = NUM_BITS - 1,
+ STOP_MASK = (1 << MARK_SIZE),
+ MARK_MASK = (STOP_MASK - 1),
+ TAG_MASK = (MARK_MASK | STOP_MASK),
+
+ // The number of pre-computed tags (for fast fill).
+ NUM_STATIC_TAGS = 32
+ };
+
+private:
+ // Add a new tag, calculated from Count and Stop, to the Vals pack, while
+ // continuing recursively to decrease Len down to 0.
+ template <unsigned Len, bool Stop, unsigned Count, uint8_t... Vals>
+ struct AddTag;
+
+ // Delegate to the specialized AddTag according to the need of a Stop mask.
+ template <unsigned Len, unsigned Count, uint8_t... Vals> struct GenTag {
+ typedef
+ typename AddTag<Len, (Count <= MARK_MASK), Count, Vals...>::Xdata Xdata;
+ };
+
+ // Start adding tags while calculating the next Count, which is actually the
+ // number of already calculated tags (equivalent to the position in the
+ // array).
+ template <unsigned Len, uint8_t... Vals> struct GenOffset {
+ typedef typename GenTag<Len, sizeof...(Vals), Vals...>::Xdata Xdata;
+ };
+
+ // Add the tag and remove it from Count.
+ template <unsigned Len, unsigned Count, uint8_t... Vals>
+ struct AddTag<Len, false, Count, Vals...> {
+ typedef typename GenTag<Len - 1, (Count >> MARK_SIZE), Vals...,
+ Count & MARK_MASK>::Xdata Xdata;
+ };
+
+ // We have reached the end of this Count, so start with a new Count.
+ template <unsigned Len, unsigned Count, uint8_t... Vals>
+ struct AddTag<Len, true, Count, Vals...> {
+ typedef typename GenOffset<Len - 1, Vals...,
+ (Count & MARK_MASK) | STOP_MASK>::Xdata Xdata;
+ };
+
+ template <unsigned Count, uint8_t... Vals> struct TagsData {
+ // The remaining number for calculating the next tag, following the last one
+ // in Values.
+ static const unsigned Remain = Count;
+
+ // The array of ordered pre-computed Tags.
+ static const uint8_t Values[sizeof...(Vals)];
+ };
+
+ // Specialize the case when Len equals 0, as the recursion stop condition.
+ template <unsigned Count, uint8_t... Vals>
+ struct AddTag<0, false, Count, Vals...> {
+ typedef TagsData<Count, Vals...> Xdata;
+ };
+
+ template <unsigned Count, uint8_t... Vals>
+ struct AddTag<0, true, Count, Vals...> {
+ typedef TagsData<Count, Vals...> Xdata;
+ };
+
+public:
+ typedef typename GenOffset<NUM_STATIC_TAGS>::Xdata Tags;
+};
+
+template <unsigned NumBits>
+template <unsigned Count, uint8_t... Vals>
+const uint8_t WaymarkingTraits<NumBits>::TagsData<
+ Count, Vals...>::Values[sizeof...(Vals)] = {Vals...};
+
+} // end namespace detail
+
+/// This class is responsible for tagging (and retrieving the tag of) a given
+/// element of type T.
+template <class T, class WTraits = detail::WaymarkingTraits<
+ PointerLikeTypeTraits<T>::NumLowBitsAvailable>>
+struct Waymarker {
+ using Traits = WTraits;
+ static void setWaymark(T &N, unsigned Tag) { N.setWaymark(Tag); }
+ static unsigned getWaymark(const T &N) { return N.getWaymark(); }
+};
+
+template <class T, class WTraits> struct Waymarker<T *, WTraits> {
+ using Traits = WTraits;
+ static void setWaymark(T *&N, unsigned Tag) {
+ reinterpret_cast<uintptr_t &>(N) |= static_cast<uintptr_t>(Tag);
+ }
+ static unsigned getWaymark(const T *N) {
+ return static_cast<unsigned>(reinterpret_cast<uintptr_t>(N)) &
+ Traits::TAG_MASK;
+ }
+};
+
+/// Sets up the waymarking algorithm's tags for a given range [Begin, End).
+///
+/// \param Begin The beginning of the range to mark with tags (inclusive).
+/// \param End The ending of the range to mark with tags (exclusive).
+/// \param Offset The position in the supposed tags array from which to start
+/// marking the given range.
+template <class TIter, class Marker = Waymarker<
+ typename std::iterator_traits<TIter>::value_type>>
+void fillWaymarks(TIter Begin, TIter End, size_t Offset = 0) {
+ if (Begin == End)
+ return;
+
+ size_t Count = Marker::Traits::Tags::Remain;
+ if (Offset <= Marker::Traits::NUM_STATIC_TAGS) {
+ // Start by filling the pre-calculated tags, starting from the given offset.
+ while (Offset != Marker::Traits::NUM_STATIC_TAGS) {
+ Marker::setWaymark(*Begin, Marker::Traits::Tags::Values[Offset]);
+
+ ++Offset;
+ ++Begin;
+
+ if (Begin == End)
+ return;
+ }
+ } else {
+ // The given offset is larger than the number of pre-computed tags, so we
+ // must do it the hard way.
+ // Calculate the next remaining Count, as if we have filled the tags up to
+ // the given offset.
+ size_t Off = Marker::Traits::NUM_STATIC_TAGS;
+ do {
+ ++Off;
+
+ unsigned Tag = Count & Marker::Traits::MARK_MASK;
+
+ // If the count can fit into the tag, then the counting must stop.
+ if (Count <= Marker::Traits::MARK_MASK) {
+ Tag |= Marker::Traits::STOP_MASK;
+ Count = Off;
+ } else
+ Count >>= Marker::Traits::MARK_SIZE;
+ } while (Off != Offset);
+ }
+
+ // By now, we have the matching remaining Count for the current offset.
+ do {
+ ++Offset;
+
+ unsigned Tag = Count & Marker::Traits::MARK_MASK;
+
+ // If the count can fit into the tag, then the counting must stop.
+ if (Count <= Marker::Traits::MARK_MASK) {
+ Tag |= Marker::Traits::STOP_MASK;
+ Count = Offset;
+ } else
+ Count >>= Marker::Traits::MARK_SIZE;
+
+ Marker::setWaymark(*Begin, Tag);
+ ++Begin;
+ } while (Begin != End);
+}
+
+/// Sets up the waymarking algorithm's tags for a given range.
+///
+/// \param Range The range to mark with tags.
+/// \param Offset The position in the supposed tags array from which to start
+/// marking the given range.
+template <typename R, class Marker = Waymarker<typename std::remove_reference<
+ decltype(*std::begin(std::declval<R &>()))>::type>>
+void fillWaymarks(R &&Range, size_t Offset = 0) {
+ return fillWaymarks<decltype(std::begin(std::declval<R &>())), Marker>(
+ adl_begin(Range), adl_end(Range), Offset);
+}
+
+/// Retrieves the element marked with tag of only STOP_MASK, by following the
+/// waymarks. This is the first element in a range passed to a previous call to
+/// \c fillWaymarks with \c Offset 0.
+///
+/// For the trivial usage of calling \c fillWaymarks(Array), and \c I is an
+/// iterator inside \c Array, this function retrieves the head of \c Array, by
+/// following the waymarks.
+///
+/// \param I The iterator into an array which was marked by the waymarking tags
+/// (by a previous call to \c fillWaymarks).
+template <class TIter, class Marker = Waymarker<
+ typename std::iterator_traits<TIter>::value_type>>
+TIter followWaymarks(TIter I) {
+ unsigned Tag;
+ do
+ Tag = Marker::getWaymark(*I--);
+ while (!(Tag & Marker::Traits::STOP_MASK));
+
+ // Special case for the first Use.
+ if (Tag != Marker::Traits::STOP_MASK) {
+ ptrdiff_t Offset = Tag & Marker::Traits::MARK_MASK;
+ while (!((Tag = Marker::getWaymark(*I)) & Marker::Traits::STOP_MASK)) {
+ Offset = (Offset << Marker::Traits::MARK_SIZE) + Tag;
+ --I;
+ }
+ I -= Offset;
+ }
+ return ++I;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_WAYMARKING_H
diff --git a/linux-x64/clang/include/llvm/ADT/bit.h b/linux-x64/clang/include/llvm/ADT/bit.h
index a790d5e..d76bc6c 100644
--- a/linux-x64/clang/include/llvm/ADT/bit.h
+++ b/linux-x64/clang/include/llvm/ADT/bit.h
@@ -22,23 +22,28 @@
// This implementation of bit_cast is different from the C++17 one in two ways:
// - It isn't constexpr because that requires compiler support.
// - It requires trivially-constructible To, to avoid UB in the implementation.
-template <typename To, typename From
- , typename = typename std::enable_if<sizeof(To) == sizeof(From)>::type
+template <
+ typename To, typename From,
+ typename = std::enable_if_t<sizeof(To) == sizeof(From)>
#if (__has_feature(is_trivially_constructible) && defined(_LIBCPP_VERSION)) || \
(defined(__GNUC__) && __GNUC__ >= 5)
- , typename = typename std::is_trivially_constructible<To>::type
+ ,
+ typename = std::enable_if_t<std::is_trivially_constructible<To>::value>
#elif __has_feature(is_trivially_constructible)
- , typename = typename std::enable_if<__is_trivially_constructible(To)>::type
+ ,
+ typename = std::enable_if_t<__is_trivially_constructible(To)>
#else
// See comment below.
#endif
#if (__has_feature(is_trivially_copyable) && defined(_LIBCPP_VERSION)) || \
(defined(__GNUC__) && __GNUC__ >= 5)
- , typename = typename std::enable_if<std::is_trivially_copyable<To>::value>::type
- , typename = typename std::enable_if<std::is_trivially_copyable<From>::value>::type
+ ,
+ typename = std::enable_if_t<std::is_trivially_copyable<To>::value>,
+ typename = std::enable_if_t<std::is_trivially_copyable<From>::value>
#elif __has_feature(is_trivially_copyable)
- , typename = typename std::enable_if<__is_trivially_copyable(To)>::type
- , typename = typename std::enable_if<__is_trivially_copyable(From)>::type
+ ,
+ typename = std::enable_if_t<__is_trivially_copyable(To)>,
+ typename = std::enable_if_t<__is_trivially_copyable(From)>
#else
// This case is GCC 4.x. clang with libc++ or libstdc++ never get here. Unlike
// llvm/Support/type_traits.h's is_trivially_copyable we don't want to
@@ -46,7 +51,7 @@
// compilation failures on the bots instead of locally. That's acceptable
// because it's very few developers, and only until we move past C++11.
#endif
->
+ >
inline To bit_cast(const From &from) noexcept {
To to;
std::memcpy(&to, &from, sizeof(To));
diff --git a/linux-x64/clang/include/llvm/ADT/fallible_iterator.h b/linux-x64/clang/include/llvm/ADT/fallible_iterator.h
index 6501ad2..a196d88 100644
--- a/linux-x64/clang/include/llvm/ADT/fallible_iterator.h
+++ b/linux-x64/clang/include/llvm/ADT/fallible_iterator.h
@@ -86,7 +86,7 @@
return fallible_iterator(std::move(I), &Err);
}
- /// Construct a fallible iteratro that can be used as an end-of-range value.
+ /// Construct a fallible iterator that can be used as an end-of-range value.
///
/// A value created by this method can be dereferenced (if the underlying
/// value points at a valid value) and compared, but not incremented or
@@ -96,12 +96,10 @@
}
/// Forward dereference to the underlying iterator.
- auto operator*() -> decltype(*std::declval<Underlying>()) { return *I; }
+ decltype(auto) operator*() { return *I; }
/// Forward const dereference to the underlying iterator.
- auto operator*() const -> decltype(*std::declval<const Underlying>()) {
- return *I;
- }
+ decltype(auto) operator*() const { return *I; }
/// Forward structure dereference to the underlying iterator (if the
/// underlying iterator supports it).
diff --git a/linux-x64/clang/include/llvm/ADT/ilist.h b/linux-x64/clang/include/llvm/ADT/ilist.h
index 06c7abf..d5a1f28 100644
--- a/linux-x64/clang/include/llvm/ADT/ilist.h
+++ b/linux-x64/clang/include/llvm/ADT/ilist.h
@@ -198,10 +198,12 @@
iplist_impl &operator=(const iplist_impl &) = delete;
iplist_impl(iplist_impl &&X)
- : TraitsT(std::move(X)), IntrusiveListT(std::move(X)) {}
+ : TraitsT(std::move(static_cast<TraitsT &>(X))),
+ IntrusiveListT(std::move(static_cast<IntrusiveListT &>(X))) {}
iplist_impl &operator=(iplist_impl &&X) {
- *static_cast<TraitsT *>(this) = std::move(X);
- *static_cast<IntrusiveListT *>(this) = std::move(X);
+ *static_cast<TraitsT *>(this) = std::move(static_cast<TraitsT &>(X));
+ *static_cast<IntrusiveListT *>(this) =
+ std::move(static_cast<IntrusiveListT &>(X));
return *this;
}
diff --git a/linux-x64/clang/include/llvm/ADT/ilist_iterator.h b/linux-x64/clang/include/llvm/ADT/ilist_iterator.h
index cbe5cef..be87634 100644
--- a/linux-x64/clang/include/llvm/ADT/ilist_iterator.h
+++ b/linux-x64/clang/include/llvm/ADT/ilist_iterator.h
@@ -88,15 +88,14 @@
// This is templated so that we can allow constructing a const iterator from
// a nonconst iterator...
template <bool RHSIsConst>
- ilist_iterator(
- const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS,
- typename std::enable_if<IsConst || !RHSIsConst, void *>::type = nullptr)
+ ilist_iterator(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS,
+ std::enable_if_t<IsConst || !RHSIsConst, void *> = nullptr)
: NodePtr(RHS.NodePtr) {}
// This is templated so that we can allow assigning to a const iterator from
// a nonconst iterator...
template <bool RHSIsConst>
- typename std::enable_if<IsConst || !RHSIsConst, ilist_iterator &>::type
+ std::enable_if_t<IsConst || !RHSIsConst, ilist_iterator &>
operator=(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS) {
NodePtr = RHS.NodePtr;
return *this;
diff --git a/linux-x64/clang/include/llvm/ADT/iterator.h b/linux-x64/clang/include/llvm/ADT/iterator.h
index 467fd4c..6625a3f 100644
--- a/linux-x64/clang/include/llvm/ADT/iterator.h
+++ b/linux-x64/clang/include/llvm/ADT/iterator.h
@@ -142,28 +142,30 @@
return tmp;
}
+#ifndef __cpp_impl_three_way_comparison
bool operator!=(const DerivedT &RHS) const {
- return !static_cast<const DerivedT *>(this)->operator==(RHS);
+ return !(static_cast<const DerivedT &>(*this) == RHS);
}
+#endif
bool operator>(const DerivedT &RHS) const {
static_assert(
IsRandomAccess,
"Relational operators are only defined for random access iterators.");
- return !static_cast<const DerivedT *>(this)->operator<(RHS) &&
- !static_cast<const DerivedT *>(this)->operator==(RHS);
+ return !(static_cast<const DerivedT &>(*this) < RHS) &&
+ !(static_cast<const DerivedT &>(*this) == RHS);
}
bool operator<=(const DerivedT &RHS) const {
static_assert(
IsRandomAccess,
"Relational operators are only defined for random access iterators.");
- return !static_cast<const DerivedT *>(this)->operator>(RHS);
+ return !(static_cast<const DerivedT &>(*this) > RHS);
}
bool operator>=(const DerivedT &RHS) const {
static_assert(
IsRandomAccess,
"Relational operators are only defined for random access iterators.");
- return !static_cast<const DerivedT *>(this)->operator<(RHS);
+ return !(static_cast<const DerivedT &>(*this) < RHS);
}
PointerT operator->() { return &static_cast<DerivedT *>(this)->operator*(); }
@@ -194,14 +196,14 @@
typename T = typename std::iterator_traits<WrappedIteratorT>::value_type,
typename DifferenceTypeT =
typename std::iterator_traits<WrappedIteratorT>::difference_type,
- typename PointerT = typename std::conditional<
+ typename PointerT = std::conditional_t<
std::is_same<T, typename std::iterator_traits<
WrappedIteratorT>::value_type>::value,
- typename std::iterator_traits<WrappedIteratorT>::pointer, T *>::type,
- typename ReferenceT = typename std::conditional<
+ typename std::iterator_traits<WrappedIteratorT>::pointer, T *>,
+ typename ReferenceT = std::conditional_t<
std::is_same<T, typename std::iterator_traits<
WrappedIteratorT>::value_type>::value,
- typename std::iterator_traits<WrappedIteratorT>::reference, T &>::type>
+ typename std::iterator_traits<WrappedIteratorT>::reference, T &>>
class iterator_adaptor_base
: public iterator_facade_base<DerivedT, IteratorCategoryT, T,
DifferenceTypeT, PointerT, ReferenceT> {
@@ -260,12 +262,16 @@
return *static_cast<DerivedT *>(this);
}
- bool operator==(const DerivedT &RHS) const { return I == RHS.I; }
- bool operator<(const DerivedT &RHS) const {
+ friend bool operator==(const iterator_adaptor_base &LHS,
+ const iterator_adaptor_base &RHS) {
+ return LHS.I == RHS.I;
+ }
+ friend bool operator<(const iterator_adaptor_base &LHS,
+ const iterator_adaptor_base &RHS) {
static_assert(
BaseT::IsRandomAccess,
"Relational operators are only defined for random access iterators.");
- return I < RHS.I;
+ return LHS.I < RHS.I;
}
ReferenceT operator*() const { return *I; }
@@ -281,8 +287,8 @@
/// using iterator = pointee_iterator<SmallVectorImpl<T *>::iterator>;
/// \endcode
template <typename WrappedIteratorT,
- typename T = typename std::remove_reference<
- decltype(**std::declval<WrappedIteratorT>())>::type>
+ typename T = std::remove_reference_t<decltype(
+ **std::declval<WrappedIteratorT>())>>
struct pointee_iterator
: iterator_adaptor_base<
pointee_iterator<WrappedIteratorT, T>, WrappedIteratorT,
@@ -333,6 +339,13 @@
PointerIteratorT(std::end(std::forward<RangeT>(Range))));
}
+template <typename WrappedIteratorT,
+ typename T1 = std::remove_reference_t<decltype(
+ **std::declval<WrappedIteratorT>())>,
+ typename T2 = std::add_pointer_t<T1>>
+using raw_pointer_iterator =
+ pointer_iterator<pointee_iterator<WrappedIteratorT, T1>, T2>;
+
// Wrapper iterator over iterator ItType, adding DataRef to the type of ItType,
// to create NodeRef = std::pair<InnerTypeOfItType, DataRef>.
template <typename ItType, typename NodeRef, typename DataRef>
diff --git a/linux-x64/clang/include/llvm/ADT/iterator_range.h b/linux-x64/clang/include/llvm/ADT/iterator_range.h
index 774c7c4..a9b46a3 100644
--- a/linux-x64/clang/include/llvm/ADT/iterator_range.h
+++ b/linux-x64/clang/include/llvm/ADT/iterator_range.h
@@ -18,7 +18,6 @@
#ifndef LLVM_ADT_ITERATOR_RANGE_H
#define LLVM_ADT_ITERATOR_RANGE_H
-#include <iterator>
#include <utility>
namespace llvm {
@@ -44,6 +43,7 @@
IteratorT begin() const { return begin_iterator; }
IteratorT end() const { return end_iterator; }
+ bool empty() const { return begin_iterator == end_iterator; }
};
/// Convenience function for iterating over sub-ranges.
@@ -58,11 +58,6 @@
return iterator_range<T>(std::move(p.first), std::move(p.second));
}
-template <typename T>
-iterator_range<decltype(adl_begin(std::declval<T>()))> drop_begin(T &&t,
- int n) {
- return make_range(std::next(adl_begin(t), n), adl_end(t));
-}
}
#endif
diff --git a/linux-x64/clang/include/llvm/ADT/simple_ilist.h b/linux-x64/clang/include/llvm/ADT/simple_ilist.h
index 9257b47..d5ae20c 100644
--- a/linux-x64/clang/include/llvm/ADT/simple_ilist.h
+++ b/linux-x64/clang/include/llvm/ADT/simple_ilist.h
@@ -52,7 +52,7 @@
/// to calling \a std::for_each() on the range to be discarded.
///
/// The currently available \p Options customize the nodes in the list. The
-/// same options must be specified in the \a ilist_node instantation for
+/// same options must be specified in the \a ilist_node instantiation for
/// compatibility (although the order is irrelevant).
/// \li Use \a ilist_tag to designate which ilist_node for a given \p T this
/// list should use. This is useful if a type \p T is part of multiple,