Update clang to r339409b.
Change-Id: Ied8a188bb072c40035320acedc86164b66d920af
diff --git a/linux-x64/clang/include/llvm/CodeGen/AsmPrinter.h b/linux-x64/clang/include/llvm/CodeGen/AsmPrinter.h
index b605638..14bc081 100644
--- a/linux-x64/clang/include/llvm/CodeGen/AsmPrinter.h
+++ b/linux-x64/clang/include/llvm/CodeGen/AsmPrinter.h
@@ -631,6 +631,11 @@
/// inline asm.
void EmitInlineAsm(const MachineInstr *MI) const;
+ /// Add inline assembly info to the diagnostics machinery, so we can
+ /// emit file and position info. Returns SrcMgr memory buffer position.
+ unsigned addInlineAsmDiagBuffer(StringRef AsmStr,
+ const MDNode *LocMDNode) const;
+
//===------------------------------------------------------------------===//
// Internal Implementation Details
//===------------------------------------------------------------------===//
diff --git a/linux-x64/clang/include/llvm/CodeGen/BasicTTIImpl.h b/linux-x64/clang/include/llvm/CodeGen/BasicTTIImpl.h
index f76a242..b460cdc 100644
--- a/linux-x64/clang/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/linux-x64/clang/include/llvm/CodeGen/BasicTTIImpl.h
@@ -1036,6 +1036,9 @@
case Intrinsic::fabs:
ISDs.push_back(ISD::FABS);
break;
+ case Intrinsic::canonicalize:
+ ISDs.push_back(ISD::FCANONICALIZE);
+ break;
case Intrinsic::minnum:
ISDs.push_back(ISD::FMINNUM);
if (FMF.noNaNs())
@@ -1136,7 +1139,8 @@
SmallVector<unsigned, 2> CustomCost;
for (unsigned ISD : ISDs) {
if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
- if (IID == Intrinsic::fabs && TLI->isFAbsFree(LT.second)) {
+ if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
+ TLI->isFAbsFree(LT.second)) {
return 0;
}
diff --git a/linux-x64/clang/include/llvm/CodeGen/CommandFlags.inc b/linux-x64/clang/include/llvm/CodeGen/CommandFlags.inc
index 7d2d167..6535e06 100644
--- a/linux-x64/clang/include/llvm/CodeGen/CommandFlags.inc
+++ b/linux-x64/clang/include/llvm/CodeGen/CommandFlags.inc
@@ -74,7 +74,8 @@
static cl::opt<llvm::CodeModel::Model> CMModel(
"code-model", cl::desc("Choose code model"),
- cl::values(clEnumValN(CodeModel::Small, "small", "Small code model"),
+ cl::values(clEnumValN(CodeModel::Tiny, "tiny", "Tiny code model"),
+ clEnumValN(CodeModel::Small, "small", "Small code model"),
clEnumValN(CodeModel::Kernel, "kernel", "Kernel code model"),
clEnumValN(CodeModel::Medium, "medium", "Medium code model"),
clEnumValN(CodeModel::Large, "large", "Large code model")));
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h
index 36a33de..fa49e8c 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h
@@ -24,6 +24,16 @@
class TargetPassConfig;
class MachineFunction;
+class CombinerChangeObserver {
+public:
+ virtual ~CombinerChangeObserver() {}
+
+ /// An instruction was erased.
+ virtual void erasedInstr(MachineInstr &MI) = 0;
+ /// An instruction was created and inserted into the function.
+ virtual void createdInstr(MachineInstr &MI) = 0;
+};
+
class Combiner {
public:
Combiner(CombinerInfo &CombinerInfo, const TargetPassConfig *TPC);
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 5d5b839..ee67f90 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -20,6 +20,7 @@
namespace llvm {
+class CombinerChangeObserver;
class MachineIRBuilder;
class MachineRegisterInfo;
class MachineInstr;
@@ -27,14 +28,22 @@
class CombinerHelper {
MachineIRBuilder &Builder;
MachineRegisterInfo &MRI;
+ CombinerChangeObserver &Observer;
+
+ void eraseInstr(MachineInstr &MI);
+ void scheduleForVisit(MachineInstr &MI);
public:
- CombinerHelper(MachineIRBuilder &B);
+ CombinerHelper(CombinerChangeObserver &Observer, MachineIRBuilder &B);
/// If \p MI is COPY, try to combine it.
/// Returns true if MI changed.
bool tryCombineCopy(MachineInstr &MI);
+ /// If \p MI is extend that consumes the result of a load, try to combine it.
+ /// Returns true if MI changed.
+ bool tryCombineExtendingLoads(MachineInstr &MI);
+
/// Try to transform \p MI by using all of the above
/// combine functions. Returns true if changed.
bool tryCombine(MachineInstr &MI);
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
index 1d24854..98c5b98 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
@@ -17,10 +17,12 @@
#include <cassert>
namespace llvm {
+class CombinerChangeObserver;
class LegalizerInfo;
class MachineInstr;
class MachineIRBuilder;
class MachineRegisterInfo;
+
// Contains information relevant to enabling/disabling various combines for a
// pass.
class CombinerInfo {
@@ -41,7 +43,8 @@
/// illegal ops that are created.
bool LegalizeIllegalOps; // TODO: Make use of this.
const LegalizerInfo *LInfo;
- virtual bool combine(MachineInstr &MI, MachineIRBuilder &B) const = 0;
+ virtual bool combine(CombinerChangeObserver &Observer, MachineInstr &MI,
+ MachineIRBuilder &B) const = 0;
};
} // namespace llvm
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index f355396..2498ee9 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -232,6 +232,7 @@
/// Returns true if the value should be split into multiple LLTs.
/// If \p Offsets is given then the split type's offsets will be stored in it.
+ /// If \p Offsets is not empty it will be cleared first.
bool valueIsSplit(const Value &V,
SmallVectorImpl<uint64_t> *Offsets = nullptr);
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index d122e67..4d804d0 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -49,6 +49,7 @@
};
LegalizerHelper(MachineFunction &MF);
+ LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI);
/// Replace \p MI by a sequence of legal instructions that can implement the
/// same operation. Note that this means \p MI may be deleted, so any iterator
@@ -112,6 +113,8 @@
void extractParts(unsigned Reg, LLT Ty, int NumParts,
SmallVectorImpl<unsigned> &VRegs);
+ LegalizeResult lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+
MachineRegisterInfo &MRI;
const LegalizerInfo &LI;
};
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index ac1673d..a837cf0 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -60,11 +60,6 @@
class MachineIRBuilderBase {
MachineIRBuilderState State;
- const TargetInstrInfo &getTII() {
- assert(State.TII && "TargetInstrInfo is not set");
- return *State.TII;
- }
-
void validateTruncExt(unsigned Dst, unsigned Src, bool IsExtend);
protected:
@@ -107,6 +102,11 @@
MachineIRBuilderBase(const MachineIRBuilderState &BState) : State(BState) {}
+ const TargetInstrInfo &getTII() {
+ assert(State.TII && "TargetInstrInfo is not set");
+ return *State.TII;
+ }
+
/// Getter for the function we currently build.
MachineFunction &getMF() {
assert(State.MF && "MachineFunction is not set");
@@ -208,6 +208,10 @@
const MDNode *Variable,
const MDNode *Expr);
+ /// Build and insert a DBG_LABEL instruction specifying that \p Label is
+ /// given. Convert "llvm.dbg.label Label" to "DBG_LABEL Label".
+ MachineInstrBuilder buildDbgLabel(const MDNode *Label);
+
/// Build and insert \p Res = G_FRAME_INDEX \p Idx
///
/// G_FRAME_INDEX materializes the address of an alloca value or other
@@ -664,6 +668,11 @@
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred,
unsigned Res, unsigned Op0, unsigned Op1);
+ template <typename DstTy, typename... UseArgsTy>
+ MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, DstTy &&Dst,
+ UseArgsTy &&... UseArgs) {
+ return buildICmp(Pred, getDestFromArg(Dst), getRegFromArg(UseArgs)...);
+ }
/// Build and insert a \p Res = G_FCMP \p Pred\p Op0, \p Op1
///
@@ -692,6 +701,10 @@
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildSelect(unsigned Res, unsigned Tst,
unsigned Op0, unsigned Op1);
+ template <typename DstTy, typename... UseArgsTy>
+ MachineInstrBuilder buildSelect(DstTy &&Dst, UseArgsTy &&... UseArgs) {
+ return buildSelect(getDestFromArg(Dst), getRegFromArg(UseArgs)...);
+ }
/// Build and insert \p Res = G_INSERT_VECTOR_ELT \p Val,
/// \p Elt, \p Idx
diff --git a/linux-x64/clang/include/llvm/CodeGen/ISDOpcodes.h b/linux-x64/clang/include/llvm/CodeGen/ISDOpcodes.h
index 80bd796..ec9c461 100644
--- a/linux-x64/clang/include/llvm/CodeGen/ISDOpcodes.h
+++ b/linux-x64/clang/include/llvm/CodeGen/ISDOpcodes.h
@@ -550,11 +550,8 @@
/// is often a storage-only type but has native conversions.
FP16_TO_FP, FP_TO_FP16,
- /// FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
- /// FLOG, FLOG2, FLOG10, FEXP, FEXP2,
- /// FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary
- /// floating point operations. These are inspired by libm.
- FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
+ /// Perform various unary floating-point operations inspired by libm.
+ FNEG, FABS, FSQRT, FCBRT, FSIN, FCOS, FPOWI, FPOW,
FLOG, FLOG2, FLOG10, FEXP, FEXP2,
FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR,
/// FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two
@@ -786,11 +783,20 @@
// Masked load and store - consecutive vector load and store operations
// with additional mask operand that prevents memory accesses to the
// masked-off lanes.
+ //
+ // Val, OutChain = MLOAD(BasePtr, Mask, PassThru)
+ // OutChain = MSTORE(Value, BasePtr, Mask)
MLOAD, MSTORE,
// Masked gather and scatter - load and store operations for a vector of
// random addresses with additional mask operand that prevents memory
// accesses to the masked-off lanes.
+ //
+ // Val, OutChain = GATHER(InChain, PassThru, Mask, BasePtr, Index, Scale)
+ // OutChain = SCATTER(InChain, Value, Mask, BasePtr, Index, Scale)
+ //
+ // The Index operand can have more vector elements than the other operands
+ // due to type legalization. The extra elements are ignored.
MGATHER, MSCATTER,
/// This corresponds to the llvm.lifetime.* intrinsics. The first operand
diff --git a/linux-x64/clang/include/llvm/CodeGen/MIRYamlMapping.h b/linux-x64/clang/include/llvm/CodeGen/MIRYamlMapping.h
index 7f46406..dc90575 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MIRYamlMapping.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MIRYamlMapping.h
@@ -425,6 +425,7 @@
StringValue StackProtector;
// TODO: Serialize FunctionContextIdx
unsigned MaxCallFrameSize = ~0u; ///< ~0u means: not computed yet.
+ unsigned CVBytesOfCalleeSavedRegisters = 0;
bool HasOpaqueSPAdjustment = false;
bool HasVAStart = false;
bool HasMustTailInVarArgFunc = false;
@@ -443,6 +444,8 @@
AdjustsStack == Other.AdjustsStack && HasCalls == Other.HasCalls &&
StackProtector == Other.StackProtector &&
MaxCallFrameSize == Other.MaxCallFrameSize &&
+ CVBytesOfCalleeSavedRegisters ==
+ Other.CVBytesOfCalleeSavedRegisters &&
HasOpaqueSPAdjustment == Other.HasOpaqueSPAdjustment &&
HasVAStart == Other.HasVAStart &&
HasMustTailInVarArgFunc == Other.HasMustTailInVarArgFunc &&
@@ -465,6 +468,8 @@
YamlIO.mapOptional("stackProtector", MFI.StackProtector,
StringValue()); // Don't print it out when it's empty.
YamlIO.mapOptional("maxCallFrameSize", MFI.MaxCallFrameSize, (unsigned)~0);
+ YamlIO.mapOptional("cvBytesOfCalleeSavedRegisters",
+ MFI.CVBytesOfCalleeSavedRegisters, 0U);
YamlIO.mapOptional("hasOpaqueSPAdjustment", MFI.HasOpaqueSPAdjustment,
false);
YamlIO.mapOptional("hasVAStart", MFI.HasVAStart, false);
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineBasicBlock.h b/linux-x64/clang/include/llvm/CodeGen/MachineBasicBlock.h
index ace33ef..ec2f270 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineBasicBlock.h
@@ -569,6 +569,12 @@
return !empty() && back().isReturn();
}
+ /// Convenience function that returns true if the block ends in an EH scope
+ /// return instruction.
+ bool isEHScopeReturnBlock() const {
+ return !empty() && back().isEHScopeReturn();
+ }
+
/// Split the critical edge from this block to the given successor block, and
/// return the newly created block, or null if splitting is not possible.
///
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineFrameInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineFrameInfo.h
index 2d6081f..02b2f1b 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineFrameInfo.h
@@ -266,6 +266,10 @@
/// It is only valid during and after prolog/epilog code insertion.
unsigned MaxCallFrameSize = ~0u;
+ /// The number of bytes of callee saved registers that the target wants to
+ /// report for the current function in the CodeView S_FRAMEPROC record.
+ unsigned CVBytesOfCalleeSavedRegisters = 0;
+
/// The prolog/epilog code inserter fills in this vector with each
/// callee saved register saved in the frame. Beyond its use by the prolog/
/// epilog code inserter, this data used for debug info and exception
@@ -603,6 +607,15 @@
}
void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; }
+ /// Returns how many bytes of callee-saved registers the target pushed in the
+ /// prologue. Only used for debug info.
+ unsigned getCVBytesOfCalleeSavedRegisters() const {
+ return CVBytesOfCalleeSavedRegisters;
+ }
+ void setCVBytesOfCalleeSavedRegisters(unsigned S) {
+ CVBytesOfCalleeSavedRegisters = S;
+ }
+
/// Create a new object at a fixed location on the stack.
/// All fixed objects should be created before other objects are created for
/// efficiency. By default, fixed objects are not pointed to by LLVM IR
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineFunction.h b/linux-x64/clang/include/llvm/CodeGen/MachineFunction.h
index e8a4d52..7471b31 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineFunction.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineFunction.h
@@ -294,7 +294,7 @@
bool HasInlineAsm = false;
/// True if any WinCFI instruction have been emitted in this function.
- Optional<bool> HasWinCFI;
+ bool HasWinCFI = false;
/// Current high-level properties of the IR of the function (e.g. is in SSA
/// form or whether registers have been allocated)
@@ -363,6 +363,25 @@
int Slot, const DILocation *Loc)
: Var(Var), Expr(Expr), Slot(Slot), Loc(Loc) {}
};
+
+ class Delegate {
+ virtual void anchor();
+
+ public:
+ virtual ~Delegate() = default;
+ virtual void MF_HandleInsertion(const MachineInstr &MI) = 0;
+ virtual void MF_HandleRemoval(const MachineInstr &MI) = 0;
+ };
+
+private:
+ Delegate *TheDelegate = nullptr;
+
+ // Callbacks for insertion and removal.
+ void handleInsertion(const MachineInstr &MI);
+ void handleRemoval(const MachineInstr &MI);
+ friend struct ilist_traits<MachineInstr>;
+
+public:
using VariableDbgInfoMapTy = SmallVector<VariableDbgInfo, 4>;
VariableDbgInfoMapTy VariableDbgInfos;
@@ -379,6 +398,23 @@
init();
}
+ /// Reset the currently registered delegate - otherwise assert.
+ void resetDelegate(Delegate *delegate) {
+ assert(TheDelegate == delegate &&
+ "Only the current delegate can perform reset!");
+ TheDelegate = nullptr;
+ }
+
+ /// Set the delegate. resetDelegate must be called before attempting
+ /// to set.
+ void setDelegate(Delegate *delegate) {
+ assert(delegate && !TheDelegate &&
+ "Attempted to set delegate to null, or to change it without "
+ "first resetting it!");
+
+ TheDelegate = delegate;
+ }
+
MachineModuleInfo &getMMI() const { return MMI; }
MCContext &getContext() const { return Ctx; }
@@ -484,8 +520,7 @@
}
bool hasWinCFI() const {
- assert(HasWinCFI.hasValue() && "HasWinCFI not set yet!");
- return *HasWinCFI;
+ return HasWinCFI;
}
void setHasWinCFI(bool v) { HasWinCFI = v; }
@@ -619,6 +654,14 @@
BasicBlocks.sort(comp);
}
+ /// Return the number of \p MachineInstrs in this \p MachineFunction.
+ unsigned getInstructionCount() const {
+ unsigned InstrCount = 0;
+ for (const MachineBasicBlock &MBB : BasicBlocks)
+ InstrCount += MBB.size();
+ return InstrCount;
+ }
+
//===--------------------------------------------------------------------===//
// Internal functions used to automatically number MachineBasicBlocks
@@ -711,23 +754,14 @@
/// Allocate and initialize a register mask with @p NumRegister bits.
uint32_t *allocateRegMask();
- /// allocateMemRefsArray - Allocate an array to hold MachineMemOperand
- /// pointers. This array is owned by the MachineFunction.
- MachineInstr::mmo_iterator allocateMemRefsArray(unsigned long Num);
-
- /// extractLoadMemRefs - Allocate an array and populate it with just the
- /// load information from the given MachineMemOperand sequence.
- std::pair<MachineInstr::mmo_iterator,
- MachineInstr::mmo_iterator>
- extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
- MachineInstr::mmo_iterator End);
-
- /// extractStoreMemRefs - Allocate an array and populate it with just the
- /// store information from the given MachineMemOperand sequence.
- std::pair<MachineInstr::mmo_iterator,
- MachineInstr::mmo_iterator>
- extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
- MachineInstr::mmo_iterator End);
+ /// Allocate and construct an extra info structure for a `MachineInstr`.
+ ///
+ /// This is allocated on the function's allocator and so lives the life of
+ /// the function.
+ MachineInstr::ExtraInfo *
+ createMIExtraInfo(ArrayRef<MachineMemOperand *> MMOs,
+ MCSymbol *PreInstrSymbol = nullptr,
+ MCSymbol *PostInstrSymbol = nullptr);
/// Allocate a string and populate it with the given external symbol name.
const char *createExternalSymbolName(StringRef Name);
@@ -788,7 +822,9 @@
void addInvoke(MachineBasicBlock *LandingPad,
MCSymbol *BeginLabel, MCSymbol *EndLabel);
- /// Add a new panding pad. Returns the label ID for the landing pad entry.
+ /// Add a new landing pad, and extract the exception handling information from
+ /// the landingpad instruction. Returns the label ID for the landing pad
+ /// entry.
MCSymbol *addLandingPad(MachineBasicBlock *LandingPad);
/// Provide the catch typeinfo for a landing pad.
@@ -880,15 +916,6 @@
}
};
-/// \name Exception Handling
-/// \{
-
-/// Extract the exception handling information from the landingpad instruction
-/// and add them to the specified machine module info.
-void addLandingPadInfo(const LandingPadInst &I, MachineBasicBlock &MBB);
-
-/// \}
-
//===--------------------------------------------------------------------===//
// GraphTraits specializations for function basic block graphs (CFGs)
//===--------------------------------------------------------------------===//
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineInstr.h b/linux-x64/clang/include/llvm/CodeGen/MachineInstr.h
index 88e13cd..7c4e771 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineInstr.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineInstr.h
@@ -17,16 +17,20 @@
#define LLVM_CODEGEN_MACHINEINSTR_H
#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/PointerSumType.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ArrayRecycler.h"
+#include "llvm/Support/TrailingObjects.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
@@ -61,7 +65,7 @@
: public ilist_node_with_parent<MachineInstr, MachineBasicBlock,
ilist_sentinel_tracking<true>> {
public:
- using mmo_iterator = MachineMemOperand **;
+ using mmo_iterator = ArrayRef<MachineMemOperand *>::iterator;
/// Flags to specify different kinds of comments to output in
/// assembly code. These flags carry semantic information not
@@ -93,8 +97,14 @@
// contraction operations like fma.
FmAfn = 1 << 9, // Instruction may map to Fast math
// instrinsic approximation.
- FmReassoc = 1 << 10 // Instruction supports Fast math
+ FmReassoc = 1 << 10, // Instruction supports Fast math
// reassociation of operand order.
+ NoUWrap = 1 << 11, // Instruction supports binary operator
+ // no unsigned wrap.
+ NoSWrap = 1 << 12, // Instruction supports binary operator
+ // no signed wrap.
+ IsExact = 1 << 13 // Instruction supports division is
+ // known to be exact.
};
private:
@@ -118,14 +128,102 @@
// anything other than to convey comment
// information to AsmPrinter.
- uint8_t NumMemRefs = 0; // Information on memory references.
- // Note that MemRefs == nullptr, means 'don't know', not 'no memory access'.
- // Calling code must treat missing information conservatively. If the number
- // of memory operands required to be precise exceeds the maximum value of
- // NumMemRefs - currently 256 - we remove the operands entirely. Note also
- // that this is a non-owning reference to a shared copy on write buffer owned
- // by the MachineFunction and created via MF.allocateMemRefsArray.
- mmo_iterator MemRefs = nullptr;
+ /// Internal implementation detail class that provides out-of-line storage for
+ /// extra info used by the machine instruction when this info cannot be stored
+ /// in-line within the instruction itself.
+ ///
+ /// This has to be defined eagerly due to the implementation constraints of
+ /// `PointerSumType` where it is used.
+ class ExtraInfo final
+ : TrailingObjects<ExtraInfo, MachineMemOperand *, MCSymbol *> {
+ public:
+ static ExtraInfo *create(BumpPtrAllocator &Allocator,
+ ArrayRef<MachineMemOperand *> MMOs,
+ MCSymbol *PreInstrSymbol = nullptr,
+ MCSymbol *PostInstrSymbol = nullptr) {
+ bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
+ bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
+ auto *Result = new (Allocator.Allocate(
+ totalSizeToAlloc<MachineMemOperand *, MCSymbol *>(
+ MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol),
+ alignof(ExtraInfo)))
+ ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol);
+
+ // Copy the actual data into the trailing objects.
+ std::copy(MMOs.begin(), MMOs.end(),
+ Result->getTrailingObjects<MachineMemOperand *>());
+
+ if (HasPreInstrSymbol)
+ Result->getTrailingObjects<MCSymbol *>()[0] = PreInstrSymbol;
+ if (HasPostInstrSymbol)
+ Result->getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol] =
+ PostInstrSymbol;
+
+ return Result;
+ }
+
+ ArrayRef<MachineMemOperand *> getMMOs() const {
+ return makeArrayRef(getTrailingObjects<MachineMemOperand *>(), NumMMOs);
+ }
+
+ MCSymbol *getPreInstrSymbol() const {
+ return HasPreInstrSymbol ? getTrailingObjects<MCSymbol *>()[0] : nullptr;
+ }
+
+ MCSymbol *getPostInstrSymbol() const {
+ return HasPostInstrSymbol
+ ? getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol]
+ : nullptr;
+ }
+
+ private:
+ friend TrailingObjects;
+
+ // Description of the extra info, used to interpret the actual optional
+ // data appended.
+ //
+ // Note that this is not terribly space optimized. This leaves a great deal
+ // of flexibility to fit more in here later.
+ const int NumMMOs;
+ const bool HasPreInstrSymbol;
+ const bool HasPostInstrSymbol;
+
+ // Implement the `TrailingObjects` internal API.
+ size_t numTrailingObjects(OverloadToken<MachineMemOperand *>) const {
+ return NumMMOs;
+ }
+ size_t numTrailingObjects(OverloadToken<MCSymbol *>) const {
+ return HasPreInstrSymbol + HasPostInstrSymbol;
+ }
+
+ // Just a boring constructor to allow us to initialize the sizes. Always use
+ // the `create` routine above.
+ ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol)
+ : NumMMOs(NumMMOs), HasPreInstrSymbol(HasPreInstrSymbol),
+ HasPostInstrSymbol(HasPostInstrSymbol) {}
+ };
+
+ /// Enumeration of the kinds of inline extra info available. It is important
+ /// that the `MachineMemOperand` inline kind has a tag value of zero to make
+ /// it accessible as an `ArrayRef`.
+ enum ExtraInfoInlineKinds {
+ EIIK_MMO = 0,
+ EIIK_PreInstrSymbol,
+ EIIK_PostInstrSymbol,
+ EIIK_OutOfLine
+ };
+
+ // We store extra information about the instruction here. The common case is
+ // expected to be nothing or a single pointer (typically a MMO or a symbol).
+ // We work to optimize this common case by storing it inline here rather than
+ // requiring a separate allocation, but we fall back to an allocation when
+ // multiple pointers are needed.
+ PointerSumType<ExtraInfoInlineKinds,
+ PointerSumTypeMember<EIIK_MMO, MachineMemOperand *>,
+ PointerSumTypeMember<EIIK_PreInstrSymbol, MCSymbol *>,
+ PointerSumTypeMember<EIIK_PostInstrSymbol, MCSymbol *>,
+ PointerSumTypeMember<EIIK_OutOfLine, ExtraInfo *>>
+ Info;
DebugLoc debugLoc; // Source line information.
@@ -412,28 +510,70 @@
return I - operands_begin();
}
- /// Access to memory operands of the instruction
- mmo_iterator memoperands_begin() const { return MemRefs; }
- mmo_iterator memoperands_end() const { return MemRefs + NumMemRefs; }
+ /// Access to memory operands of the instruction. If there are none, that does
+ /// not imply anything about whether the function accesses memory. Instead,
+ /// the caller must behave conservatively.
+ ArrayRef<MachineMemOperand *> memoperands() const {
+ if (!Info)
+ return {};
+
+ if (Info.is<EIIK_MMO>())
+ return makeArrayRef(Info.getAddrOfZeroTagPointer(), 1);
+
+ if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
+ return EI->getMMOs();
+
+ return {};
+ }
+
+ /// Access to memory operands of the instruction.
+ ///
+ /// If `memoperands_begin() == memoperands_end()`, that does not imply
+ /// anything about whether the function accesses memory. Instead, the caller
+ /// must behave conservatively.
+ mmo_iterator memoperands_begin() const { return memoperands().begin(); }
+
+ /// Access to memory operands of the instruction.
+ ///
+ /// If `memoperands_begin() == memoperands_end()`, that does not imply
+ /// anything about whether the function accesses memory. Instead, the caller
+ /// must behave conservatively.
+ mmo_iterator memoperands_end() const { return memoperands().end(); }
+
/// Return true if we don't have any memory operands which described the
/// memory access done by this instruction. If this is true, calling code
/// must be conservative.
- bool memoperands_empty() const { return NumMemRefs == 0; }
-
- iterator_range<mmo_iterator> memoperands() {
- return make_range(memoperands_begin(), memoperands_end());
- }
- iterator_range<mmo_iterator> memoperands() const {
- return make_range(memoperands_begin(), memoperands_end());
- }
+ bool memoperands_empty() const { return memoperands().empty(); }
/// Return true if this instruction has exactly one MachineMemOperand.
- bool hasOneMemOperand() const {
- return NumMemRefs == 1;
- }
+ bool hasOneMemOperand() const { return memoperands().size() == 1; }
/// Return the number of memory operands.
- unsigned getNumMemOperands() const { return NumMemRefs; }
+ unsigned getNumMemOperands() const { return memoperands().size(); }
+
+ /// Helper to extract a pre-instruction symbol if one has been added.
+ MCSymbol *getPreInstrSymbol() const {
+ if (!Info)
+ return nullptr;
+ if (MCSymbol *S = Info.get<EIIK_PreInstrSymbol>())
+ return S;
+ if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
+ return EI->getPreInstrSymbol();
+
+ return nullptr;
+ }
+
+ /// Helper to extract a post-instruction symbol if one has been added.
+ MCSymbol *getPostInstrSymbol() const {
+ if (!Info)
+ return nullptr;
+ if (MCSymbol *S = Info.get<EIIK_PostInstrSymbol>())
+ return S;
+ if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
+ return EI->getPostInstrSymbol();
+
+ return nullptr;
+ }
/// API for querying MachineInstr properties. They are the same as MCInstrDesc
/// queries but they are bundle aware.
@@ -450,6 +590,8 @@
/// The second argument indicates whether the query should look inside
/// instruction bundles.
bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const {
+ assert(MCFlag < 64 &&
+ "MCFlag out of range for bit mask in getFlags/hasPropertyInBundle.");
// Inline the fast path for unbundled or bundle-internal instructions.
if (Type == IgnoreBundle || !isBundled() || isBundledWithPred())
return getDesc().getFlags() & (1ULL << MCFlag);
@@ -482,6 +624,12 @@
return hasProperty(MCID::Return, Type);
}
+ /// Return true if this is an instruction that marks the end of an EH scope,
+ /// i.e., a catchpad or a cleanuppad instruction.
+ bool isEHScopeReturn(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::EHScopeReturn, Type);
+ }
+
bool isCall(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::Call, Type);
}
@@ -1323,47 +1471,63 @@
/// fewer operand than it started with.
void RemoveOperand(unsigned OpNo);
+ /// Clear this MachineInstr's memory reference descriptor list. This resets
+ /// the memrefs to their most conservative state. This should be used only
+ /// as a last resort since it greatly pessimizes our knowledge of the memory
+ /// access performed by the instruction.
+ void dropMemRefs(MachineFunction &MF);
+
+ /// Assign this MachineInstr's memory reference descriptor list.
+ ///
+ /// Unlike other methods, this *will* allocate them into a new array
+ /// associated with the provided `MachineFunction`.
+ void setMemRefs(MachineFunction &MF, ArrayRef<MachineMemOperand *> MemRefs);
+
/// Add a MachineMemOperand to the machine instruction.
/// This function should be used only occasionally. The setMemRefs function
/// is the primary method for setting up a MachineInstr's MemRefs list.
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO);
- /// Assign this MachineInstr's memory reference descriptor list.
- /// This does not transfer ownership.
- void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
- setMemRefs(std::make_pair(NewMemRefs, NewMemRefsEnd-NewMemRefs));
- }
+ /// Clone another MachineInstr's memory reference descriptor list and replace
+ /// ours with it.
+ ///
+ /// Note that `*this` may be the incoming MI!
+ ///
+ /// Prefer this API whenever possible as it can avoid allocations in common
+ /// cases.
+ void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI);
- /// Assign this MachineInstr's memory reference descriptor list. First
- /// element in the pair is the begin iterator/pointer to the array; the
- /// second is the number of MemoryOperands. This does not transfer ownership
- /// of the underlying memory.
- void setMemRefs(std::pair<mmo_iterator, unsigned> NewMemRefs) {
- MemRefs = NewMemRefs.first;
- NumMemRefs = uint8_t(NewMemRefs.second);
- assert(NumMemRefs == NewMemRefs.second &&
- "Too many memrefs - must drop memory operands");
- }
+ /// Clone the merge of multiple MachineInstrs' memory reference descriptors
+ /// list and replace ours with it.
+ ///
+ /// Note that `*this` may be one of the incoming MIs!
+ ///
+ /// Prefer this API whenever possible as it can avoid allocations in common
+ /// cases.
+ void cloneMergedMemRefs(MachineFunction &MF,
+ ArrayRef<const MachineInstr *> MIs);
- /// Return a set of memrefs (begin iterator, size) which conservatively
- /// describe the memory behavior of both MachineInstrs. This is appropriate
- /// for use when merging two MachineInstrs into one. This routine does not
- /// modify the memrefs of the this MachineInstr.
- std::pair<mmo_iterator, unsigned> mergeMemRefsWith(const MachineInstr& Other);
+ /// Set a symbol that will be emitted just prior to the instruction itself.
+ ///
+ /// Setting this to a null pointer will remove any such symbol.
+ ///
+ /// FIXME: This is not fully implemented yet.
+ void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol);
+
+ /// Set a symbol that will be emitted just after the instruction itself.
+ ///
+ /// Setting this to a null pointer will remove any such symbol.
+ ///
+ /// FIXME: This is not fully implemented yet.
+ void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol);
/// Return the MIFlags which represent both MachineInstrs. This
/// should be used when merging two MachineInstrs into one. This routine does
/// not modify the MIFlags of this MachineInstr.
uint16_t mergeFlagsWith(const MachineInstr& Other) const;
- /// Clear this MachineInstr's memory reference descriptor list. This resets
- /// the memrefs to their most conservative state. This should be used only
- /// as a last resort since it greatly pessimizes our knowledge of the memory
- /// access performed by the instruction.
- void dropMemRefs() {
- MemRefs = nullptr;
- NumMemRefs = 0;
- }
+ /// Copy all flags to MachineInstr MIFlags
+ void copyIRFlags(const Instruction &I);
/// Break any tie involving OpIdx.
void untieRegOperand(unsigned OpIdx) {
@@ -1377,6 +1541,13 @@
/// Add all implicit def and use operands to this instruction.
void addImplicitDefUseOperands(MachineFunction &MF);
+ /// Scan instructions following MI and collect any matching DBG_VALUEs.
+ void collectDebugValues(SmallVectorImpl<MachineInstr *> &DbgValues);
+
+ /// Find all DBG_VALUEs immediately following this instruction that point
+ /// to a register def in this instruction and point them to \p Reg instead.
+ void changeDebugValuesDefReg(unsigned Reg);
+
private:
/// If this instruction is embedded into a MachineFunction, return the
/// MachineRegisterInfo object for the current function, otherwise
@@ -1394,7 +1565,7 @@
void AddRegOperandsToUseLists(MachineRegisterInfo&);
/// Slow path for hasProperty when we're dealing with a bundle.
- bool hasPropertyInBundle(unsigned Mask, QueryType Type) const;
+ bool hasPropertyInBundle(uint64_t Mask, QueryType Type) const;
/// Implements the logic of getRegClassConstraintEffectForVReg for the
/// this MI and the given operand index \p OpIdx.
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineInstrBuilder.h b/linux-x64/clang/include/llvm/CodeGen/MachineInstrBuilder.h
index 6656087..b5e523f 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -191,15 +191,20 @@
return *this;
}
- const MachineInstrBuilder &setMemRefs(MachineInstr::mmo_iterator b,
- MachineInstr::mmo_iterator e) const {
- MI->setMemRefs(b, e);
+ const MachineInstrBuilder &
+ setMemRefs(ArrayRef<MachineMemOperand *> MMOs) const {
+ MI->setMemRefs(*MF, MMOs);
return *this;
}
- const MachineInstrBuilder &setMemRefs(std::pair<MachineInstr::mmo_iterator,
- unsigned> MemOperandsRef) const {
- MI->setMemRefs(MemOperandsRef);
+ const MachineInstrBuilder &cloneMemRefs(const MachineInstr &OtherMI) const {
+ MI->cloneMemRefs(*MF, OtherMI);
+ return *this;
+ }
+
+ const MachineInstrBuilder &
+ cloneMergedMemRefs(ArrayRef<const MachineInstr *> OtherMIs) const {
+ MI->cloneMergedMemRefs(*MF, OtherMIs);
return *this;
}
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfoImpls.h b/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfoImpls.h
index 6a87fa2..17df1fa 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfoImpls.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfoImpls.h
@@ -80,6 +80,28 @@
SymbolListTy GetGVStubList() { return getSortedStubs(GVStubs); }
};
+/// MachineModuleInfoCOFF - This is a MachineModuleInfoImpl implementation
+/// for COFF targets.
+class MachineModuleInfoCOFF : public MachineModuleInfoImpl {
+ /// GVStubs - These stubs are used to materialize global addresses in PIC
+ /// mode.
+ DenseMap<MCSymbol *, StubValueTy> GVStubs;
+
+ virtual void anchor(); // Out of line virtual method.
+
+public:
+ MachineModuleInfoCOFF(const MachineModuleInfo &) {}
+
+ StubValueTy &getGVStubEntry(MCSymbol *Sym) {
+ assert(Sym && "Key cannot be null");
+ return GVStubs[Sym];
+ }
+
+ /// Accessor methods to return the set of stubs in sorted order.
+
+ SymbolListTy GetGVStubList() { return getSortedStubs(GVStubs); }
+};
+
} // end namespace llvm
#endif // LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineRegisterInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineRegisterInfo.h
index 5bf4a49..a6836a5 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -717,6 +717,10 @@
unsigned createVirtualRegister(const TargetRegisterClass *RegClass,
StringRef Name = "");
+ /// Create and return a new virtual register in the function with the same
+ /// attributes as the given register.
+ unsigned cloneVirtualRegister(unsigned VReg, StringRef Name = "");
+
/// Get the low-level type of \p Reg or LLT{} if Reg is not a generic
/// (target independent) virtual register.
LLT getType(unsigned Reg) const {
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineScheduler.h b/linux-x64/clang/include/llvm/CodeGen/MachineScheduler.h
index 85ffa4e..6003b1b 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineScheduler.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineScheduler.h
@@ -466,6 +466,9 @@
PressureDiff &getPressureDiff(const SUnit *SU) {
return SUPressureDiffs[SU->NodeNum];
}
+ const PressureDiff &getPressureDiff(const SUnit *SU) const {
+ return SUPressureDiffs[SU->NodeNum];
+ }
/// Compute a DFSResult after DAG building is complete, and before any
/// queue comparisons.
@@ -491,6 +494,8 @@
/// Compute the cyclic critical path through the DAG.
unsigned computeCyclicCriticalPath();
+ void dump() const override;
+
protected:
// Top-Level entry points for the schedule() driver...
@@ -895,6 +900,10 @@
#ifndef NDEBUG
void traceCandidate(const SchedCandidate &Cand);
#endif
+
+private:
+ bool shouldReduceLatency(const CandPolicy &Policy, SchedBoundary &CurrZone,
+ bool ComputeRemLatency, unsigned &RemLatency) const;
};
// Utility functions used by heuristics in tryCandidate().
diff --git a/linux-x64/clang/include/llvm/CodeGen/PseudoSourceValue.h b/linux-x64/clang/include/llvm/CodeGen/PseudoSourceValue.h
index bdf0bb7..f66191b 100644
--- a/linux-x64/clang/include/llvm/CodeGen/PseudoSourceValue.h
+++ b/linux-x64/clang/include/llvm/CodeGen/PseudoSourceValue.h
@@ -36,7 +36,7 @@
/// below the stack frame (e.g., argument space), or constant pool.
class PseudoSourceValue {
public:
- enum PSVKind {
+ enum PSVKind : unsigned {
Stack,
GOT,
JumpTable,
@@ -48,7 +48,7 @@
};
private:
- PSVKind Kind;
+ unsigned Kind;
unsigned AddressSpace;
friend raw_ostream &llvm::operator<<(raw_ostream &OS,
const PseudoSourceValue* PSV);
@@ -60,11 +60,11 @@
virtual void printCustom(raw_ostream &O) const;
public:
- explicit PseudoSourceValue(PSVKind Kind, const TargetInstrInfo &TII);
+ explicit PseudoSourceValue(unsigned Kind, const TargetInstrInfo &TII);
virtual ~PseudoSourceValue();
- PSVKind kind() const { return Kind; }
+ unsigned kind() const { return Kind; }
bool isStack() const { return Kind == Stack; }
bool isGOT() const { return Kind == GOT; }
@@ -116,7 +116,7 @@
class CallEntryPseudoSourceValue : public PseudoSourceValue {
protected:
- CallEntryPseudoSourceValue(PSVKind Kind, const TargetInstrInfo &TII);
+ CallEntryPseudoSourceValue(unsigned Kind, const TargetInstrInfo &TII);
public:
bool isConstant(const MachineFrameInfo *) const override;
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScheduleDAG.h b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAG.h
index 56adc2e..f2b0727 100644
--- a/linux-x64/clang/include/llvm/CodeGen/ScheduleDAG.h
+++ b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAG.h
@@ -236,8 +236,7 @@
Contents.Reg = Reg;
}
- raw_ostream &print(raw_ostream &O,
- const TargetRegisterInfo *TRI = nullptr) const;
+ void dump(const TargetRegisterInfo *TRI = nullptr) const;
};
template <>
@@ -459,12 +458,7 @@
/// edge occurs first.
void biasCriticalPath();
- void dump(const ScheduleDAG *G) const;
- void dumpAll(const ScheduleDAG *G) const;
- raw_ostream &print(raw_ostream &O,
- const SUnit *Entry = nullptr,
- const SUnit *Exit = nullptr) const;
- raw_ostream &print(raw_ostream &O, const ScheduleDAG *G) const;
+ void dumpAttributes() const;
private:
void ComputeDepth();
@@ -597,7 +591,9 @@
virtual void viewGraph(const Twine &Name, const Twine &Title);
virtual void viewGraph();
- virtual void dumpNode(const SUnit *SU) const = 0;
+ virtual void dumpNode(const SUnit &SU) const = 0;
+ virtual void dump() const = 0;
+ void dumpNodeName(const SUnit &SU) const;
/// Returns a label for an SUnit node in a visualization of the ScheduleDAG.
virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;
@@ -614,6 +610,9 @@
unsigned VerifyScheduledDAG(bool isBottomUp);
#endif
+ protected:
+ void dumpNodeAll(const SUnit &SU) const;
+
private:
/// Returns the MCInstrDesc of this SDNode or NULL.
const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGInstrs.h b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGInstrs.h
index 520a238..daad181 100644
--- a/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGInstrs.h
+++ b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGInstrs.h
@@ -327,7 +327,8 @@
/// whole MachineFunction. By default does nothing.
virtual void finalizeSchedule() {}
- void dumpNode(const SUnit *SU) const override;
+ void dumpNode(const SUnit &SU) const override;
+ void dump() const override;
/// Returns a label for a DAG node that points to an instruction.
std::string getGraphNodeLabel(const SUnit *SU) const override;
diff --git a/linux-x64/clang/include/llvm/CodeGen/SelectionDAG.h b/linux-x64/clang/include/llvm/CodeGen/SelectionDAG.h
index c691d44..973a3dd 100644
--- a/linux-x64/clang/include/llvm/CodeGen/SelectionDAG.h
+++ b/linux-x64/clang/include/llvm/CodeGen/SelectionDAG.h
@@ -28,7 +28,7 @@
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/DivergenceAnalysis.h"
+#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
@@ -188,8 +188,8 @@
return DbgValues.empty() && ByvalParmDbgValues.empty() && DbgLabels.empty();
}
- ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) {
- DbgValMapType::iterator I = DbgValMap.find(Node);
+ ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) const {
+ auto I = DbgValMap.find(Node);
if (I != DbgValMap.end())
return I->second;
return ArrayRef<SDDbgValue*>();
@@ -229,7 +229,7 @@
LLVMContext *Context;
CodeGenOpt::Level OptLevel;
- DivergenceAnalysis * DA = nullptr;
+ LegacyDivergenceAnalysis * DA = nullptr;
FunctionLoweringInfo * FLI = nullptr;
/// The function-level optimization remark emitter. Used to emit remarks
@@ -382,7 +382,7 @@
/// Prepare this SelectionDAG to process code in the given MachineFunction.
void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE,
Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
- DivergenceAnalysis * Divergence);
+ LegacyDivergenceAnalysis * Divergence);
void setFunctionLoweringInfo(FunctionLoweringInfo * FuncInfo) {
FLI = FuncInfo;
@@ -471,7 +471,9 @@
return Root;
}
+#ifndef NDEBUG
void VerifyDAGDiverence();
+#endif
/// This iterates over the nodes in the SelectionDAG, folding
/// certain types of nodes together, or eliminating superfluous nodes. The
@@ -1156,6 +1158,11 @@
SDValue Op3, SDValue Op4, SDValue Op5);
SDNode *UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops);
+ /// *Mutate* the specified machine node's memory references to the provided
+ /// list.
+ void setNodeMemRefs(MachineSDNode *N,
+ ArrayRef<MachineMemOperand *> NewMemRefs);
+
// Propagates the change in divergence to users
void updateDivergence(SDNode * N);
@@ -1346,7 +1353,7 @@
void AddDbgLabel(SDDbgLabel *DB);
/// Get the debug values which reference the given SDNode.
- ArrayRef<SDDbgValue*> GetDbgValues(const SDNode* SD) {
+ ArrayRef<SDDbgValue*> GetDbgValues(const SDNode* SD) const {
return DbgInfo->getSDDbgValues(SD);
}
@@ -1429,15 +1436,27 @@
/// every vector element.
/// Targets can implement the computeKnownBitsForTargetNode method in the
/// TargetLowering class to allow target nodes to be understood.
- void computeKnownBits(SDValue Op, KnownBits &Known, unsigned Depth = 0) const;
+ KnownBits computeKnownBits(SDValue Op, unsigned Depth = 0) const;
/// Determine which bits of Op are known to be either zero or one and return
/// them in Known. The DemandedElts argument allows us to only collect the
/// known bits that are shared by the requested vector elements.
/// Targets can implement the computeKnownBitsForTargetNode method in the
/// TargetLowering class to allow target nodes to be understood.
+ KnownBits computeKnownBits(SDValue Op, const APInt &DemandedElts,
+ unsigned Depth = 0) const;
+
+ /// \copydoc SelectionDAG::computeKnownBits(SDValue,unsigned)
+ void computeKnownBits(SDValue Op, KnownBits &Known,
+ unsigned Depth = 0) const {
+ Known = computeKnownBits(Op, Depth);
+ }
+
+ /// \copydoc SelectionDAG::computeKnownBits(SDValue,const APInt&,unsigned)
void computeKnownBits(SDValue Op, KnownBits &Known, const APInt &DemandedElts,
- unsigned Depth = 0) const;
+ unsigned Depth = 0) const {
+ Known = computeKnownBits(Op, DemandedElts, Depth);
+ }
/// Used to represent the possible overflow behavior of an operation.
/// Never: the operation cannot overflow.
diff --git a/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h
index a6d7e76..28d27b7 100644
--- a/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1589,15 +1589,23 @@
/// Returns true if \p V is a constant integer one.
bool isOneConstant(SDValue V);
+/// Return the non-bitcasted source operand of \p V if it exists.
+/// If \p V is not a bitcasted value, it is returned as-is.
+SDValue peekThroughBitcasts(SDValue V);
+
+/// Return the non-bitcasted and one-use source operand of \p V if it exists.
+/// If \p V is not a bitcasted one-use value, it is returned as-is.
+SDValue peekThroughOneUseBitcasts(SDValue V);
+
/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
/// constant is canonicalized to be operand 1.
bool isBitwiseNot(SDValue V);
/// Returns the SDNode if it is a constant splat BuildVector or constant int.
-ConstantSDNode *isConstOrConstSplat(SDValue N);
+ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false);
/// Returns the SDNode if it is a constant splat BuildVector or constant float.
-ConstantFPSDNode *isConstOrConstSplatFP(SDValue N);
+ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);
class GlobalAddressSDNode : public SDNode {
friend class SelectionDAG;
@@ -2113,12 +2121,15 @@
MachineMemOperand *MMO)
: MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {}
- // In the both nodes address is Op1, mask is Op2:
- // MaskedLoadSDNode (Chain, ptr, mask, src0), src0 is a passthru value
- // MaskedStoreSDNode (Chain, ptr, mask, data)
+ // MaskedLoadSDNode (Chain, ptr, mask, passthru)
+ // MaskedStoreSDNode (Chain, data, ptr, mask)
// Mask is a vector of i1 elements
- const SDValue &getBasePtr() const { return getOperand(1); }
- const SDValue &getMask() const { return getOperand(2); }
+ const SDValue &getBasePtr() const {
+ return getOperand(getOpcode() == ISD::MLOAD ? 1 : 2);
+ }
+ const SDValue &getMask() const {
+ return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3);
+ }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MLOAD ||
@@ -2143,7 +2154,10 @@
return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
}
+ const SDValue &getBasePtr() const { return getOperand(1); }
+ const SDValue &getMask() const { return getOperand(2); }
const SDValue &getPassThru() const { return getOperand(3); }
+
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MLOAD;
}
@@ -2175,7 +2189,9 @@
/// memory at base_addr.
bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
- const SDValue &getValue() const { return getOperand(3); }
+ const SDValue &getValue() const { return getOperand(1); }
+ const SDValue &getBasePtr() const { return getOperand(2); }
+ const SDValue &getMask() const { return getOperand(3); }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MSTORE;
@@ -2246,32 +2262,60 @@
/// An SDNode that represents everything that will be needed
/// to construct a MachineInstr. These nodes are created during the
/// instruction selection proper phase.
+///
+/// Note that the only supported way to set the `memoperands` is by calling the
+/// `SelectionDAG::setNodeMemRefs` function as the memory management happens
+/// inside the DAG rather than in the node.
class MachineSDNode : public SDNode {
-public:
- using mmo_iterator = MachineMemOperand **;
-
private:
friend class SelectionDAG;
MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
: SDNode(Opc, Order, DL, VTs) {}
- /// Memory reference descriptions for this instruction.
- mmo_iterator MemRefs = nullptr;
- mmo_iterator MemRefsEnd = nullptr;
+ // We use a pointer union between a single `MachineMemOperand` pointer and
+ // a pointer to an array of `MachineMemOperand` pointers. This is null when
+ // the number of these is zero, the single pointer variant used when the
+ // number is one, and the array is used for larger numbers.
+ //
+ // The array is allocated via the `SelectionDAG`'s allocator and so will
+ // always live until the DAG is cleaned up and doesn't require ownership here.
+ //
+ // We can't use something simpler like `TinyPtrVector` here because `SDNode`
+ // subclasses aren't managed in a conforming C++ manner. See the comments on
+ // `SelectionDAG::MorphNodeTo` which details what all goes on, but the
+ // constraint here is that these don't manage memory with their constructor or
+ // destructor and can be initialized to a good state even if they start off
+ // uninitialized.
+ PointerUnion<MachineMemOperand *, MachineMemOperand **> MemRefs = {};
+
+ // Note that this could be folded into the above `MemRefs` member if doing so
+ // is advantageous at some point. We don't need to store this in most cases.
+ // However, at the moment this doesn't appear to make the allocation any
+ // smaller and makes the code somewhat simpler to read.
+ int NumMemRefs = 0;
public:
- mmo_iterator memoperands_begin() const { return MemRefs; }
- mmo_iterator memoperands_end() const { return MemRefsEnd; }
- bool memoperands_empty() const { return MemRefsEnd == MemRefs; }
+ using mmo_iterator = ArrayRef<MachineMemOperand *>::const_iterator;
- /// Assign this MachineSDNodes's memory reference descriptor
- /// list. This does not transfer ownership.
- void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
- for (mmo_iterator MMI = NewMemRefs, MME = NewMemRefsEnd; MMI != MME; ++MMI)
- assert(*MMI && "Null mem ref detected!");
- MemRefs = NewMemRefs;
- MemRefsEnd = NewMemRefsEnd;
+ ArrayRef<MachineMemOperand *> memoperands() const {
+ // Special case the common cases.
+ if (NumMemRefs == 0)
+ return {};
+ if (NumMemRefs == 1)
+ return makeArrayRef(MemRefs.getAddrOfPtr1(), 1);
+
+ // Otherwise we have an actual array.
+ return makeArrayRef(MemRefs.get<MachineMemOperand **>(), NumMemRefs);
+ }
+ mmo_iterator memoperands_begin() const { return memoperands().begin(); }
+ mmo_iterator memoperands_end() const { return memoperands().end(); }
+ bool memoperands_empty() const { return memoperands().empty(); }
+
+ /// Clear out the memory reference descriptor list.
+ void clearMemRefs() {
+ MemRefs = nullptr;
+ NumMemRefs = 0;
}
static bool classof(const SDNode *N) {
@@ -2408,6 +2452,18 @@
cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
}
+ /// Return true if the node is a math/logic binary operator. This corresponds
+ /// to the IR function of the same name.
+ inline bool isBinaryOp(const SDNode *N) {
+ auto Op = N->getOpcode();
+ return (Op == ISD::ADD || Op == ISD::SUB || Op == ISD::MUL ||
+ Op == ISD::AND || Op == ISD::OR || Op == ISD::XOR ||
+ Op == ISD::SHL || Op == ISD::SRL || Op == ISD::SRA ||
+ Op == ISD::SDIV || Op == ISD::UDIV || Op == ISD::SREM ||
+ Op == ISD::UREM || Op == ISD::FADD || Op == ISD::FSUB ||
+ Op == ISD::FMUL || Op == ISD::FDIV || Op == ISD::FREM);
+ }
+
/// Attempt to match a unary predicate against a scalar/splat constant or
/// every element of a constant BUILD_VECTOR.
bool matchUnaryPredicate(SDValue Op,
diff --git a/linux-x64/clang/include/llvm/CodeGen/SlotIndexes.h b/linux-x64/clang/include/llvm/CodeGen/SlotIndexes.h
index 334267d..5508222 100644
--- a/linux-x64/clang/include/llvm/CodeGen/SlotIndexes.h
+++ b/linux-x64/clang/include/llvm/CodeGen/SlotIndexes.h
@@ -414,6 +414,8 @@
SlotIndex getInstructionIndex(const MachineInstr &MI) const {
// Instructions inside a bundle have the same number as the bundle itself.
const MachineInstr &BundleStart = *getBundleStart(MI.getIterator());
+ assert(!BundleStart.isDebugInstr() &&
+ "Could not use a debug instruction to query mi2iMap.");
Mi2IndexMap::const_iterator itr = mi2iMap.find(&BundleStart);
assert(itr != mi2iMap.end() && "Instruction not found in maps.");
return itr->second;
@@ -674,7 +676,7 @@
idx2MBBMap.push_back(IdxMBBPair(startIdx, mbb));
renumberIndexes(newItr);
- llvm::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
+ llvm::sort(idx2MBBMap, Idx2MBBCompare());
}
/// Free the resources that were required to maintain a SlotIndex.
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetInstrInfo.h b/linux-x64/clang/include/llvm/CodeGen/TargetInstrInfo.h
index b5bc561..1fa3de4 100644
--- a/linux-x64/clang/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetInstrInfo.h
@@ -246,14 +246,14 @@
}
/// If the specified machine instruction has a load from a stack slot,
- /// return true along with the FrameIndex of the loaded stack slot and the
- /// machine mem operand containing the reference.
+ /// return true along with the FrameIndices of the loaded stack slot and the
+ /// machine mem operands containing the reference.
/// If not, return false. Unlike isLoadFromStackSlot, this returns true for
/// any instructions that loads from the stack. This is just a hint, as some
/// cases may be missed.
- virtual bool hasLoadFromStackSlot(const MachineInstr &MI,
- const MachineMemOperand *&MMO,
- int &FrameIndex) const;
+ virtual bool hasLoadFromStackSlot(
+ const MachineInstr &MI,
+ SmallVectorImpl<const MachineMemOperand *> &Accesses) const;
/// If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
@@ -284,14 +284,14 @@
}
/// If the specified machine instruction has a store to a stack slot,
- /// return true along with the FrameIndex of the loaded stack slot and the
- /// machine mem operand containing the reference.
+ /// return true along with the FrameIndices of the loaded stack slot and the
+ /// machine mem operands containing the reference.
/// If not, return false. Unlike isStoreToStackSlot,
/// this returns true for any instructions that stores to the
/// stack. This is just a hint, as some cases may be missed.
- virtual bool hasStoreToStackSlot(const MachineInstr &MI,
- const MachineMemOperand *&MMO,
- int &FrameIndex) const;
+ virtual bool hasStoreToStackSlot(
+ const MachineInstr &MI,
+ SmallVectorImpl<const MachineMemOperand *> &Accesses) const;
/// Return true if the specified machine instruction
/// is a copy of one stack slot to another and has no other effect.
@@ -846,15 +846,33 @@
llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
}
+protected:
+ /// Target-dependent implementation for IsCopyInstr.
/// If the specific machine instruction is a instruction that moves/copies
/// value from one register to another register return true along with
/// @Source machine operand and @Destination machine operand.
- virtual bool isCopyInstr(const MachineInstr &MI,
- const MachineOperand *&SourceOpNum,
- const MachineOperand *&Destination) const {
+ virtual bool isCopyInstrImpl(const MachineInstr &MI,
+ const MachineOperand *&Source,
+ const MachineOperand *&Destination) const {
return false;
}
+public:
+ /// If the specific machine instruction is an instruction that moves/copies
+ /// value from one register to another register return true along with
+ /// @Source machine operand and @Destination machine operand.
+ /// For COPY-instruction the method naturally returns true, for all other
+ /// instructions the method calls target-dependent implementation.
+ bool isCopyInstr(const MachineInstr &MI, const MachineOperand *&Source,
+ const MachineOperand *&Destination) const {
+ if (MI.isCopy()) {
+ Destination = &MI.getOperand(0);
+ Source = &MI.getOperand(1);
+ return true;
+ }
+ return isCopyInstrImpl(MI, Source, Destination);
+ }
+
/// Store the specified register of the given register class to the specified
/// stack frame index. The store instruction is to be added to the given
/// machine basic block before the specified machine instruction. If isKill
@@ -1063,7 +1081,7 @@
/// getAddressSpaceForPseudoSourceKind - Given the kind of memory
/// (e.g. stack) the target returns the corresponding address space.
virtual unsigned
- getAddressSpaceForPseudoSourceKind(PseudoSourceValue::PSVKind Kind) const {
+ getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
return 0;
}
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h b/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h
index dce4168..a593907 100644
--- a/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h
@@ -29,7 +29,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Analysis/DivergenceAnalysis.h"
+#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
@@ -163,6 +163,7 @@
LLOnly, // Expand the (load) instruction into just a load-linked, which has
// greater atomic guarantees than a normal load.
CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
+ MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
};
/// Enum that specifies when a multiplication should be expanded.
@@ -545,6 +546,12 @@
return false;
}
+ /// Return true if inserting a scalar into a variable element of an undef
+ /// vector is more efficiently handled by splatting the scalar instead.
+ virtual bool shouldSplatInsEltVarIndex(EVT) const {
+ return false;
+ }
+
/// Return true if target supports floating point exceptions.
bool hasFloatingPointExceptions() const {
return HasFloatingPointExceptions;
@@ -798,6 +805,7 @@
case ISD::STRICT_FSUB: EqOpc = ISD::FSUB; break;
case ISD::STRICT_FMUL: EqOpc = ISD::FMUL; break;
case ISD::STRICT_FDIV: EqOpc = ISD::FDIV; break;
+ case ISD::STRICT_FREM: EqOpc = ISD::FREM; break;
case ISD::STRICT_FSQRT: EqOpc = ISD::FSQRT; break;
case ISD::STRICT_FPOW: EqOpc = ISD::FPOW; break;
case ISD::STRICT_FPOWI: EqOpc = ISD::FPOWI; break;
@@ -1428,6 +1436,12 @@
return PrefLoopAlignment;
}
+ /// Should loops be aligned even when the function is marked OptSize (but not
+ /// MinSize).
+ virtual bool alignLoopsWithOptSize() const {
+ return false;
+ }
+
/// If the target has a standard location for the stack protector guard,
/// returns the address of that location. Otherwise, returns nullptr.
/// DEPRECATED: please override useLoadStackGuardNode and customize
@@ -1549,6 +1563,26 @@
llvm_unreachable("Store conditional unimplemented on this target");
}
+ /// Perform a masked atomicrmw using a target-specific intrinsic. This
+ /// represents the core LL/SC loop which will be lowered at a late stage by
+ /// the backend.
+ virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilder<> &Builder,
+ AtomicRMWInst *AI,
+ Value *AlignedAddr, Value *Incr,
+ Value *Mask, Value *ShiftAmt,
+ AtomicOrdering Ord) const {
+ llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
+ }
+
+ /// Perform a masked cmpxchg using a target-specific intrinsic. This
+ /// represents the core LL/SC loop which will be lowered at a late stage by
+ /// the backend.
+ virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
+ IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
+ Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
+ llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
+ }
+
/// Inserts in the IR a target-specific intrinsic specifying a fence.
/// It is called by AtomicExpandPass before expanding an
/// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
@@ -1625,11 +1659,11 @@
return AtomicExpansionKind::None;
}
- /// Returns true if the given atomic cmpxchg should be expanded by the
- /// IR-level AtomicExpand pass into a load-linked/store-conditional sequence
- /// (through emitLoadLinked() and emitStoreConditional()).
- virtual bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
- return false;
+ /// Returns how the given atomic cmpxchg should be expanded by the IR-level
+ /// AtomicExpand pass.
+ virtual AtomicExpansionKind
+ shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
+ return AtomicExpansionKind::None;
}
/// Returns how the IR-level AtomicExpand pass should expand the given
@@ -1687,6 +1721,15 @@
return false;
}
+ /// Return true if it is profitable to transform an integer
+ /// multiplication-by-constant into simpler operations like shifts and adds.
+ /// This may be true if the target does not directly support the
+ /// multiplication operation for the specified type or the sequence of simpler
+ /// ops is faster than the multiply.
+ virtual bool decomposeMulByConstant(EVT VT, SDValue C) const {
+ return false;
+ }
+
//===--------------------------------------------------------------------===//
// TargetLowering Configuration Methods - These methods should be invoked by
// the derived class constructor to configure this object for the target.
@@ -2648,7 +2691,7 @@
virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
FunctionLoweringInfo *FLI,
- DivergenceAnalysis *DA) const {
+ LegacyDivergenceAnalysis *DA) const {
return false;
}
@@ -2803,7 +2846,8 @@
unsigned Depth = 0,
bool AssumeSingleUse = false) const;
- /// Helper wrapper around SimplifyDemandedBits
+ /// Helper wrapper around SimplifyDemandedBits.
+ /// Adds Op back to the worklist upon success.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
DAGCombinerInfo &DCI) const;
@@ -2826,7 +2870,8 @@
TargetLoweringOpt &TLO, unsigned Depth = 0,
bool AssumeSingleUse = false) const;
- /// Helper wrapper around SimplifyDemandedVectorElts
+ /// Helper wrapper around SimplifyDemandedVectorElts.
+ /// Adds Op back to the worklist upon success.
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
APInt &KnownUndef, APInt &KnownZero,
DAGCombinerInfo &DCI) const;
@@ -2942,12 +2987,16 @@
///
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
- /// Return true if it is profitable to move a following shift through this
- // node, adjusting any immediate operands as necessary to preserve semantics.
- // This transformation may not be desirable if it disrupts a particularly
- // auspicious target-specific tree (e.g. bitfield extraction in AArch64).
- // By default, it returns true.
- virtual bool isDesirableToCommuteWithShift(const SDNode *N) const {
+ /// Return true if it is profitable to move this shift by a constant amount
+ /// though its operand, adjusting any immediate operands as necessary to
+ /// preserve semantics. This transformation may not be desirable if it
+ /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
+ /// extraction in AArch64). By default, it returns true.
+ ///
+ /// @param N the shift node
+ /// @param Level the current DAGCombine legalization level.
+ virtual bool isDesirableToCommuteWithShift(const SDNode *N,
+ CombineLevel Level) const {
return true;
}
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetRegisterInfo.h b/linux-x64/clang/include/llvm/CodeGen/TargetRegisterInfo.h
index 55a8ba6..0fbff31 100644
--- a/linux-x64/clang/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -510,6 +510,13 @@
/// markSuperRegs() and checkAllSuperRegsMarked() in this case.
virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;
+ /// Returns false if we can't guarantee that PhysReg, specified as an IR asm
+ /// clobber constraint, will be preserved across the statement.
+ virtual bool isAsmClobberable(const MachineFunction &MF,
+ unsigned PhysReg) const {
+ return true;
+ }
+
/// Returns true if PhysReg is unallocatable and constant throughout the
/// function. Used by MachineRegisterInfo::isConstantPhysReg().
virtual bool isConstantPhysReg(unsigned PhysReg) const { return false; }
@@ -817,13 +824,6 @@
// Do nothing.
}
- /// The creation of multiple copy hints have been implemented in
- /// weightCalcHelper(), but since this affects so many tests for many
- /// targets, this is temporarily disabled per default. THIS SHOULD BE
- /// "GENERAL GOODNESS" and hopefully all targets will update their tests
- /// and enable this soon. This hook should then be removed.
- virtual bool enableMultipleCopyHints() const { return false; }
-
/// Allow the target to reverse allocation order of local live ranges. This
/// will generally allocate shorter local live ranges first. For targets with
/// many registers, this could reduce regalloc compile time by a large
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetSubtargetInfo.h b/linux-x64/clang/include/llvm/CodeGen/TargetSubtargetInfo.h
index 227e591..e28673d 100644
--- a/linux-x64/clang/include/llvm/CodeGen/TargetSubtargetInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CODEGEN_TARGETSUBTARGETINFO_H
#define LLVM_CODEGEN_TARGETSUBTARGETINFO_H
+#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -144,6 +145,30 @@
return 0;
}
+ /// Returns true if MI is a dependency breaking zero-idiom instruction for the
+ /// subtarget.
+ ///
+ /// This function also sets bits in Mask related to input operands that
+ /// are not in a data dependency relationship. There is one bit for each
+ /// machine operand; implicit operands follow explicit operands in the bit
+ /// representation used for Mask. An empty (i.e. a mask with all bits
+ /// cleared) means: data dependencies are "broken" for all the explicit input
+ /// machine operands of MI.
+ virtual bool isZeroIdiom(const MachineInstr *MI, APInt &Mask) const {
+ return false;
+ }
+
+ /// Returns true if MI is a dependency breaking instruction for the subtarget.
+ ///
+ /// Similar in behavior to `isZeroIdiom`. However, it knows how to identify
+ /// all dependency breaking instructions (i.e. not just zero-idioms).
+ ///
+ /// As for `isZeroIdiom`, this method returns a mask of "broken" dependencies.
+ /// (See method `isZeroIdiom` for a detailed description of Mask).
+ virtual bool isDependencyBreaking(const MachineInstr *MI, APInt &Mask) const {
+ return isZeroIdiom(MI, Mask);
+ }
+
/// True if the subtarget should run MachineScheduler after aggressive
/// coalescing.
///
diff --git a/linux-x64/clang/include/llvm/CodeGen/WasmEHFuncInfo.h b/linux-x64/clang/include/llvm/CodeGen/WasmEHFuncInfo.h
index 3ad6760..aa97934 100644
--- a/linux-x64/clang/include/llvm/CodeGen/WasmEHFuncInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/WasmEHFuncInfo.h
@@ -14,8 +14,8 @@
#ifndef LLVM_CODEGEN_WASMEHFUNCINFO_H
#define LLVM_CODEGEN_WASMEHFUNCINFO_H
-#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/BasicBlock.h"