Update clang to r339409.
Change-Id: I800772d2d838223be1f6b40d490c4591b937fca2
diff --git a/linux-x64/clang/include/llvm/CodeGen/AccelTable.h b/linux-x64/clang/include/llvm/CodeGen/AccelTable.h
index ec850a8..1392858 100644
--- a/linux-x64/clang/include/llvm/CodeGen/AccelTable.h
+++ b/linux-x64/clang/include/llvm/CodeGen/AccelTable.h
@@ -108,6 +108,8 @@
namespace llvm {
class AsmPrinter;
+class DwarfCompileUnit;
+class DwarfDebug;
/// Interface to which the different types of accelerator table data have to
/// conform. It serves as a base class for different values of the template
@@ -120,8 +122,8 @@
return order() < Other.order();
}
- // Subclasses should implement:
- // static uint32_t hash(StringRef Name);
+ // Subclasses should implement:
+ // static uint32_t hash(StringRef Name);
#ifndef NDEBUG
virtual void print(raw_ostream &OS) const = 0;
@@ -244,6 +246,54 @@
static uint32_t hash(StringRef Buffer) { return djbHash(Buffer); }
};
+/// The Data class implementation for DWARF v5 accelerator table. Unlike the
+/// Apple Data classes, this class is just a DIE wrapper, and does not know
+/// how to serialize itself. The complete serialization logic is in the
+/// emitDWARF5AccelTable function.
+class DWARF5AccelTableData : public AccelTableData {
+public:
+ static uint32_t hash(StringRef Name) { return caseFoldingDjbHash(Name); }
+
+ DWARF5AccelTableData(const DIE &Die) : Die(Die) {}
+
+#ifndef NDEBUG
+ void print(raw_ostream &OS) const override;
+#endif
+
+ const DIE &getDie() const { return Die; }
+ uint64_t getDieOffset() const { return Die.getOffset(); }
+ unsigned getDieTag() const { return Die.getTag(); }
+
+protected:
+ const DIE &Die;
+
+ uint64_t order() const override { return Die.getOffset(); }
+};
+
+class DWARF5AccelTableStaticData : public AccelTableData {
+public:
+ static uint32_t hash(StringRef Name) { return caseFoldingDjbHash(Name); }
+
+ DWARF5AccelTableStaticData(uint64_t DieOffset, unsigned DieTag,
+ unsigned CUIndex)
+ : DieOffset(DieOffset), DieTag(DieTag), CUIndex(CUIndex) {}
+
+#ifndef NDEBUG
+ void print(raw_ostream &OS) const override;
+#endif
+
+ uint64_t getDieOffset() const { return DieOffset; }
+ unsigned getDieTag() const { return DieTag; }
+ unsigned getCUIndex() const { return CUIndex; }
+
+protected:
+ uint64_t DieOffset;
+ unsigned DieTag;
+ unsigned CUIndex;
+
+ uint64_t order() const override { return DieOffset; }
+};
+
void emitAppleAccelTableImpl(AsmPrinter *Asm, AccelTableBase &Contents,
StringRef Prefix, const MCSymbol *SecBegin,
ArrayRef<AppleAccelTableData::Atom> Atoms);
@@ -258,11 +308,22 @@
emitAppleAccelTableImpl(Asm, Contents, Prefix, SecBegin, DataT::Atoms);
}
+void emitDWARF5AccelTable(AsmPrinter *Asm,
+ AccelTable<DWARF5AccelTableData> &Contents,
+ const DwarfDebug &DD,
+ ArrayRef<std::unique_ptr<DwarfCompileUnit>> CUs);
+
+void emitDWARF5AccelTable(
+ AsmPrinter *Asm, AccelTable<DWARF5AccelTableStaticData> &Contents,
+ ArrayRef<MCSymbol *> CUs,
+ llvm::function_ref<unsigned(const DWARF5AccelTableStaticData &)>
+ getCUIndexForEntry);
+
/// Accelerator table data implementation for simple Apple accelerator tables
/// with just a DIE reference.
class AppleAccelTableOffsetData : public AppleAccelTableData {
public:
- AppleAccelTableOffsetData(const DIE *D) : Die(D) {}
+ AppleAccelTableOffsetData(const DIE &D) : Die(D) {}
void emit(AsmPrinter *Asm) const override;
@@ -279,15 +340,15 @@
void print(raw_ostream &OS) const override;
#endif
protected:
- uint64_t order() const override { return Die->getOffset(); }
+ uint64_t order() const override { return Die.getOffset(); }
- const DIE *Die;
+ const DIE &Die;
};
/// Accelerator table data implementation for Apple type accelerator tables.
class AppleAccelTableTypeData : public AppleAccelTableOffsetData {
public:
- AppleAccelTableTypeData(const DIE *D) : AppleAccelTableOffsetData(D) {}
+ AppleAccelTableTypeData(const DIE &D) : AppleAccelTableOffsetData(D) {}
void emit(AsmPrinter *Asm) const override;
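Both hash() hooks in this header bottom out in the classic Bernstein (djb2) hash. A minimal standalone sketch of that scheme, assuming only that llvm::djbHash follows the textbook formulation (the case-folding variant used by the DWARF v5 classes additionally lower-cases each byte before mixing):

#include <cstdint>
#include <cstdio>
#include <string_view>

// Textbook Bernstein hash: H = H * 33 + C, seeded with 5381.
static uint32_t djb2(std::string_view Buffer, uint32_t H = 5381) {
  for (unsigned char C : Buffer)
    H = (H << 5) + H + C; // H * 33 + C, kept in shift-add form
  return H;
}

int main() { std::printf("%08x\n", djb2("main")); }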
diff --git a/linux-x64/clang/include/llvm/CodeGen/Analysis.h b/linux-x64/clang/include/llvm/CodeGen/Analysis.h
index ba88f1f..d77aee6 100644
--- a/linux-x64/clang/include/llvm/CodeGen/Analysis.h
+++ b/linux-x64/clang/include/llvm/CodeGen/Analysis.h
@@ -36,7 +36,7 @@
class SelectionDAG;
struct EVT;
-/// \brief Compute the linearized index of a member in a nested
+/// Compute the linearized index of a member in a nested
/// aggregate/struct/array.
///
/// Given an LLVM IR aggregate type and a sequence of insertvalue or
@@ -124,7 +124,7 @@
const TargetLoweringBase &TLI);
DenseMap<const MachineBasicBlock *, int>
-getFuncletMembership(const MachineFunction &MF);
+getEHScopeMembership(const MachineFunction &MF);
} // End llvm namespace
diff --git a/linux-x64/clang/include/llvm/CodeGen/AsmPrinter.h b/linux-x64/clang/include/llvm/CodeGen/AsmPrinter.h
index 3d3bd3a..b605638 100644
--- a/linux-x64/clang/include/llvm/CodeGen/AsmPrinter.h
+++ b/linux-x64/clang/include/llvm/CodeGen/AsmPrinter.h
@@ -50,6 +50,7 @@
class GlobalVariable;
class MachineBasicBlock;
class MachineConstantPoolValue;
+class MachineDominatorTree;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
@@ -92,11 +93,17 @@
std::unique_ptr<MCStreamer> OutStreamer;
/// The current machine function.
- const MachineFunction *MF = nullptr;
+ MachineFunction *MF = nullptr;
/// This is a pointer to the current MachineModuleInfo.
MachineModuleInfo *MMI = nullptr;
+ /// This is a pointer to the current MachineDominatorTree.
+ MachineDominatorTree *MDT = nullptr;
+
+ /// This is a pointer to the current MachineLoopInfo.
+ MachineLoopInfo *MLI = nullptr;
+
/// Optimization remark emitter.
MachineOptimizationRemarkEmitter *ORE;
@@ -130,9 +137,6 @@
static char ID;
- /// If VerboseAsm is set, a pointer to the loop info for this function.
- MachineLoopInfo *LI = nullptr;
-
struct HandlerInfo {
AsmPrinterHandler *Handler;
const char *TimerName;
@@ -161,6 +165,12 @@
};
private:
+ /// If generated on the fly, this owns the instance.
+ std::unique_ptr<MachineDominatorTree> OwnedMDT;
+
+ /// If generated on the fly, this owns the instance.
+ std::unique_ptr<MachineLoopInfo> OwnedMLI;
+
/// Structure for generating diagnostics for inline assembly. Only initialised
/// when necessary.
mutable std::unique_ptr<SrcMgrDiagInfo> DiagInfo;
@@ -191,6 +201,10 @@
/// Return a unique ID for the current function.
unsigned getFunctionNumber() const;
+ /// Return the symbol for the function's pseudo stack, if the stack frame is
+ /// not register based.
+ virtual const MCSymbol *getFunctionFrameSymbol() const { return nullptr; }
+
MCSymbol *getFunctionBegin() const { return CurrentFnBegin; }
MCSymbol *getFunctionEnd() const { return CurrentFnEnd; }
MCSymbol *getCurExceptionSym();
@@ -228,6 +242,7 @@
TAIL_CALL = 2,
LOG_ARGS_ENTER = 3,
CUSTOM_EVENT = 4,
+ TYPED_EVENT = 5,
};
// The table will contain these structs that point to the sled, the function
@@ -327,15 +342,15 @@
/// global value is specified, and if that global has an explicit alignment
/// requested, it will override the alignment request if required for
/// correctness.
- void EmitAlignment(unsigned NumBits, const GlobalObject *GO = nullptr) const;
+ void EmitAlignment(unsigned NumBits, const GlobalObject *GV = nullptr) const;
/// Lower the specified LLVM Constant to an MCExpr.
virtual const MCExpr *lowerConstant(const Constant *CV);
- /// \brief Print a general LLVM constant to the .s file.
+ /// Print a general LLVM constant to the .s file.
void EmitGlobalConstant(const DataLayout &DL, const Constant *CV);
- /// \brief Unnamed constant global variables solely contaning a pointer to
+ /// Unnamed constant global variables solely containing a pointer to
/// another global variable act like a global variable "proxy", or GOT
/// equivalents, i.e., it's only used to hold the address of the latter. One
/// optimization is to replace accesses to these proxies by using the GOT
@@ -345,7 +360,7 @@
/// accesses to GOT entries.
void computeGlobalGOTEquivs(Module &M);
- /// \brief Constant expressions using GOT equivalent globals may not be
+ /// Constant expressions using GOT equivalent globals may not be
/// eligible for PC relative GOT entry conversion, in such cases we need to
/// emit the proxies we previously omitted in EmitGlobalVariable.
void emitGlobalGOTEquivs();
@@ -452,6 +467,9 @@
/// Emit a long directive and value.
void emitInt32(int Value) const;
+ /// Emit a long long directive and value.
+ void emitInt64(uint64_t Value) const;
+
/// Emit something like ".long Hi-Lo" where the size in bytes of the directive
/// is specified by Size and Hi/Lo specify the labels. This implicitly uses
/// .set if it is available.
@@ -530,10 +548,10 @@
// Dwarf Lowering Routines
//===------------------------------------------------------------------===//
- /// \brief Emit frame instruction to describe the layout of the frame.
+ /// Emit frame instruction to describe the layout of the frame.
void emitCFIInstruction(const MCCFIInstruction &Inst) const;
- /// \brief Emit Dwarf abbreviation table.
+ /// Emit Dwarf abbreviation table.
template <typename T> void emitDwarfAbbrevs(const T &Abbrevs) const {
// For each abbreviation.
for (const auto &Abbrev : Abbrevs)
@@ -545,7 +563,7 @@
void emitDwarfAbbrev(const DIEAbbrev &Abbrev) const;
- /// \brief Recursively emit Dwarf DIE tree.
+ /// Recursively emit Dwarf DIE tree.
void emitDwarfDIE(const DIE &Die) const;
//===------------------------------------------------------------------===//
@@ -632,10 +650,9 @@
void EmitXXStructorList(const DataLayout &DL, const Constant *List,
bool isCtor);
- GCMetadataPrinter *GetOrCreateGCPrinter(GCStrategy &C);
+ GCMetadataPrinter *GetOrCreateGCPrinter(GCStrategy &S);
/// Emit GlobalAlias or GlobalIFunc.
- void emitGlobalIndirectSymbol(Module &M,
- const GlobalIndirectSymbol& GIS);
+ void emitGlobalIndirectSymbol(Module &M, const GlobalIndirectSymbol &GIS);
void setupCodePaddingContext(const MachineBasicBlock &MBB,
MCCodePaddingContext &Context) const;
};
diff --git a/linux-x64/clang/include/llvm/CodeGen/AtomicExpandUtils.h b/linux-x64/clang/include/llvm/CodeGen/AtomicExpandUtils.h
index 1f9c96b..b1adf66 100644
--- a/linux-x64/clang/include/llvm/CodeGen/AtomicExpandUtils.h
+++ b/linux-x64/clang/include/llvm/CodeGen/AtomicExpandUtils.h
@@ -26,7 +26,7 @@
function_ref<void(IRBuilder<> &, Value *, Value *, Value *, AtomicOrdering,
Value *&, Value *&)>;
-/// \brief Expand an atomic RMW instruction into a loop utilizing
+/// Expand an atomic RMW instruction into a loop utilizing
/// cmpxchg. You'll want to make sure your target machine likes cmpxchg
/// instructions in the first place and that there isn't another, better,
/// transformation available (for example AArch32/AArch64 have linked loads).
@@ -58,7 +58,7 @@
/// [...]
///
/// Returns true if the containing function was modified.
-bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, CreateCmpXchgInstFun Factory);
+bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, CreateCmpXchgInstFun CreateCmpXchg);
} // end namespace llvm
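A sketch of a CreateCmpXchgInstFun callback matching the function_ref signature above. The parameter roles (address, previously loaded value, desired new value, then out-params for the success flag and the re-loaded value) are an assumption inferred from the expansion loop described in the comment, not a quote of any in-tree implementation:

#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Hypothetical callback for expandAtomicRMWToCmpXchg: emit a cmpxchg and
// unpack its {loaded value, success flag} result into the out-params.
static void createCmpXchg(IRBuilder<> &Builder, Value *Addr, Value *Loaded,
                          Value *NewVal, AtomicOrdering Order,
                          Value *&Success, Value *&NewLoaded) {
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
  Success = Builder.CreateExtractValue(Pair, 1, "success");
}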
diff --git a/linux-x64/clang/include/llvm/CodeGen/BasicTTIImpl.h b/linux-x64/clang/include/llvm/CodeGen/BasicTTIImpl.h
index 9096263..f76a242 100644
--- a/linux-x64/clang/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/linux-x64/clang/include/llvm/CodeGen/BasicTTIImpl.h
@@ -65,7 +65,7 @@
extern cl::opt<unsigned> PartialUnrollingThreshold;
-/// \brief Base class which can be used to help build a TTI implementation.
+/// Base class which can be used to help build a TTI implementation.
///
/// This class provides as much implementation of the TTI interface as is
/// possible using the target independent parts of the code generator.
@@ -101,12 +101,12 @@
return Cost;
}
- /// \brief Local query method delegates up to T which *must* implement this!
+ /// Local query method delegates up to T which *must* implement this!
const TargetSubtargetInfo *getST() const {
return static_cast<const T *>(this)->getST();
}
- /// \brief Local query method delegates up to T which *must* implement this!
+ /// Local query method delegates up to T which *must* implement this!
const TargetLoweringBase *getTLI() const {
return static_cast<const T *>(this)->getTLI();
}
@@ -553,11 +553,15 @@
unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) {
- if (Kind == TTI::SK_Alternate || Kind == TTI::SK_PermuteTwoSrc ||
- Kind == TTI::SK_PermuteSingleSrc) {
+ switch (Kind) {
+ case TTI::SK_Select:
+ case TTI::SK_Transpose:
+ case TTI::SK_PermuteSingleSrc:
+ case TTI::SK_PermuteTwoSrc:
return getPermuteShuffleOverhead(Tp);
+ default:
+ return 1;
}
- return 1;
}
unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
@@ -1200,7 +1204,7 @@
return SingleCallCost;
}
- /// \brief Compute a cost of the given call instruction.
+ /// Compute a cost of the given call instruction.
///
/// Compute the cost of calling function F with return type RetTy and
/// argument types Tys. F might be nullptr, in this case the cost of an
@@ -1361,7 +1365,7 @@
/// @}
};
-/// \brief Concrete BasicTTIImpl that can be used if no further customization
+/// Concrete BasicTTIImpl that can be used if no further customization
/// is needed.
class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
using BaseT = BasicTTIImplBase<BasicTTIImpl>;
@@ -1375,7 +1379,7 @@
const TargetLoweringBase *getTLI() const { return TLI; }
public:
- explicit BasicTTIImpl(const TargetMachine *ST, const Function &F);
+ explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
};
} // end namespace llvm
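The getST()/getTLI() helpers above are the usual CRTP trick: the base class static_casts this to the derived type, which must supply the method, with no virtual dispatch involved. A toy, LLVM-free illustration of the pattern (all names here are invented):

#include <cstdio>

// CRTP base: delegates queries to the derived class, which *must*
// implement getAnswer(); resolution happens at compile time.
template <typename T> class QueryBase {
public:
  int queryTwice() {
    return static_cast<T *>(this)->getAnswer() * 2;
  }
};

class MyQuery : public QueryBase<MyQuery> {
public:
  int getAnswer() { return 21; }
};

int main() {
  MyQuery Q;
  std::printf("%d\n", Q.queryTwice()); // prints 42
}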
diff --git a/linux-x64/clang/include/llvm/CodeGen/CalcSpillWeights.h b/linux-x64/clang/include/llvm/CodeGen/CalcSpillWeights.h
index d9e8206..f85767f 100644
--- a/linux-x64/clang/include/llvm/CodeGen/CalcSpillWeights.h
+++ b/linux-x64/clang/include/llvm/CodeGen/CalcSpillWeights.h
@@ -22,7 +22,7 @@
class MachineLoopInfo;
class VirtRegMap;
- /// \brief Normalize the spill weight of a live interval
+ /// Normalize the spill weight of a live interval
///
/// The spill weight of a live interval is computed as:
///
@@ -42,7 +42,7 @@
return UseDefFreq / (Size + 25*SlotIndex::InstrDist);
}
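To see what this normalization does, here is a standalone sketch of the same formula; kInstrDist is a hypothetical stand-in for SlotIndex::InstrDist (its real value is not shown in this hunk). The point is that the 25*InstrDist bias keeps very short intervals from getting enormous weights:

#include <cstdio>

static constexpr unsigned kInstrDist = 16; // hypothetical stand-in value

// Mirrors UseDefFreq / (Size + 25 * InstrDist): as Size shrinks toward
// zero, the weight saturates at UseDefFreq / (25 * kInstrDist) instead
// of diverging.
static float normalize(float UseDefFreq, unsigned Size) {
  return UseDefFreq / (Size + 25 * kInstrDist);
}

int main() {
  std::printf("%f\n", normalize(100.0f, 1 * kInstrDist));   // short interval
  std::printf("%f\n", normalize(100.0f, 100 * kInstrDist)); // long interval
}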
- /// \brief Calculate auxiliary information for a virtual register such as its
+ /// Calculate auxiliary information for a virtual register such as its
/// spill weight and allocation hint.
class VirtRegAuxInfo {
public:
@@ -64,10 +64,10 @@
NormalizingFn norm = normalizeSpillWeight)
: MF(mf), LIS(lis), VRM(vrm), Loops(loops), MBFI(mbfi), normalize(norm) {}
- /// \brief (re)compute li's spill weight and allocation hint.
+ /// (re)compute li's spill weight and allocation hint.
void calculateSpillWeightAndHint(LiveInterval &li);
- /// \brief Compute future expected spill weight of a split artifact of li
+ /// Compute future expected spill weight of a split artifact of li
/// that will span between start and end slot indexes.
/// \param li The live interval to be split.
/// \param start The expected beginning of the split artifact. Instructions
@@ -78,7 +78,7 @@
/// negative weight for unspillable li.
float futureWeight(LiveInterval &li, SlotIndex start, SlotIndex end);
- /// \brief Helper function for weight calculations.
+ /// Helper function for weight calculations.
/// (Re)compute li's spill weight and allocation hint, or, for non null
/// start and end - compute future expected spill weight of a split
/// artifact of li that will span between start and end slot indexes.
@@ -94,7 +94,7 @@
SlotIndex *end = nullptr);
};
- /// \brief Compute spill weights and allocation hints for all virtual register
+ /// Compute spill weights and allocation hints for all virtual register
/// live intervals.
void calculateSpillWeightsAndHints(LiveIntervals &LIS, MachineFunction &MF,
VirtRegMap *VRM,
diff --git a/linux-x64/clang/include/llvm/CodeGen/CallingConvLower.h b/linux-x64/clang/include/llvm/CodeGen/CallingConvLower.h
index d30a273..efcf80b 100644
--- a/linux-x64/clang/include/llvm/CodeGen/CallingConvLower.h
+++ b/linux-x64/clang/include/llvm/CodeGen/CallingConvLower.h
@@ -304,7 +304,7 @@
/// CheckReturn - Analyze the return values of a function, returning
/// true if the return can be performed without sret-demotion, and
/// false otherwise.
- bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
+ bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn);
/// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
diff --git a/linux-x64/clang/include/llvm/CodeGen/CommandFlags.def b/linux-x64/clang/include/llvm/CodeGen/CommandFlags.inc
similarity index 97%
rename from linux-x64/clang/include/llvm/CodeGen/CommandFlags.def
rename to linux-x64/clang/include/llvm/CodeGen/CommandFlags.inc
index 3708c04..7d2d167 100644
--- a/linux-x64/clang/include/llvm/CodeGen/CommandFlags.def
+++ b/linux-x64/clang/include/llvm/CodeGen/CommandFlags.inc
@@ -17,7 +17,7 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
-#include "llvm/MC/MCTargetOptionsCommandFlags.def"
+#include "llvm/MC/MCTargetOptionsCommandFlags.inc"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
@@ -261,6 +261,10 @@
"stack-size-section",
cl::desc("Emit a section containing stack size metadata"), cl::init(false));
+static cl::opt<bool>
+ EnableAddrsig("addrsig", cl::desc("Emit an address-significance table"),
+ cl::init(false));
+
// Common utility function tightly tied to the options listed here. Initializes
// a TargetOptions object with CodeGen flags and returns it.
static TargetOptions InitTargetOptionsFromCodeGenFlags() {
@@ -289,6 +293,7 @@
Options.ExplicitEmulatedTLS = EmulatedTLS.getNumOccurrences() > 0;
Options.ExceptionModel = ExceptionModel;
Options.EmitStackSizeSection = EnableStackSizeSection;
+ Options.EmitAddrsig = EnableAddrsig;
Options.MCOptions = InitMCTargetOptionsFromFlags();
@@ -349,7 +354,7 @@
return Features.getFeatures();
}
-/// \brief Set function attributes of functions in Module M based on CPU,
+/// Set function attributes of functions in Module M based on CPU,
/// Features, and command line flags.
LLVM_ATTRIBUTE_UNUSED static void
setFunctionAttributes(StringRef CPU, StringRef Features, Module &M) {
diff --git a/linux-x64/clang/include/llvm/CodeGen/CostTable.h b/linux-x64/clang/include/llvm/CodeGen/CostTable.h
index 0fc16d3..48ad769 100644
--- a/linux-x64/clang/include/llvm/CodeGen/CostTable.h
+++ b/linux-x64/clang/include/llvm/CodeGen/CostTable.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief Cost tables and simple lookup functions
+/// Cost tables and simple lookup functions
///
//===----------------------------------------------------------------------===//
diff --git a/linux-x64/clang/include/llvm/CodeGen/DIE.h b/linux-x64/clang/include/llvm/CodeGen/DIE.h
index f809fc9..7d486b1 100644
--- a/linux-x64/clang/include/llvm/CodeGen/DIE.h
+++ b/linux-x64/clang/include/llvm/CodeGen/DIE.h
@@ -136,7 +136,7 @@
/// The bump allocator to use when creating DIEAbbrev objects in the uniqued
/// storage container.
BumpPtrAllocator &Alloc;
- /// \brief FoldingSet that uniques the abbreviations.
+ /// FoldingSet that uniques the abbreviations.
FoldingSet<DIEAbbrev> AbbreviationsSet;
/// A list of all the unique abbreviations in use.
std::vector<DIEAbbrev *> Abbreviations;
@@ -190,7 +190,7 @@
uint64_t getValue() const { return Integer; }
void setValue(uint64_t Val) { Integer = Val; }
- void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+ void EmitValue(const AsmPrinter *Asm, dwarf::Form Form) const;
unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
void print(raw_ostream &O) const;
@@ -868,7 +868,7 @@
return dwarf::DW_FORM_block;
}
- void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+ void EmitValue(const AsmPrinter *Asm, dwarf::Form Form) const;
unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
void print(raw_ostream &O) const;
@@ -899,7 +899,7 @@
return dwarf::DW_FORM_block;
}
- void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+ void EmitValue(const AsmPrinter *Asm, dwarf::Form Form) const;
unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
void print(raw_ostream &O) const;
diff --git a/linux-x64/clang/include/llvm/CodeGen/DwarfStringPoolEntry.h b/linux-x64/clang/include/llvm/CodeGen/DwarfStringPoolEntry.h
index e6c0483..8b1a7af 100644
--- a/linux-x64/clang/include/llvm/CodeGen/DwarfStringPoolEntry.h
+++ b/linux-x64/clang/include/llvm/CodeGen/DwarfStringPoolEntry.h
@@ -10,6 +10,7 @@
#ifndef LLVM_CODEGEN_DWARFSTRINGPOOLENTRY_H
#define LLVM_CODEGEN_DWARFSTRINGPOOLENTRY_H
+#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringMap.h"
namespace llvm {
@@ -18,34 +19,52 @@
/// Data for a string pool entry.
struct DwarfStringPoolEntry {
+ static constexpr unsigned NotIndexed = -1;
+
MCSymbol *Symbol;
unsigned Offset;
unsigned Index;
+
+ bool isIndexed() const { return Index != NotIndexed; }
};
/// String pool entry reference.
-struct DwarfStringPoolEntryRef {
- const StringMapEntry<DwarfStringPoolEntry> *I = nullptr;
+class DwarfStringPoolEntryRef {
+ PointerIntPair<const StringMapEntry<DwarfStringPoolEntry> *, 1, bool>
+ MapEntryAndIndexed;
+
+ const StringMapEntry<DwarfStringPoolEntry> *getMapEntry() const {
+ return MapEntryAndIndexed.getPointer();
+ }
public:
DwarfStringPoolEntryRef() = default;
- explicit DwarfStringPoolEntryRef(
- const StringMapEntry<DwarfStringPoolEntry> &I)
- : I(&I) {}
+ DwarfStringPoolEntryRef(const StringMapEntry<DwarfStringPoolEntry> &Entry,
+ bool Indexed)
+ : MapEntryAndIndexed(&Entry, Indexed) {}
- explicit operator bool() const { return I; }
+ explicit operator bool() const { return getMapEntry(); }
MCSymbol *getSymbol() const {
- assert(I->second.Symbol && "No symbol available!");
- return I->second.Symbol;
+ assert(getMapEntry()->second.Symbol && "No symbol available!");
+ return getMapEntry()->second.Symbol;
}
- unsigned getOffset() const { return I->second.Offset; }
- unsigned getIndex() const { return I->second.Index; }
- StringRef getString() const { return I->first(); }
+ unsigned getOffset() const { return getMapEntry()->second.Offset; }
+ bool isIndexed() const { return MapEntryAndIndexed.getInt(); }
+ unsigned getIndex() const {
+ assert(isIndexed());
+ assert(getMapEntry()->getValue().isIndexed());
+ return getMapEntry()->second.Index;
+ }
+ StringRef getString() const { return getMapEntry()->first(); }
/// Return the entire string pool entry for convenience.
- DwarfStringPoolEntry getEntry() const { return I->getValue(); }
+ DwarfStringPoolEntry getEntry() const { return getMapEntry()->getValue(); }
- bool operator==(const DwarfStringPoolEntryRef &X) const { return I == X.I; }
- bool operator!=(const DwarfStringPoolEntryRef &X) const { return I != X.I; }
+ bool operator==(const DwarfStringPoolEntryRef &X) const {
+ return getMapEntry() == X.getMapEntry();
+ }
+ bool operator!=(const DwarfStringPoolEntryRef &X) const {
+ return getMapEntry() != X.getMapEntry();
+ }
};
} // end namespace llvm
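The rewrite above packs the "indexed" flag into the low bit of the map-entry pointer, keeping DwarfStringPoolEntryRef pointer-sized. A simplified, self-contained illustration of that trick (llvm::PointerIntPair is more general and derives the available bits from the pointee's alignment):

#include <cassert>
#include <cstdint>

// Simplified pointer+flag pack: relies on the pointee's alignment
// guaranteeing a zero low bit, exactly the trick PointerIntPair uses.
template <typename T> class PtrBoolPair {
  uintptr_t Bits = 0;

public:
  PtrBoolPair() = default;
  PtrBoolPair(T *P, bool B) : Bits(reinterpret_cast<uintptr_t>(P) | B) {
    assert((reinterpret_cast<uintptr_t>(P) & 1) == 0 && "unaligned pointer");
  }
  T *getPointer() const { return reinterpret_cast<T *>(Bits & ~uintptr_t(1)); }
  bool getInt() const { return Bits & 1; }
};

int main() {
  static int Value = 7;
  PtrBoolPair<int> P(&Value, true);
  assert(P.getPointer() == &Value && P.getInt());
}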
diff --git a/linux-x64/clang/include/llvm/CodeGen/FastISel.h b/linux-x64/clang/include/llvm/CodeGen/FastISel.h
index 772bd6c..865d8a8 100644
--- a/linux-x64/clang/include/llvm/CodeGen/FastISel.h
+++ b/linux-x64/clang/include/llvm/CodeGen/FastISel.h
@@ -61,7 +61,7 @@
class User;
class Value;
-/// \brief This is a fast-path instruction selection class that generates poor
+/// This is a fast-path instruction selection class that generates poor
/// code and doesn't support illegal types or non-trivial lowering, but runs
/// quickly.
class FastISel {
@@ -78,7 +78,7 @@
bool IsReturnValueUsed : 1;
bool IsPatchPoint : 1;
- // \brief IsTailCall Should be modified by implementations of FastLowerCall
+ // IsTailCall should be modified by implementations of FastLowerCall
// that perform tail call conversions.
bool IsTailCall = false;
@@ -215,70 +215,74 @@
const TargetLibraryInfo *LibInfo;
bool SkipTargetIndependentISel;
- /// \brief The position of the last instruction for materializing constants
+ /// The position of the last instruction for materializing constants
/// for use in the current block. It resets to EmitStartPt when it makes sense
/// (for example, it's usually profitable to avoid function calls between the
/// definition and the use)
MachineInstr *LastLocalValue;
- /// \brief The top most instruction in the current block that is allowed for
+ /// The top most instruction in the current block that is allowed for
/// emitting local variables. LastLocalValue resets to EmitStartPt when it
/// makes sense (for example, on function calls)
MachineInstr *EmitStartPt;
+ /// Last local value flush point. On a subsequent flush, no local value will
+ /// sink past this point.
+ MachineBasicBlock::iterator LastFlushPoint;
+
public:
virtual ~FastISel();
- /// \brief Return the position of the last instruction emitted for
+ /// Return the position of the last instruction emitted for
/// materializing constants for use in the current block.
MachineInstr *getLastLocalValue() { return LastLocalValue; }
- /// \brief Update the position of the last instruction emitted for
+ /// Update the position of the last instruction emitted for
/// materializing constants for use in the current block.
void setLastLocalValue(MachineInstr *I) {
EmitStartPt = I;
LastLocalValue = I;
}
- /// \brief Set the current block to which generated machine instructions will
+ /// Set the current block to which generated machine instructions will
/// be appended.
void startNewBlock();
/// Flush the local value map and sink local values if possible.
void finishBasicBlock();
- /// \brief Return current debug location information.
+ /// Return current debug location information.
DebugLoc getCurDebugLoc() const { return DbgLoc; }
- /// \brief Do "fast" instruction selection for function arguments and append
+ /// Do "fast" instruction selection for function arguments and append
/// the machine instructions to the current block. Returns true when
/// successful.
bool lowerArguments();
- /// \brief Do "fast" instruction selection for the given LLVM IR instruction
+ /// Do "fast" instruction selection for the given LLVM IR instruction
/// and append the generated machine instructions to the current block.
/// Returns true if selection was successful.
bool selectInstruction(const Instruction *I);
- /// \brief Do "fast" instruction selection for the given LLVM IR operator
+ /// Do "fast" instruction selection for the given LLVM IR operator
/// (Instruction or ConstantExpr), and append generated machine instructions
/// to the current block. Return true if selection was successful.
bool selectOperator(const User *I, unsigned Opcode);
- /// \brief Create a virtual register and arrange for it to be assigned the
+ /// Create a virtual register and arrange for it to be assigned the
/// value for the given LLVM value.
unsigned getRegForValue(const Value *V);
- /// \brief Look up the value to see if its value is already cached in a
+ /// Look up the value to see if its value is already cached in a
/// register. It may be defined by instructions across blocks or defined
/// locally.
unsigned lookUpRegForValue(const Value *V);
- /// \brief This is a wrapper around getRegForValue that also takes care of
+ /// This is a wrapper around getRegForValue that also takes care of
/// truncating or sign-extending the given getelementptr index value.
- std::pair<unsigned, bool> getRegForGEPIndex(const Value *V);
+ std::pair<unsigned, bool> getRegForGEPIndex(const Value *Idx);
- /// \brief We're checking to see if we can fold \p LI into \p FoldInst. Note
+ /// We're checking to see if we can fold \p LI into \p FoldInst. Note
/// that we could have a sequence where multiple LLVM IR instructions are
/// folded into the same MachineInstr. For example, we could have:
///
@@ -292,7 +296,7 @@
/// If we succeed folding, return true.
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst);
- /// \brief The specified machine instr operand is a vreg, and that vreg is
+ /// The specified machine instr operand is a vreg, and that vreg is
/// being provided by the specified load instruction. If possible, try to
/// fold the load as an operand to the instruction, returning true if
/// possible.
@@ -303,11 +307,11 @@
return false;
}
- /// \brief Reset InsertPt to prepare for inserting instructions into the
+ /// Reset InsertPt to prepare for inserting instructions into the
/// current block.
void recomputeInsertPt();
- /// \brief Remove all dead instructions between the I and E.
+ /// Remove all dead instructions between the I and E.
void removeDeadCode(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator E);
@@ -316,11 +320,11 @@
DebugLoc DL;
};
- /// \brief Prepare InsertPt to begin inserting instructions into the local
+ /// Prepare InsertPt to begin inserting instructions into the local
/// value area and return the old insert position.
SavePoint enterLocalValueArea();
- /// \brief Reset InsertPt to the given old insert position.
+ /// Reset InsertPt to the given old insert position.
void leaveLocalValueArea(SavePoint Old);
protected:
@@ -328,45 +332,45 @@
const TargetLibraryInfo *LibInfo,
bool SkipTargetIndependentISel = false);
- /// \brief This method is called by target-independent code when the normal
+ /// This method is called by target-independent code when the normal
/// FastISel process fails to select an instruction. This gives targets a
/// chance to emit code for anything that doesn't fit into FastISel's
/// framework. It returns true if it was successful.
virtual bool fastSelectInstruction(const Instruction *I) = 0;
- /// \brief This method is called by target-independent code to do target-
+ /// This method is called by target-independent code to do target-
/// specific argument lowering. It returns true if it was successful.
virtual bool fastLowerArguments();
- /// \brief This method is called by target-independent code to do target-
+ /// This method is called by target-independent code to do target-
/// specific call lowering. It returns true if it was successful.
virtual bool fastLowerCall(CallLoweringInfo &CLI);
- /// \brief This method is called by target-independent code to do target-
+ /// This method is called by target-independent code to do target-
/// specific intrinsic lowering. It returns true if it was successful.
virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II);
- /// \brief This method is called by target-independent code to request that an
+ /// This method is called by target-independent code to request that an
/// instruction with the given type and opcode be emitted.
virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode);
- /// \brief This method is called by target-independent code to request that an
+ /// This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register operand be emitted.
virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill);
- /// \brief This method is called by target-independent code to request that an
+ /// This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register operands be emitted.
virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill);
- /// \brief This method is called by target-independent code to request that an
+ /// This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register and immediate
/// operands be emitted.
virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, uint64_t Imm);
- /// \brief This method is a wrapper of fastEmit_ri.
+ /// This method is a wrapper of fastEmit_ri.
///
/// It first tries to emit an instruction with an immediate operand using
/// fastEmit_ri. If that fails, it materializes the immediate into a register
@@ -374,89 +378,89 @@
unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill,
uint64_t Imm, MVT ImmType);
- /// \brief This method is called by target-independent code to request that an
+ /// This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and immediate operand be emitted.
virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm);
- /// \brief This method is called by target-independent code to request that an
+ /// This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and floating-point immediate
/// operand be emitted.
virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode,
const ConstantFP *FPImm);
- /// \brief Emit a MachineInstr with no operands and a result register in the
+ /// Emit a MachineInstr with no operands and a result register in the
/// given register class.
unsigned fastEmitInst_(unsigned MachineInstOpcode,
const TargetRegisterClass *RC);
- /// \brief Emit a MachineInstr with one register operand and a result register
+ /// Emit a MachineInstr with one register operand and a result register
/// in the given register class.
unsigned fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill);
- /// \brief Emit a MachineInstr with two register operands and a result
+ /// Emit a MachineInstr with two register operands and a result
/// register in the given register class.
unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill);
- /// \brief Emit a MachineInstr with three register operands and a result
+ /// Emit a MachineInstr with three register operands and a result
/// register in the given register class.
unsigned fastEmitInst_rrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill,
unsigned Op2, bool Op2IsKill);
- /// \brief Emit a MachineInstr with a register operand, an immediate, and a
+ /// Emit a MachineInstr with a register operand, an immediate, and a
/// result register in the given register class.
unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm);
- /// \brief Emit a MachineInstr with one register operand and two immediate
+ /// Emit a MachineInstr with one register operand and two immediate
/// operands.
unsigned fastEmitInst_rii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm1, uint64_t Imm2);
- /// \brief Emit a MachineInstr with a floating point immediate, and a result
+ /// Emit a MachineInstr with a floating point immediate, and a result
/// register in the given register class.
unsigned fastEmitInst_f(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
const ConstantFP *FPImm);
- /// \brief Emit a MachineInstr with two register operands, an immediate, and a
+ /// Emit a MachineInstr with two register operands, an immediate, and a
/// result register in the given register class.
unsigned fastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill,
uint64_t Imm);
- /// \brief Emit a MachineInstr with a single immediate operand, and a result
+ /// Emit a MachineInstr with a single immediate operand, and a result
/// register in the given register class.
- unsigned fastEmitInst_i(unsigned MachineInstrOpcode,
+ unsigned fastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, uint64_t Imm);
- /// \brief Emit a MachineInstr for an extract_subreg from a specified index of
+ /// Emit a MachineInstr for an extract_subreg from a specified index of
/// a superregister to a specified type.
unsigned fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill,
uint32_t Idx);
- /// \brief Emit MachineInstrs to compute the value of Op with all but the
+ /// Emit MachineInstrs to compute the value of Op with all but the
/// least significant bit set to zero.
unsigned fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill);
- /// \brief Emit an unconditional branch to the given block, unless it is the
+ /// Emit an unconditional branch to the given block, unless it is the
/// immediate (fall-through) successor, and update the CFG.
- void fastEmitBranch(MachineBasicBlock *MBB, const DebugLoc &DL);
+ void fastEmitBranch(MachineBasicBlock *MSucc, const DebugLoc &DbgLoc);
/// Emit an unconditional branch to \p FalseMBB, obtains the branch weight
/// and adds TrueMBB and FalseMBB to the successor list.
void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB,
MachineBasicBlock *FalseMBB);
- /// \brief Update the value map to include the new mapping for this
+ /// Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previous
/// determined register.
///
@@ -467,26 +471,26 @@
unsigned createResultReg(const TargetRegisterClass *RC);
- /// \brief Try to constrain Op so that it is usable by argument OpNum of the
+ /// Try to constrain Op so that it is usable by argument OpNum of the
/// provided MCInstrDesc. If this fails, create a new virtual register in the
/// correct class and COPY the value there.
unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
unsigned OpNum);
- /// \brief Emit a constant in a register using target-specific logic, such as
+ /// Emit a constant in a register using target-specific logic, such as
/// constant pool loads.
virtual unsigned fastMaterializeConstant(const Constant *C) { return 0; }
- /// \brief Emit an alloca address in a register using target-specific logic.
+ /// Emit an alloca address in a register using target-specific logic.
virtual unsigned fastMaterializeAlloca(const AllocaInst *C) { return 0; }
- /// \brief Emit the floating-point constant +0.0 in a register using target-
+ /// Emit the floating-point constant +0.0 in a register using target-
/// specific logic.
virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF) {
return 0;
}
- /// \brief Check if \c Add is an add that can be safely folded into \c GEP.
+ /// Check if \c Add is an add that can be safely folded into \c GEP.
///
/// \c Add can be folded into \c GEP if:
/// - \c Add is an add,
@@ -495,16 +499,16 @@
/// - \c Add has a constant operand.
bool canFoldAddIntoGEP(const User *GEP, const Value *Add);
- /// \brief Test whether the given value has exactly one use.
+ /// Test whether the given value has exactly one use.
bool hasTrivialKill(const Value *V);
- /// \brief Create a machine mem operand from the given instruction.
+ /// Create a machine mem operand from the given instruction.
MachineMemOperand *createMachineMemOperandFor(const Instruction *I) const;
CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const;
bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs);
- bool lowerCallTo(const CallInst *CI, const char *SymbolName,
+ bool lowerCallTo(const CallInst *CI, const char *SymName,
unsigned NumArgs);
bool lowerCallTo(CallLoweringInfo &CLI);
@@ -521,23 +525,24 @@
}
bool lowerCall(const CallInst *I);
- /// \brief Select and emit code for a binary operator instruction, which has
+ /// Select and emit code for a binary operator instruction, which has
/// an opcode which directly corresponds to the given ISD opcode.
bool selectBinaryOp(const User *I, unsigned ISDOpcode);
bool selectFNeg(const User *I);
bool selectGetElementPtr(const User *I);
bool selectStackmap(const CallInst *I);
bool selectPatchpoint(const CallInst *I);
- bool selectCall(const User *Call);
+ bool selectCall(const User *I);
bool selectIntrinsicCall(const IntrinsicInst *II);
bool selectBitCast(const User *I);
bool selectCast(const User *I, unsigned Opcode);
- bool selectExtractValue(const User *I);
+ bool selectExtractValue(const User *U);
bool selectInsertValue(const User *I);
bool selectXRayCustomEvent(const CallInst *II);
+ bool selectXRayTypedEvent(const CallInst *II);
private:
- /// \brief Handle PHI nodes in successor blocks.
+ /// Handle PHI nodes in successor blocks.
///
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
@@ -546,21 +551,21 @@
/// correspond to a different MBB than the end.
bool handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);
- /// \brief Helper for materializeRegForValue to materialize a constant in a
+ /// Helper for materializeRegForValue to materialize a constant in a
/// target-independent way.
unsigned materializeConstant(const Value *V, MVT VT);
- /// \brief Helper for getRegForVale. This function is called when the value
+ /// Helper for getRegForValue. This function is called when the value
/// isn't already available in a register and must be materialized with new
/// instructions.
unsigned materializeRegForValue(const Value *V, MVT VT);
- /// \brief Clears LocalValueMap and moves the area for the new local variables
+ /// Clears LocalValueMap and moves the area for the new local variables
/// to the beginning of the block. It helps to avoid spilling cached variables
/// across heavy instructions like calls.
void flushLocalValueMap();
- /// \brief Removes dead local value instructions after SavedLastLocalvalue.
+ /// Removes dead local value instructions after SavedLastLocalValue.
void removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue);
struct InstOrderMap {
@@ -568,7 +573,8 @@
MachineInstr *FirstTerminator = nullptr;
unsigned FirstTerminatorOrder = std::numeric_limits<unsigned>::max();
- void initialize(MachineBasicBlock *MBB);
+ void initialize(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator LastFlushPoint);
};
/// Sinks the local value materialization instruction LocalMI to its first use
@@ -576,10 +582,10 @@
void sinkLocalValueMaterialization(MachineInstr &LocalMI, unsigned DefReg,
InstOrderMap &OrderMap);
- /// \brief Insertion point before trying to select the current instruction.
+ /// Insertion point before trying to select the current instruction.
MachineBasicBlock::iterator SavedInsertPt;
- /// \brief Add a stackmap or patchpoint intrinsic call's live variable
+ /// Add a stackmap or patchpoint intrinsic call's live variable
/// operands to a stackmap or patchpoint machine instruction.
bool addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
const CallInst *CI, unsigned StartIdx);
diff --git a/linux-x64/clang/include/llvm/CodeGen/FunctionLoweringInfo.h b/linux-x64/clang/include/llvm/CodeGen/FunctionLoweringInfo.h
index 2da00b7..5fe4f89 100644
--- a/linux-x64/clang/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -1,4 +1,4 @@
-//===- FunctionLoweringInfo.h - Lower functions from LLVM IR to CodeGen ---===//
+//===- FunctionLoweringInfo.h - Lower functions from LLVM IR ---*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/linux-x64/clang/include/llvm/CodeGen/GCStrategy.h b/linux-x64/clang/include/llvm/CodeGen/GCStrategy.h
index 16168e7..f835bac 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GCStrategy.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GCStrategy.h
@@ -104,13 +104,13 @@
const std::string &getName() const { return Name; }
/// By default, write barriers are replaced with simple store
- /// instructions. If true, you must provide a custom pass to lower
- /// calls to @llvm.gcwrite.
+ /// instructions. If true, you must provide a custom pass to lower
+ /// calls to \@llvm.gcwrite.
bool customWriteBarrier() const { return CustomWriteBarriers; }
/// By default, read barriers are replaced with simple load
- /// instructions. If true, you must provide a custom pass to lower
- /// calls to @llvm.gcread.
+ /// instructions. If true, you must provide a custom pass to lower
+ /// calls to \@llvm.gcread.
bool customReadBarrier() const { return CustomReadBarriers; }
/// Returns true if this strategy is expecting the use of gc.statepoints,
@@ -146,8 +146,8 @@
}
/// By default, roots are left for the code generator so it can generate a
- /// stack map. If true, you must provide a custom pass to lower
- /// calls to @llvm.gcroot.
+ /// stack map. If true, you must provide a custom pass to lower
+ /// calls to \@llvm.gcroot.
bool customRoots() const { return CustomRoots; }
/// If set, gcroot intrinsics should initialize their allocas to null
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
index 8d91cc4..32980ce 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -123,7 +123,7 @@
}
template <typename FuncInfoTy>
- void setArgFlags(ArgInfo &Arg, unsigned OpNum, const DataLayout &DL,
+ void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL,
const FuncInfoTy &FuncInfo) const;
/// Invoke Handler::assignArg on each of the given \p Args and then use
@@ -131,19 +131,19 @@
///
/// \return True if everything has succeeded, false otherwise.
bool handleAssignments(MachineIRBuilder &MIRBuilder, ArrayRef<ArgInfo> Args,
- ValueHandler &Callback) const;
+ ValueHandler &Handler) const;
public:
CallLowering(const TargetLowering *TLI) : TLI(TLI) {}
virtual ~CallLowering() = default;
/// This hook must be implemented to lower outgoing return values, described
- /// by \p Val, into the specified virtual register \p VReg.
+ /// by \p Val, into the specified virtual registers \p VRegs.
/// This hook is used by GlobalISel.
///
/// \return True if the lowering succeeds, false otherwise.
- virtual bool lowerReturn(MachineIRBuilder &MIRBuilder,
- const Value *Val, unsigned VReg) const {
+ virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
+ ArrayRef<unsigned> VRegs) const {
return false;
}
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h
new file mode 100644
index 0000000..8d61f9a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h
@@ -0,0 +1,134 @@
+//===-- llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements a version of MachineIRBuilder which does trivial
+/// constant folding.
+//===----------------------------------------------------------------------===//
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+
+namespace llvm {
+
+static Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
+ const unsigned Op2,
+ const MachineRegisterInfo &MRI) {
+ auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
+ auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
+ if (MaybeOp1Cst && MaybeOp2Cst) {
+ LLT Ty = MRI.getType(Op1);
+ APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
+ APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
+ switch (Opcode) {
+ default:
+ break;
+ case TargetOpcode::G_ADD:
+ return C1 + C2;
+ case TargetOpcode::G_AND:
+ return C1 & C2;
+ case TargetOpcode::G_ASHR:
+ return C1.ashr(C2);
+ case TargetOpcode::G_LSHR:
+ return C1.lshr(C2);
+ case TargetOpcode::G_MUL:
+ return C1 * C2;
+ case TargetOpcode::G_OR:
+ return C1 | C2;
+ case TargetOpcode::G_SHL:
+ return C1 << C2;
+ case TargetOpcode::G_SUB:
+ return C1 - C2;
+ case TargetOpcode::G_XOR:
+ return C1 ^ C2;
+ case TargetOpcode::G_UDIV:
+ if (!C2.getBoolValue())
+ break;
+ return C1.udiv(C2);
+ case TargetOpcode::G_SDIV:
+ if (!C2.getBoolValue())
+ break;
+ return C1.sdiv(C2);
+ case TargetOpcode::G_UREM:
+ if (!C2.getBoolValue())
+ break;
+ return C1.urem(C2);
+ case TargetOpcode::G_SREM:
+ if (!C2.getBoolValue())
+ break;
+ return C1.srem(C2);
+ }
+ }
+ return None;
+}
+
+/// An MIRBuilder which does trivial constant folding of binary ops.
+/// Calls to buildInstr will also try to constant fold binary ops.
+class ConstantFoldingMIRBuilder
+ : public FoldableInstructionsBuilder<ConstantFoldingMIRBuilder> {
+public:
+ // Pull in base class constructors.
+ using FoldableInstructionsBuilder<
+ ConstantFoldingMIRBuilder>::FoldableInstructionsBuilder;
+ // Unhide buildInstr
+ using FoldableInstructionsBuilder<ConstantFoldingMIRBuilder>::buildInstr;
+
+ // Implement buildBinaryOp required by FoldableInstructionsBuilder which
+ // tries to constant fold.
+ MachineInstrBuilder buildBinaryOp(unsigned Opcode, unsigned Dst,
+ unsigned Src0, unsigned Src1) {
+ validateBinaryOp(Dst, Src0, Src1);
+ auto MaybeCst = ConstantFoldBinOp(Opcode, Src0, Src1, getMF().getRegInfo());
+ if (MaybeCst)
+ return buildConstant(Dst, MaybeCst->getSExtValue());
+ return buildInstr(Opcode).addDef(Dst).addUse(Src0).addUse(Src1);
+ }
+
+ template <typename DstTy, typename UseArg1Ty, typename UseArg2Ty>
+ MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty, UseArg1Ty &&Arg1,
+ UseArg2Ty &&Arg2) {
+ unsigned Dst = getDestFromArg(Ty);
+ return buildInstr(Opc, Dst, getRegFromArg(std::forward<UseArg1Ty>(Arg1)),
+ getRegFromArg(std::forward<UseArg2Ty>(Arg2)));
+ }
+
+ // Try to provide an overload for buildInstr for binary ops in order to
+ // constant fold.
+ MachineInstrBuilder buildInstr(unsigned Opc, unsigned Dst, unsigned Src0,
+ unsigned Src1) {
+ switch (Opc) {
+ default:
+ break;
+ case TargetOpcode::G_ADD:
+ case TargetOpcode::G_AND:
+ case TargetOpcode::G_ASHR:
+ case TargetOpcode::G_LSHR:
+ case TargetOpcode::G_MUL:
+ case TargetOpcode::G_OR:
+ case TargetOpcode::G_SHL:
+ case TargetOpcode::G_SUB:
+ case TargetOpcode::G_XOR:
+ case TargetOpcode::G_UDIV:
+ case TargetOpcode::G_SDIV:
+ case TargetOpcode::G_UREM:
+ case TargetOpcode::G_SREM: {
+ return buildBinaryOp(Opc, Dst, Src0, Src1);
+ }
+ }
+ return buildInstr(Opc).addDef(Dst).addUse(Src0).addUse(Src1);
+ }
+
+ // Fallback implementation of buildInstr.
+ template <typename DstTy, typename... UseArgsTy>
+ MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty,
+ UseArgsTy &&... Args) {
+ auto MIB = buildInstr(Opc).addDef(getDestFromArg(Ty));
+ addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
+ return MIB;
+ }
+};
+} // namespace llvm
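The shape of the builder above — look up both operands, fold if both are constants, otherwise emit the real instruction — is easy to see in isolation. A toy, LLVM-free sketch of the same dispatch (all types here are invented, for illustration only):

#include <cstdint>
#include <map>
#include <optional>

enum class Op { Add, Mul, Udiv };

// Toy "register file": vreg -> known constant value, if any.
using ConstMap = std::map<unsigned, int64_t>;

std::optional<int64_t> foldBinOp(Op Opc, unsigned Src0, unsigned Src1,
                                 const ConstMap &Consts) {
  auto It0 = Consts.find(Src0), It1 = Consts.find(Src1);
  if (It0 == Consts.end() || It1 == Consts.end())
    return std::nullopt; // not both constant: caller emits the real op
  switch (Opc) {
  case Op::Add:
    return It0->second + It1->second;
  case Op::Mul:
    return It0->second * It1->second;
  case Op::Udiv: // refuse to fold division by zero, like the real builder
    if (It1->second == 0)
      return std::nullopt;
    return static_cast<int64_t>(static_cast<uint64_t>(It0->second) /
                                static_cast<uint64_t>(It1->second));
  }
  return std::nullopt;
}

int main() {
  ConstMap Consts{{1, 6}, {2, 7}};
  auto R = foldBinOp(Op::Mul, 1, 2, Consts);
  return R && *R == 42 ? 0 : 1;
}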
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 7061c01..f355396 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -24,6 +24,7 @@
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Types.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Support/Allocator.h"
#include "llvm/IR/Intrinsics.h"
#include <memory>
#include <utility>
@@ -63,9 +64,83 @@
/// Interface used to lower everything related to calls.
const CallLowering *CLI;
- /// Mapping of the values of the current LLVM IR function
- /// to the related virtual registers.
- ValueToVReg ValToVReg;
+ /// This class contains the mapping from Values to vreg-related data.
+ class ValueToVRegInfo {
+ public:
+ ValueToVRegInfo() = default;
+
+ using VRegListT = SmallVector<unsigned, 1>;
+ using OffsetListT = SmallVector<uint64_t, 1>;
+
+ using const_vreg_iterator =
+ DenseMap<const Value *, VRegListT *>::const_iterator;
+ using const_offset_iterator =
+ DenseMap<const Value *, OffsetListT *>::const_iterator;
+
+ inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }
+
+ VRegListT *getVRegs(const Value &V) {
+ auto It = ValToVRegs.find(&V);
+ if (It != ValToVRegs.end())
+ return It->second;
+
+ return insertVRegs(V);
+ }
+
+ OffsetListT *getOffsets(const Value &V) {
+ auto It = TypeToOffsets.find(V.getType());
+ if (It != TypeToOffsets.end())
+ return It->second;
+
+ return insertOffsets(V);
+ }
+
+ const_vreg_iterator findVRegs(const Value &V) const {
+ return ValToVRegs.find(&V);
+ }
+
+ bool contains(const Value &V) const {
+ return ValToVRegs.find(&V) != ValToVRegs.end();
+ }
+
+ void reset() {
+ ValToVRegs.clear();
+ TypeToOffsets.clear();
+ VRegAlloc.DestroyAll();
+ OffsetAlloc.DestroyAll();
+ }
+
+ private:
+ VRegListT *insertVRegs(const Value &V) {
+ assert(ValToVRegs.find(&V) == ValToVRegs.end() && "Value already exists");
+
+ // We placement new using our fast allocator since we never try to free
+ // the vectors until translation is finished.
+ auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
+ ValToVRegs[&V] = VRegList;
+ return VRegList;
+ }
+
+ OffsetListT *insertOffsets(const Value &V) {
+ assert(TypeToOffsets.find(V.getType()) == TypeToOffsets.end() &&
+ "Type already exists");
+
+ auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
+ TypeToOffsets[V.getType()] = OffsetList;
+ return OffsetList;
+ }
+ SpecificBumpPtrAllocator<VRegListT> VRegAlloc;
+ SpecificBumpPtrAllocator<OffsetListT> OffsetAlloc;
+
+ // We store pointers to vectors here since references may be invalidated
+ // while we hold them if we stored the vectors directly.
+ DenseMap<const Value *, VRegListT*> ValToVRegs;
+ DenseMap<const Type *, OffsetListT*> TypeToOffsets;
+ };
+
+ /// Mapping of the values of the current LLVM IR function to the related
+ /// virtual registers and offsets.
+ ValueToVRegInfo VMap;
// N.b. it's not completely obvious that this will be sufficient for every
// LLVM IR construct (with "invoke" being the obvious candidate to mess up our
@@ -82,7 +157,8 @@
// List of stubbed PHI instructions, for values and basic blocks to be filled
// in once all MachineBasicBlocks have been created.
- SmallVector<std::pair<const PHINode *, MachineInstr *>, 4> PendingPHIs;
+ SmallVector<std::pair<const PHINode *, SmallVector<MachineInstr *, 1>>, 4>
+ PendingPHIs;
/// Record of what frame index has been allocated to specified allocas for
/// this function.
@@ -99,7 +175,7 @@
/// The general algorithm is:
/// 1. Look for a virtual register for each operand or
/// create one.
- /// 2 Update the ValToVReg accordingly.
+ /// 2. Update the VMap accordingly.
/// 2.alt. For constant arguments, if they are compile time constants,
/// produce an immediate in the right operand and do not touch
/// ValToReg. Actually we will go with a virtual register for each
@@ -134,7 +210,7 @@
/// Translate an LLVM string intrinsic (memcpy, memset, ...).
bool translateMemfunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
- unsigned Intrinsic);
+ unsigned ID);
void getStackGuard(unsigned DstReg, MachineIRBuilder &MIRBuilder);
@@ -146,6 +222,19 @@
bool translateInlineAsm(const CallInst &CI, MachineIRBuilder &MIRBuilder);
+ // FIXME: temporary function to expose previous interface to call lowering
+ // until it is refactored.
+ /// Combines all component registers of \p V into a single scalar with size
+ /// "max(Offsets) + last size".
+ unsigned packRegs(const Value &V, MachineIRBuilder &MIRBuilder);
+
+ void unpackRegs(const Value &V, unsigned Src, MachineIRBuilder &MIRBuilder);
+
+ /// Returns true if the value should be split into multiple LLTs.
+ /// If \p Offsets is given then the split type's offsets will be stored in it.
+ bool valueIsSplit(const Value &V,
+ SmallVectorImpl<uint64_t> *Offsets = nullptr);
+
/// Translate call instruction.
/// \pre \p U is a call instruction.
bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
@@ -310,6 +399,9 @@
bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);
+ bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
+ bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
+
// Stubs to keep the compiler happy while we implement the rest of the
// translation.
bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
@@ -327,14 +419,8 @@
bool translateFence(const User &U, MachineIRBuilder &MIRBuilder) {
return false;
}
- bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder) {
- return false;
- }
- bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder) {
- return false;
- }
bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
- return false;
+ return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
}
bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
return false;
@@ -381,9 +467,24 @@
// * Clear the different maps.
void finalizeFunction();
- /// Get the VReg that represents \p Val.
- /// If such VReg does not exist, it is created.
- unsigned getOrCreateVReg(const Value &Val);
+ /// Get the VRegs that represent \p Val.
+ /// Non-aggregate types have just one corresponding VReg and the list can be
+ /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
+ /// not exist, they are created.
+ ArrayRef<unsigned> getOrCreateVRegs(const Value &Val);
+
+ unsigned getOrCreateVReg(const Value &Val) {
+ auto Regs = getOrCreateVRegs(Val);
+ if (Regs.empty())
+ return 0;
+ assert(Regs.size() == 1 &&
+ "attempt to get single VReg for aggregate or void");
+ return Regs[0];
+ }
+
+ /// Allocate some vregs and offsets in the VMap. Then populate just the
+ /// offsets while leaving the vregs empty.
+ ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);
/// Get the frame index that represents \p Val.
/// If such VReg does not exist, it is created.
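The intended consumption pattern for the flattened lists, as a hedged sketch (the destination helper is hypothetical; only getOrCreateVRegs and getOrCreateVReg come from this header):

    // Aggregates are flattened: one vreg per leaf value, offsets in parallel.
    ArrayRef<unsigned> Regs = getOrCreateVRegs(StructVal);
    for (unsigned VReg : Regs)
      MIRBuilder.buildCopy(nextDstReg(), VReg); // nextDstReg() is hypothetical
    // For a non-aggregate value the list has exactly one element, so the
    // scalar convenience wrapper applies:
    unsigned Reg = getOrCreateVReg(ScalarVal);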
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
index eacd135..471def7 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -20,6 +20,7 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CodeGenCoverage.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
#include <bitset>
#include <cstddef>
#include <cstdint>
@@ -31,7 +32,6 @@
class APInt;
class APFloat;
-class LLT;
class MachineInstr;
class MachineInstrBuilder;
class MachineFunction;
@@ -81,6 +81,23 @@
/// failed match.
GIM_Try,
+ /// Switch over the opcode on the specified instruction
+ /// - InsnID - Instruction ID
+ /// - LowerBound - numerically minimum opcode supported
+ /// - UpperBound - numerically maximum + 1 opcode supported
+ /// - Default - failure jump target
+ /// - JumpTable... - (UpperBound - LowerBound) (at least 2) jump targets
+ GIM_SwitchOpcode,
+
+ /// Switch over the LLT on the specified instruction operand
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - LowerBound - numerically minimum Type ID supported
+ /// - UpperBound - numerically maximum + 1 Type ID supported
+ /// - Default - failure jump target
+ /// - JumpTable... - (UpperBound - LowerBound) (at least 2) jump targets
+ GIM_SwitchType,
+
/// Record the specified instruction
/// - NewInsnID - Instruction ID to define
/// - InsnID - Instruction ID
@@ -117,6 +134,23 @@
GIM_CheckAtomicOrdering,
GIM_CheckAtomicOrderingOrStrongerThan,
GIM_CheckAtomicOrderingWeakerThan,
+ /// Check the size of the memory access for the given machine memory operand.
+ /// - InsnID - Instruction ID
+ /// - MMOIdx - MMO index
+ /// - Size - The size in bytes of the memory access
+ GIM_CheckMemorySizeEqualTo,
+ /// Check the size of the memory access for the given machine memory operand
+ /// against the size of an operand.
+ /// - InsnID - Instruction ID
+ /// - MMOIdx - MMO index
+ /// - OpIdx - The operand index to compare the MMO against
+ GIM_CheckMemorySizeEqualToLLT,
+ GIM_CheckMemorySizeLessThanLLT,
+ GIM_CheckMemorySizeGreaterThanLLT,
+ /// Check a generic C++ instruction predicate
+ /// - InsnID - Instruction ID
+ /// - PredicateID - The ID of the predicate function to call
+ GIM_CheckCxxInsnPredicate,
/// Check the type for the specified operand
/// - InsnID - Instruction ID
@@ -133,12 +167,14 @@
/// - OpIdx - Operand index
/// - Expected register bank (specified as a register class)
GIM_CheckRegBankForClass,
+
/// Check the operand matches a complex predicate
/// - InsnID - Instruction ID
/// - OpIdx - Operand index
/// - RendererID - The renderer to hold the result
/// - Complex predicate ID
GIM_CheckComplexPattern,
+
/// Check the operand is a specific integer
/// - InsnID - Instruction ID
/// - OpIdx - Operand index
@@ -155,6 +191,7 @@
/// - OpIdx - Operand index
/// - Expected Intrinsic ID
GIM_CheckIntrinsicID,
+
/// Check the specified operand is an MBB
/// - InsnID - Instruction ID
/// - OpIdx - Operand index
@@ -183,6 +220,7 @@
/// - OldInsnID - Instruction ID to mutate
/// - NewOpcode - The new opcode to use
GIR_MutateOpcode,
+
/// Build a new instruction
/// - InsnID - Instruction ID to define
/// - Opcode - The new opcode to use
@@ -193,6 +231,7 @@
/// - OldInsnID - Instruction ID to copy from
/// - OpIdx - The operand to copy
GIR_Copy,
+
/// Copy an operand to the specified instruction or add a zero register if the
/// operand is a zero immediate.
/// - NewInsnID - Instruction ID to modify
@@ -206,6 +245,7 @@
/// - OpIdx - The operand to copy
/// - SubRegIdx - The subregister to copy
GIR_CopySubReg,
+
/// Add an implicit register def to the specified instruction
/// - InsnID - Instruction ID to modify
/// - RegNum - The register to add
@@ -218,10 +258,13 @@
/// - InsnID - Instruction ID to modify
/// - RegNum - The register to add
GIR_AddRegister,
+
/// Add a temporary register to the specified instruction
/// - InsnID - Instruction ID to modify
/// - TempRegID - The temporary register ID to add
+ /// - TempRegFlags - The register flags to set
GIR_AddTempRegister,
+
/// Add an immediate to the specified instruction
/// - InsnID - Instruction ID to modify
/// - Imm - The immediate to add
@@ -230,6 +273,7 @@
/// - InsnID - Instruction ID to modify
/// - RendererID - The renderer to call
GIR_ComplexRenderer,
+
/// Render sub-operands of complex operands to the specified instruction
/// - InsnID - Instruction ID to modify
/// - RendererID - The renderer to call
@@ -247,24 +291,34 @@
/// The operand index is implicitly 1.
GIR_CopyConstantAsSImm,
+ /// Render a G_FCONSTANT operand as a floating-point immediate.
+ /// - NewInsnID - Instruction ID to modify
+ /// - OldInsnID - Instruction ID to copy from
+ /// The operand index is implicitly 1.
+ GIR_CopyFConstantAsFPImm,
+
/// Constrain an instruction operand to a register class.
/// - InsnID - Instruction ID to modify
/// - OpIdx - Operand index
/// - RCEnum - Register class enumeration value
GIR_ConstrainOperandRC,
+
/// Constrain an instructions operands according to the instruction
/// description.
/// - InsnID - Instruction ID to modify
GIR_ConstrainSelectedInstOperands,
+
/// Merge all memory operands into instruction.
/// - InsnID - Instruction ID to modify
/// - MergeInsnID... - One or more Instruction ID to merge into the result.
/// - GIU_MergeMemOperands_EndOfList - Terminates the list of instructions to
/// merge.
GIR_MergeMemOperands,
+
/// Erase from parent.
/// - InsnID - Instruction ID to erase
GIR_EraseFromParent,
+
/// Create a new temporary register that's not constrained.
/// - TempRegID - The temporary register ID to initialize.
/// - Expected type
@@ -276,6 +330,9 @@
/// Increment the rule coverage counter.
/// - RuleID - The ID of the rule that was covered.
GIR_Coverage,
+
+ /// Keeping track of the number of the GI opcodes. Must be the last entry.
+ GIU_NumOpcodes,
};
enum {
@@ -319,10 +376,24 @@
template <class PredicateBitset, class ComplexMatcherMemFn,
class CustomRendererFn>
struct ISelInfoTy {
+ ISelInfoTy(const LLT *TypeObjects, size_t NumTypeObjects,
+ const PredicateBitset *FeatureBitsets,
+ const ComplexMatcherMemFn *ComplexPredicates,
+ const CustomRendererFn *CustomRenderers)
+ : TypeObjects(TypeObjects),
+ FeatureBitsets(FeatureBitsets),
+ ComplexPredicates(ComplexPredicates),
+ CustomRenderers(CustomRenderers) {
+
+ for (size_t I = 0; I < NumTypeObjects; ++I)
+ TypeIDMap[TypeObjects[I]] = I;
+ }
const LLT *TypeObjects;
const PredicateBitset *FeatureBitsets;
const ComplexMatcherMemFn *ComplexPredicates;
const CustomRendererFn *CustomRenderers;
+
+ SmallDenseMap<LLT, unsigned, 64> TypeIDMap;
};
protected:
@@ -341,14 +412,25 @@
const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
CodeGenCoverage &CoverageInfo) const;
+ virtual const int64_t *getMatchTable() const {
+ llvm_unreachable("Should have been overridden by tablegen if used");
+ }
+
virtual bool testImmPredicate_I64(unsigned, int64_t) const {
- llvm_unreachable("Subclasses must override this to use tablegen");
+ llvm_unreachable(
+ "Subclasses must override this with a tablegen-erated function");
}
virtual bool testImmPredicate_APInt(unsigned, const APInt &) const {
- llvm_unreachable("Subclasses must override this to use tablegen");
+ llvm_unreachable(
+ "Subclasses must override this with a tablegen-erated function");
}
virtual bool testImmPredicate_APFloat(unsigned, const APFloat &) const {
- llvm_unreachable("Subclasses must override this to use tablegen");
+ llvm_unreachable(
+ "Subclasses must override this with a tablegen-erated function");
+ }
+ virtual bool testMIPredicate_MI(unsigned, const MachineInstr &) const {
+ llvm_unreachable(
+ "Subclasses must override this with a tablegen-erated function");
}
/// Constrain a register operand of an instruction \p I to a specified
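The switch opcodes above encode a dense jump table directly in the match-table stream. A hand-written schematic fragment, as it might appear in a derived selector's table (the label offsets are made up; real tables are tablegen-erated):

    const int64_t Table[] = {
        GIM_SwitchOpcode, /*InsnID*/ 0,
        /*LowerBound*/ TargetOpcode::G_ADD,
        /*UpperBound*/ TargetOpcode::G_ADD + 3,
        /*Default*/ 40,      // on failure, resume here
        /*JumpTable[0]*/ 12, // rules for G_ADD start at index 12
        /*JumpTable[1]*/ 0,  // no rule: interpreter jumps to Default
        /*JumpTable[2]*/ 25, // rules for G_ADD + 2 start at index 25
    };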
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
index f7593ba..2003a79 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -41,6 +41,7 @@
GIPFP_I64_Invalid = 0,
GIPFP_APInt_Invalid = 0,
GIPFP_APFloat_Invalid = 0,
+ GIPFP_MI_Invalid = 0,
};
template <class TgtInstructionSelector, class PredicateBitset,
@@ -53,8 +54,9 @@
MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
CodeGenCoverage &CoverageInfo) const {
+
uint64_t CurrentIdx = 0;
- SmallVector<uint64_t, 8> OnFailResumeAt;
+ SmallVector<uint64_t, 4> OnFailResumeAt;
enum RejectAction { RejectAndGiveUp, RejectAndResume };
auto handleReject = [&]() -> RejectAction {
@@ -62,8 +64,7 @@
dbgs() << CurrentIdx << ": Rejected\n");
if (OnFailResumeAt.empty())
return RejectAndGiveUp;
- CurrentIdx = OnFailResumeAt.back();
- OnFailResumeAt.pop_back();
+ CurrentIdx = OnFailResumeAt.pop_back_val();
DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
dbgs() << CurrentIdx << ": Resume at " << CurrentIdx << " ("
<< OnFailResumeAt.size() << " try-blocks remain)\n");
@@ -72,7 +73,8 @@
while (true) {
assert(CurrentIdx != ~0u && "Invalid MatchTable index");
- switch (MatchTable[CurrentIdx++]) {
+ int64_t MatcherOpcode = MatchTable[CurrentIdx++];
+ switch (MatcherOpcode) {
case GIM_Try: {
DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
dbgs() << CurrentIdx << ": Begin try-block\n");
@@ -138,12 +140,13 @@
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t Expected = MatchTable[CurrentIdx++];
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
unsigned Opcode = State.MIs[InsnID]->getOpcode();
+
DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
<< "], ExpectedOpcode=" << Expected
<< ") // Got=" << Opcode << "\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
if (Opcode != Expected) {
if (handleReject() == RejectAndGiveUp)
return false;
@@ -151,6 +154,77 @@
break;
}
+ case GIM_SwitchOpcode: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t LowerBound = MatchTable[CurrentIdx++];
+ int64_t UpperBound = MatchTable[CurrentIdx++];
+ int64_t Default = MatchTable[CurrentIdx++];
+
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ const int64_t Opcode = State.MIs[InsnID]->getOpcode();
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(), {
+ dbgs() << CurrentIdx << ": GIM_SwitchOpcode(MIs[" << InsnID << "], ["
+ << LowerBound << ", " << UpperBound << "), Default=" << Default
+ << ", JumpTable...) // Got=" << Opcode << "\n";
+ });
+ if (Opcode < LowerBound || UpperBound <= Opcode) {
+ CurrentIdx = Default;
+ break;
+ }
+ CurrentIdx = MatchTable[CurrentIdx + (Opcode - LowerBound)];
+ if (!CurrentIdx) {
+ CurrentIdx = Default;
+ break;
+ }
+ OnFailResumeAt.push_back(Default);
+ break;
+ }
+
+ case GIM_SwitchType: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t LowerBound = MatchTable[CurrentIdx++];
+ int64_t UpperBound = MatchTable[CurrentIdx++];
+ int64_t Default = MatchTable[CurrentIdx++];
+
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(), {
+ dbgs() << CurrentIdx << ": GIM_SwitchType(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx << "), [" << LowerBound << ", "
+ << UpperBound << "), Default=" << Default
+ << ", JumpTable...) // Got=";
+ if (!MO.isReg())
+ dbgs() << "Not a VReg\n";
+ else
+ dbgs() << MRI.getType(MO.getReg()) << "\n";
+ });
+ if (!MO.isReg()) {
+ CurrentIdx = Default;
+ break;
+ }
+ const LLT Ty = MRI.getType(MO.getReg());
+ const auto TyI = ISelInfo.TypeIDMap.find(Ty);
+ if (TyI == ISelInfo.TypeIDMap.end()) {
+ CurrentIdx = Default;
+ break;
+ }
+ const int64_t TypeID = TyI->second;
+ if (TypeID < LowerBound || UpperBound <= TypeID) {
+ CurrentIdx = Default;
+ break;
+ }
+ CurrentIdx = MatchTable[CurrentIdx + (TypeID - LowerBound)];
+ if (!CurrentIdx) {
+ CurrentIdx = Default;
+ break;
+ }
+ OnFailResumeAt.push_back(Default);
+ break;
+ }
+
case GIM_CheckNumOperands: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t Expected = MatchTable[CurrentIdx++];
@@ -196,7 +270,8 @@
<< CurrentIdx << ": GIM_CheckAPIntImmPredicate(MIs["
<< InsnID << "], Predicate=" << Predicate << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- assert(State.MIs[InsnID]->getOpcode() && "Expected G_CONSTANT");
+ assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
+ "Expected G_CONSTANT");
assert(Predicate > GIPFP_APInt_Invalid && "Expected a valid predicate");
APInt Value;
if (State.MIs[InsnID]->getOperand(1).isCImm())
@@ -228,6 +303,21 @@
return false;
break;
}
+ case GIM_CheckCxxInsnPredicate: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t Predicate = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs()
+ << CurrentIdx << ": GIM_CheckCxxPredicate(MIs["
+ << InsnID << "], Predicate=" << Predicate << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(Predicate > GIPFP_MI_Invalid && "Expected a valid predicate");
+
+ if (!testMIPredicate_MI(Predicate, *State.MIs[InsnID]))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
case GIM_CheckAtomicOrdering: {
int64_t InsnID = MatchTable[CurrentIdx++];
AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
@@ -235,7 +325,6 @@
dbgs() << CurrentIdx << ": GIM_CheckAtomicOrdering(MIs["
<< InsnID << "], " << (uint64_t)Ordering << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
if (!State.MIs[InsnID]->hasOneMemOperand())
if (handleReject() == RejectAndGiveUp)
return false;
@@ -254,7 +343,6 @@
<< ": GIM_CheckAtomicOrderingOrStrongerThan(MIs["
<< InsnID << "], " << (uint64_t)Ordering << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
if (!State.MIs[InsnID]->hasOneMemOperand())
if (handleReject() == RejectAndGiveUp)
return false;
@@ -273,7 +361,6 @@
<< ": GIM_CheckAtomicOrderingWeakerThan(MIs["
<< InsnID << "], " << (uint64_t)Ordering << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
if (!State.MIs[InsnID]->hasOneMemOperand())
if (handleReject() == RejectAndGiveUp)
return false;
@@ -284,6 +371,87 @@
return false;
break;
}
+ case GIM_CheckMemorySizeEqualTo: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t MMOIdx = MatchTable[CurrentIdx++];
+ uint64_t Size = MatchTable[CurrentIdx++];
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckMemorySizeEqual(MIs[" << InsnID
+ << "]->memoperands() + " << MMOIdx
+ << ", Size=" << Size << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+ if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ MachineMemOperand *MMO = *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << MMO->getSize() << " bytes vs " << Size
+ << " bytes\n");
+ if (MMO->getSize() != Size)
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ break;
+ }
+ case GIM_CheckMemorySizeEqualToLLT:
+ case GIM_CheckMemorySizeLessThanLLT:
+ case GIM_CheckMemorySizeGreaterThanLLT: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t MMOIdx = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+
+ DEBUG_WITH_TYPE(
+ TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckMemorySize"
+ << (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT
+ ? "EqualTo"
+ : MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT
+ ? "GreaterThan"
+ : "LessThan")
+ << "LLT(MIs[" << InsnID << "]->memoperands() + " << MMOIdx
+ << ", OpIdx=" << OpIdx << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isReg()) {
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": Not a register\n");
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ MachineMemOperand *MMO = *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
+
+ unsigned Size = MRI.getType(MO.getReg()).getSizeInBits();
+ if (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT &&
+ MMO->getSize() * 8 != Size) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ } else if (MatcherOpcode == GIM_CheckMemorySizeLessThanLLT &&
+ MMO->getSize() * 8 >= Size) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ } else if (MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT &&
+ MMO->getSize() * 8 <= Size)
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ break;
+ }
case GIM_CheckType: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t OpIdx = MatchTable[CurrentIdx++];
@@ -293,7 +461,6 @@
<< "]->getOperand(" << OpIdx
<< "), TypeID=" << TypeID << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
if (!MO.isReg() ||
MRI.getType(MO.getReg()) != ISelInfo.TypeObjects[TypeID]) {
@@ -312,7 +479,6 @@
<< InsnID << "]->getOperand(" << OpIdx
<< "), SizeInBits=" << SizeInBits << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
// iPTR must be looked up in the target.
if (SizeInBits == 0) {
MachineFunction *MF = State.MIs[InsnID]->getParent()->getParent();
@@ -384,7 +550,6 @@
<< InsnID << "]->getOperand(" << OpIdx
<< "), Value=" << Value << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
if (MO.isReg()) {
// isOperandImmEqual() will sign-extend to 64-bits, so should we.
@@ -480,7 +645,7 @@
}
case GIM_Reject:
DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
- dbgs() << CurrentIdx << ": GIM_Reject");
+ dbgs() << CurrentIdx << ": GIM_Reject\n");
if (handleReject() == RejectAndGiveUp)
return false;
break;
@@ -662,6 +827,23 @@
break;
}
+ // TODO: Needs a test case once we have a pattern that uses this.
+ case GIR_CopyFConstantAsFPImm: {
+ int64_t NewInsnID = MatchTable[CurrentIdx++];
+ int64_t OldInsnID = MatchTable[CurrentIdx++];
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ assert(State.MIs[OldInsnID]->getOpcode() == TargetOpcode::G_FCONSTANT &&
+ "Expected G_FCONSTANT");
+ if (State.MIs[OldInsnID]->getOperand(1).isFPImm())
+ OutMIs[NewInsnID].addFPImm(
+ State.MIs[OldInsnID]->getOperand(1).getFPImm());
+ else
+ llvm_unreachable("Expected FPImm operand");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_CopyFPConstantAsFPImm(OutMIs["
+ << NewInsnID << "], MIs[" << OldInsnID << "])\n");
+ break;
+ }
+
case GIR_CustomRenderer: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t OldInsnID = MatchTable[CurrentIdx++];
@@ -755,7 +937,7 @@
case GIR_Done:
DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
- dbgs() << CurrentIdx << ": GIR_Done");
+ dbgs() << CurrentIdx << ": GIR_Done\n");
return true;
default:
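The three LLT-relative checks above compare the MMO size scaled to bits against the LLT size of the chosen operand. A worked example under assumed values, where MIs[0] is a G_LOAD with an s32 def and a single 2-byte MMO (an extending load):

    GIM_CheckMemorySizeEqualToLLT:     2 * 8 != 32, so reject
    GIM_CheckMemorySizeLessThanLLT:    2 * 8 <  32, so match
    GIM_CheckMemorySizeGreaterThanLLT: 2 * 8 >  32 is false, so reject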
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
index a1f564b..8735876 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -38,7 +38,7 @@
return false;
if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
MI.getOperand(1).getReg(), MRI)) {
- DEBUG(dbgs() << ".. Combine MI: " << MI;);
+ LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = DefMI->getOperand(1).getReg();
Builder.setInstr(MI);
@@ -62,7 +62,7 @@
if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
isInstUnsupported({TargetOpcode::G_CONSTANT, {DstTy}}))
return false;
- DEBUG(dbgs() << ".. Combine MI: " << MI;);
+ LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
Builder.setInstr(MI);
unsigned ZExtSrc = MI.getOperand(1).getReg();
LLT ZExtSrcTy = MRI.getType(ZExtSrc);
@@ -91,7 +91,7 @@
isInstUnsupported({TargetOpcode::G_ASHR, {DstTy}}) ||
isInstUnsupported({TargetOpcode::G_CONSTANT, {DstTy}}))
return false;
- DEBUG(dbgs() << ".. Combine MI: " << MI;);
+ LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
Builder.setInstr(MI);
unsigned SExtSrc = MI.getOperand(1).getReg();
LLT SExtSrcTy = MRI.getType(SExtSrc);
@@ -123,7 +123,7 @@
LLT DstTy = MRI.getType(DstReg);
if (isInstUnsupported({TargetOpcode::G_IMPLICIT_DEF, {DstTy}}))
return false;
- DEBUG(dbgs() << ".. Combine EXT(IMPLICIT_DEF) " << MI;);
+ LLVM_DEBUG(dbgs() << ".. Combine EXT(IMPLICIT_DEF) " << MI;);
Builder.setInstr(MI);
Builder.buildInstr(TargetOpcode::G_IMPLICIT_DEF, DstReg);
markInstAndDefDead(MI, *DefMI, DeadInsts);
@@ -139,9 +139,9 @@
return false;
unsigned NumDefs = MI.getNumOperands() - 1;
- unsigned SrcReg = MI.getOperand(NumDefs).getReg();
- MachineInstr *MergeI = MRI.getVRegDef(SrcReg);
- if (!MergeI || (MergeI->getOpcode() != TargetOpcode::G_MERGE_VALUES))
+ MachineInstr *MergeI = getOpcodeDef(TargetOpcode::G_MERGE_VALUES,
+ MI.getOperand(NumDefs).getReg(), MRI);
+ if (!MergeI)
return false;
const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;
@@ -253,11 +253,8 @@
// and as a result, %3, %2, %1 are dead.
MachineInstr *PrevMI = &MI;
while (PrevMI != &DefMI) {
- // If we're dealing with G_UNMERGE_VALUES, tryCombineMerges doesn't really try
- // to fold copies in between and we can ignore them here.
- if (PrevMI->getOpcode() == TargetOpcode::G_UNMERGE_VALUES)
- break;
- unsigned PrevRegSrc = PrevMI->getOperand(1).getReg();
+ unsigned PrevRegSrc =
+ PrevMI->getOperand(PrevMI->getNumOperands() - 1).getReg();
MachineInstr *TmpDef = MRI.getVRegDef(PrevRegSrc);
if (MRI.hasOneUse(PrevRegSrc)) {
if (TmpDef != &DefMI) {
@@ -269,9 +266,7 @@
break;
PrevMI = TmpDef;
}
- if ((PrevMI == &DefMI ||
- DefMI.getOpcode() == TargetOpcode::G_MERGE_VALUES) &&
- MRI.hasOneUse(DefMI.getOperand(0).getReg()))
+ if (PrevMI == &DefMI && MRI.hasOneUse(DefMI.getOperand(0).getReg()))
DeadInsts.push_back(&DefMI);
}
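getOpcodeDef, which this combiner now leans on, resolves a vreg to its defining instruction while looking through trivial copies. A sketch of the typical call shape (variable names are illustrative):

    // Find the G_TRUNC that (possibly through COPYs) defines SrcReg, if any.
    if (MachineInstr *Trunc =
            getOpcodeDef(TargetOpcode::G_TRUNC, SrcReg, MRI)) {
      unsigned TruncSrc = Trunc->getOperand(1).getReg();
      // ... fold the ext(trunc) pair in terms of TruncSrc ...
    }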
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 8bd8a9d..d122e67 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -93,12 +93,24 @@
const LegalizerInfo &getLegalizerInfo() const { return LI; }
private:
+ /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
+ /// Use by extending the operand's type to \p WideTy using the specified \p
+ /// ExtOpcode for the extension instruction, and replacing the vreg of the
+ /// operand in place.
+ void widenScalarSrc(MachineInstr &MI, LLT WideTy, unsigned OpIdx,
+ unsigned ExtOpcode);
+
+ /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
+ /// Def by extending the operand's type to \p WideTy and truncating it back
+ /// with the \p TruncOpcode, and replacing the vreg of the operand in place.
+ void widenScalarDst(MachineInstr &MI, LLT WideTy, unsigned OpIdx = 0,
+ unsigned TruncOpcode = TargetOpcode::G_TRUNC);
/// Helper function to split a wide generic register into bitwise blocks with
/// the given Type (which implies the number of blocks needed). The generic
/// registers created are appended to Ops, starting at bit 0 of Reg.
void extractParts(unsigned Reg, LLT Ty, int NumParts,
- SmallVectorImpl<unsigned> &Ops);
+ SmallVectorImpl<unsigned> &VRegs);
MachineRegisterInfo &MRI;
const LegalizerInfo &LI;
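Inside a widening legalization step, the two new helpers pair up in the usual extend-then-truncate pattern; a sketch assuming a binary op with the def at operand 0 and uses at operands 1 and 2:

    widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); // first use
    widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT); // second use
    widenScalarDst(MI, WideTy); // def: compute in WideTy, G_TRUNC back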
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
index 117c791..a8c2608 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetOpcodes.h"
@@ -37,6 +38,7 @@
class MachineInstr;
class MachineIRBuilder;
class MachineRegisterInfo;
+class MCInstrInfo;
namespace LegalizeActions {
enum LegalizeAction : std::uint8_t {
@@ -118,6 +120,21 @@
unsigned Opcode;
ArrayRef<LLT> Types;
+ struct MemDesc {
+ uint64_t Size;
+ AtomicOrdering Ordering;
+ };
+
+ /// Operations which require memory can use this to place requirements on the
+ /// memory type for each MMO.
+ ArrayRef<MemDesc> MMODescrs;
+
+ constexpr LegalityQuery(unsigned Opcode, const ArrayRef<LLT> Types,
+ const ArrayRef<MemDesc> MMODescrs)
+ : Opcode(Opcode), Types(Types), MMODescrs(MMODescrs) {}
+ constexpr LegalityQuery(unsigned Opcode, const ArrayRef<LLT> Types)
+ : LegalityQuery(Opcode, Types, {}) {}
+
raw_ostream &print(raw_ostream &OS) const;
};
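A hand-built query using the new constructor, with illustrative values (a 4-byte non-atomic load producing s32 through a 64-bit p0 pointer):

    LLT TypesArr[] = {LLT::scalar(32), LLT::pointer(0, 64)};
    LegalityQuery::MemDesc MMOArr[] = {
        {/*Size=*/4, AtomicOrdering::NotAtomic}};
    LegalityQuery Q(TargetOpcode::G_LOAD, TypesArr, MMOArr);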
@@ -147,8 +164,31 @@
std::function<std::pair<unsigned, LLT>(const LegalityQuery &)>;
namespace LegalityPredicates {
+struct TypePairAndMemSize {
+ LLT Type0;
+ LLT Type1;
+ uint64_t MemSize;
+
+ bool operator==(const TypePairAndMemSize &Other) const {
+ return Type0 == Other.Type0 && Type1 == Other.Type1 &&
+ MemSize == Other.MemSize;
+ }
+};
+
/// True iff P0 and P1 are true.
-LegalityPredicate all(LegalityPredicate P0, LegalityPredicate P1);
+template<typename Predicate>
+Predicate all(Predicate P0, Predicate P1) {
+ return [=](const LegalityQuery &Query) {
+ return P0(Query) && P1(Query);
+ };
+}
+/// True iff all given predicates are true.
+template<typename Predicate, typename... Args>
+Predicate all(Predicate P0, Predicate P1, Args... args) {
+ return all(all(P0, P1), args...);
+}
+/// True iff the given type index is the specified type.
+LegalityPredicate typeIs(unsigned TypeIdx, LLT TypesInit);
/// True iff the given type index is one of the specified types.
LegalityPredicate typeInSet(unsigned TypeIdx,
std::initializer_list<LLT> TypesInit);
@@ -157,6 +197,11 @@
LegalityPredicate
typePairInSet(unsigned TypeIdx0, unsigned TypeIdx1,
std::initializer_list<std::pair<LLT, LLT>> TypesInit);
+/// True iff the given types for the given pair of type indexes, together with
+/// the memory size for the given MMO index, are one of the specified
+/// type-and-size tuples.
+LegalityPredicate typePairAndMemSizeInSet(
+ unsigned TypeIdx0, unsigned TypeIdx1, unsigned MMOIdx,
+ std::initializer_list<TypePairAndMemSize> TypesAndMemSizeInit);
/// True iff the specified type index is a scalar.
LegalityPredicate isScalar(unsigned TypeIdx);
/// True iff the specified type index is a scalar that's narrower than the given
@@ -168,14 +213,22 @@
/// True iff the specified type index is a scalar whose size is not a power of
/// 2.
LegalityPredicate sizeNotPow2(unsigned TypeIdx);
+/// True iff the specified MMO index has a size that is not a power of 2.
+LegalityPredicate memSizeInBytesNotPow2(unsigned MMOIdx);
/// True iff the specified type index is a vector whose element count is not a
/// power of 2.
LegalityPredicate numElementsNotPow2(unsigned TypeIdx);
+/// True iff the specified MMO index has an atomic ordering of Ordering or
+/// stronger.
+LegalityPredicate atomicOrderingAtLeastOrStrongerThan(unsigned MMOIdx,
+ AtomicOrdering Ordering);
} // end namespace LegalityPredicates
namespace LegalizeMutations {
/// Select this specific type for the given type index.
LegalizeMutation changeTo(unsigned TypeIdx, LLT Ty);
+/// Change the type for the given type index to match the type at FromTypeIdx.
+LegalizeMutation changeTo(unsigned TypeIdx, unsigned FromTypeIdx);
/// Widen the type for the given type index to the next power of 2.
LegalizeMutation widenScalarToNextPow2(unsigned TypeIdx, unsigned Min = 0);
/// Add more elements to the type for the given type index to the next power of
@@ -219,6 +272,33 @@
bool IsAliasedByAnother;
SmallVector<LegalizeRule, 2> Rules;
+#ifndef NDEBUG
+ /// If bit I is set, this rule set contains a rule that may handle (predicate
+ /// on, perform an action upon, or both) the type index I. The uncertainty
+ /// comes from free-form rules executing user-provided lambda functions. We
+ /// conservatively assume such rules do the right thing and cover all type
+ /// indices. The bitset is intentionally 1 bit wider than it absolutely needs
+ /// to be to distinguish such cases from the cases where all type indices are
+ /// individually handled.
+ SmallBitVector TypeIdxsCovered{MCOI::OPERAND_LAST_GENERIC -
+ MCOI::OPERAND_FIRST_GENERIC + 2};
+#endif
+
+ unsigned typeIdx(unsigned TypeIdx) {
+ assert(TypeIdx <=
+ (MCOI::OPERAND_LAST_GENERIC - MCOI::OPERAND_FIRST_GENERIC) &&
+ "Type Index is out of bounds");
+#ifndef NDEBUG
+ TypeIdxsCovered.set(TypeIdx);
+#endif
+ return TypeIdx;
+ }
+ void markAllTypeIdxsAsCovered() {
+#ifndef NDEBUG
+ TypeIdxsCovered.set();
+#endif
+ }
+
void add(const LegalizeRule &Rule) {
assert(AliasOf == 0 &&
"RuleSet is aliased, change the representative opcode instead");
@@ -235,7 +315,7 @@
return *this;
}
/// Use the given action when the predicate is true.
- /// Action should not be an action that requires mutation.
+ /// Action should be an action that requires mutation.
LegalizeRuleSet &actionIf(LegalizeAction Action, LegalityPredicate Predicate,
LegalizeMutation Mutation) {
add({Predicate, Action, Mutation});
@@ -246,16 +326,33 @@
LegalizeRuleSet &actionFor(LegalizeAction Action,
std::initializer_list<LLT> Types) {
using namespace LegalityPredicates;
- return actionIf(Action, typeInSet(0, Types));
+ return actionIf(Action, typeInSet(typeIdx(0), Types));
+ }
+ /// Use the given action when type index 0 is any type in the given list.
+ /// Action should be an action that requires mutation.
+ LegalizeRuleSet &actionFor(LegalizeAction Action,
+ std::initializer_list<LLT> Types,
+ LegalizeMutation Mutation) {
+ using namespace LegalityPredicates;
+ return actionIf(Action, typeInSet(typeIdx(0), Types), Mutation);
}
/// Use the given action when type indexes 0 and 1 is any type pair in the
/// given list.
/// Action should not be an action that requires mutation.
- LegalizeRuleSet &
- actionFor(LegalizeAction Action,
- std::initializer_list<std::pair<LLT, LLT>> Types) {
+ LegalizeRuleSet &actionFor(LegalizeAction Action,
+ std::initializer_list<std::pair<LLT, LLT>> Types) {
using namespace LegalityPredicates;
- return actionIf(Action, typePairInSet(0, 1, Types));
+ return actionIf(Action, typePairInSet(typeIdx(0), typeIdx(1), Types));
+ }
+ /// Use the given action when type indexes 0 and 1 are any type pair in the
+ /// given list.
+ /// Action should be an action that requires mutation.
+ LegalizeRuleSet &actionFor(LegalizeAction Action,
+ std::initializer_list<std::pair<LLT, LLT>> Types,
+ LegalizeMutation Mutation) {
+ using namespace LegalityPredicates;
+ return actionIf(Action, typePairInSet(typeIdx(0), typeIdx(1), Types),
+ Mutation);
}
/// Use the given action when type indexes 0 and 1 are both in the given list.
/// That is, the type pair is in the cartesian product of the list.
@@ -263,10 +360,11 @@
LegalizeRuleSet &actionForCartesianProduct(LegalizeAction Action,
std::initializer_list<LLT> Types) {
using namespace LegalityPredicates;
- return actionIf(Action, all(typeInSet(0, Types), typeInSet(1, Types)));
+ return actionIf(Action, all(typeInSet(typeIdx(0), Types),
+ typeInSet(typeIdx(1), Types)));
}
- /// Use the given action when type indexes 0 and 1 are both their respective
- /// lists.
+ /// Use the given action when type indexes 0 and 1 are both in their
+ /// respective lists.
/// That is, the type pair is in the cartesian product of the lists
/// Action should not be an action that requires mutation.
LegalizeRuleSet &
@@ -274,7 +372,20 @@
std::initializer_list<LLT> Types0,
std::initializer_list<LLT> Types1) {
using namespace LegalityPredicates;
- return actionIf(Action, all(typeInSet(0, Types0), typeInSet(1, Types1)));
+ return actionIf(Action, all(typeInSet(typeIdx(0), Types0),
+ typeInSet(typeIdx(1), Types1)));
+ }
+ /// Use the given action when type indexes 0, 1, and 2 are all in their
+ /// respective lists.
+ /// That is, the type triple is in the cartesian product of the lists
+ /// Action should not be an action that requires mutation.
+ LegalizeRuleSet &actionForCartesianProduct(
+ LegalizeAction Action, std::initializer_list<LLT> Types0,
+ std::initializer_list<LLT> Types1, std::initializer_list<LLT> Types2) {
+ using namespace LegalityPredicates;
+ return actionIf(Action, all(typeInSet(typeIdx(0), Types0),
+ all(typeInSet(typeIdx(1), Types1),
+ typeInSet(typeIdx(2), Types2))));
}
public:
@@ -292,6 +403,9 @@
/// The instruction is legal if predicate is true.
LegalizeRuleSet &legalIf(LegalityPredicate Predicate) {
+ // We have no choice but to conservatively assume that the free-form
+ // user-provided Predicate properly handles all type indices:
+ markAllTypeIdxsAsCovered();
return actionIf(LegalizeAction::Legal, Predicate);
}
/// The instruction is legal when type index 0 is any type in the given list.
@@ -303,6 +417,15 @@
LegalizeRuleSet &legalFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
return actionFor(LegalizeAction::Legal, Types);
}
+ /// The instruction is legal when type indexes 0 and 1, along with the memory
+ /// size, match any type-and-size tuple in the given list.
+ LegalizeRuleSet &legalForTypesWithMemSize(
+ std::initializer_list<LegalityPredicates::TypePairAndMemSize>
+ TypesAndMemSize) {
+ return actionIf(LegalizeAction::Legal,
+ LegalityPredicates::typePairAndMemSizeInSet(
+ typeIdx(0), typeIdx(1), /*MMOIdx*/ 0, TypesAndMemSize));
+ }
/// The instruction is legal when type indexes 0 and 1 are both in the given
/// list. That is, the type pair is in the cartesian product of the list.
LegalizeRuleSet &legalForCartesianProduct(std::initializer_list<LLT> Types) {
@@ -315,8 +438,77 @@
return actionForCartesianProduct(LegalizeAction::Legal, Types0, Types1);
}
+ /// The instruction is lowered.
+ LegalizeRuleSet &lower() {
+ using namespace LegalizeMutations;
+ // We have no choice but to conservatively assume that predicate-less lowering
+ // properly handles all type indices by design:
+ markAllTypeIdxsAsCovered();
+ return actionIf(LegalizeAction::Lower, always);
+ }
+ /// The instruction is lowered if predicate is true. Keep type index 0 as the
+ /// same type.
+ LegalizeRuleSet &lowerIf(LegalityPredicate Predicate) {
+ using namespace LegalizeMutations;
+ // We have no choice but to conservatively assume that lowering with a
+ // free-form user-provided Predicate properly handles all type indices:
+ markAllTypeIdxsAsCovered();
+ return actionIf(LegalizeAction::Lower, Predicate);
+ }
+ /// The instruction is lowered if predicate is true.
+ LegalizeRuleSet &lowerIf(LegalityPredicate Predicate,
+ LegalizeMutation Mutation) {
+ // We have no choice but to conservatively assume that lowering with a
+ // free-form user-provided Predicate properly handles all type indices:
+ markAllTypeIdxsAsCovered();
+ return actionIf(LegalizeAction::Lower, Predicate, Mutation);
+ }
+ /// The instruction is lowered when type index 0 is any type in the given
+ /// list. Keep type index 0 as the same type.
+ LegalizeRuleSet &lowerFor(std::initializer_list<LLT> Types) {
+ return actionFor(LegalizeAction::Lower, Types,
+ LegalizeMutations::changeTo(0, 0));
+ }
+ /// The instruction is lowered when type index 0 is any type in the given
+ /// list.
+ LegalizeRuleSet &lowerFor(std::initializer_list<LLT> Types,
+ LegalizeMutation Mutation) {
+ return actionFor(LegalizeAction::Lower, Types, Mutation);
+ }
+ /// The instruction is lowered when type indexes 0 and 1 are any type pair in
+ /// the given list. Keep type index 0 as the same type.
+ LegalizeRuleSet &lowerFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
+ return actionFor(LegalizeAction::Lower, Types,
+ LegalizeMutations::changeTo(0, 0));
+ }
+ /// The instruction is lowered when type indexes 0 and 1 are any type pair in
+ /// the given list.
+ LegalizeRuleSet &lowerFor(std::initializer_list<std::pair<LLT, LLT>> Types,
+ LegalizeMutation Mutation) {
+ return actionFor(LegalizeAction::Lower, Types, Mutation);
+ }
+ /// The instruction is lowered when type indexes 0 and 1 are both in their
+ /// respective lists.
+ LegalizeRuleSet &lowerForCartesianProduct(std::initializer_list<LLT> Types0,
+ std::initializer_list<LLT> Types1) {
+ using namespace LegalityPredicates;
+ return actionForCartesianProduct(LegalizeAction::Lower, Types0, Types1);
+ }
+ /// The instruction is lowered when type indexes 0, 1, and 2 are all in
+ /// their respective lists.
+ LegalizeRuleSet &lowerForCartesianProduct(std::initializer_list<LLT> Types0,
+ std::initializer_list<LLT> Types1,
+ std::initializer_list<LLT> Types2) {
+ using namespace LegalityPredicates;
+ return actionForCartesianProduct(LegalizeAction::Lower, Types0, Types1,
+ Types2);
+ }
+
/// Like legalIf, but for the Libcall action.
LegalizeRuleSet &libcallIf(LegalityPredicate Predicate) {
+ // We have no choice but to conservatively assume that a libcall with a
+ // free-form user-provided Predicate properly handles all type indices:
+ markAllTypeIdxsAsCovered();
return actionIf(LegalizeAction::Libcall, Predicate);
}
LegalizeRuleSet &libcallFor(std::initializer_list<LLT> Types) {
@@ -340,12 +532,18 @@
/// true.
LegalizeRuleSet &widenScalarIf(LegalityPredicate Predicate,
LegalizeMutation Mutation) {
+ // We have no choice but to conservatively assume that an action with a
+ // free-form user-provided Predicate properly handles all type indices:
+ markAllTypeIdxsAsCovered();
return actionIf(LegalizeAction::WidenScalar, Predicate, Mutation);
}
/// Narrow the scalar to the one selected by the mutation if the predicate is
/// true.
LegalizeRuleSet &narrowScalarIf(LegalityPredicate Predicate,
LegalizeMutation Mutation) {
+ // We have no choice but to conservatively assume that an action with a
+ // free-form user-provided Predicate properly handles all type indices:
+ markAllTypeIdxsAsCovered();
return actionIf(LegalizeAction::NarrowScalar, Predicate, Mutation);
}
@@ -353,12 +551,18 @@
/// predicate is true.
LegalizeRuleSet &moreElementsIf(LegalityPredicate Predicate,
LegalizeMutation Mutation) {
+ // We have no choice but to conservatively assume that an action with a
+ // free-form user-provided Predicate properly handles all type indices:
+ markAllTypeIdxsAsCovered();
return actionIf(LegalizeAction::MoreElements, Predicate, Mutation);
}
/// Remove elements to reach the type selected by the mutation if the
/// predicate is true.
LegalizeRuleSet &fewerElementsIf(LegalityPredicate Predicate,
LegalizeMutation Mutation) {
+ // We have no choice but to conservatively assume that an action with a
+ // free-form user-provided Predicate properly handles all type indices:
+ markAllTypeIdxsAsCovered();
return actionIf(LegalizeAction::FewerElements, Predicate, Mutation);
}
@@ -369,8 +573,15 @@
LegalizeRuleSet &unsupportedIf(LegalityPredicate Predicate) {
return actionIf(LegalizeAction::Unsupported, Predicate);
}
+ LegalizeRuleSet &unsupportedIfMemSizeNotPow2() {
+ return actionIf(LegalizeAction::Unsupported,
+ LegalityPredicates::memSizeInBytesNotPow2(0));
+ }
LegalizeRuleSet &customIf(LegalityPredicate Predicate) {
+ // We have no choice but to conservatively assume that a custom action with a
+ // free-form user-provided Predicate properly handles all type indices:
+ markAllTypeIdxsAsCovered();
return actionIf(LegalizeAction::Custom, Predicate);
}
LegalizeRuleSet &customFor(std::initializer_list<LLT> Types) {
@@ -387,54 +598,57 @@
/// Widen the scalar to the next power of two that is at least MinSize.
/// No effect if the type is not a scalar or is a power of two.
- LegalizeRuleSet &widenScalarToNextPow2(unsigned TypeIdx, unsigned MinSize = 0) {
+ LegalizeRuleSet &widenScalarToNextPow2(unsigned TypeIdx,
+ unsigned MinSize = 0) {
using namespace LegalityPredicates;
- return widenScalarIf(
- sizeNotPow2(TypeIdx),
- LegalizeMutations::widenScalarToNextPow2(TypeIdx, MinSize));
+ return actionIf(LegalizeAction::WidenScalar, sizeNotPow2(typeIdx(TypeIdx)),
+ LegalizeMutations::widenScalarToNextPow2(TypeIdx, MinSize));
}
LegalizeRuleSet &narrowScalar(unsigned TypeIdx, LegalizeMutation Mutation) {
using namespace LegalityPredicates;
- return narrowScalarIf(isScalar(TypeIdx), Mutation);
+ return actionIf(LegalizeAction::NarrowScalar, isScalar(typeIdx(TypeIdx)),
+ Mutation);
}
/// Ensure the scalar is at least as wide as Ty.
LegalizeRuleSet &minScalar(unsigned TypeIdx, const LLT &Ty) {
using namespace LegalityPredicates;
using namespace LegalizeMutations;
- return widenScalarIf(narrowerThan(TypeIdx, Ty.getSizeInBits()),
- changeTo(TypeIdx, Ty));
+ return actionIf(LegalizeAction::WidenScalar,
+ narrowerThan(TypeIdx, Ty.getSizeInBits()),
+ changeTo(typeIdx(TypeIdx), Ty));
}
/// Ensure the scalar is at most as wide as Ty.
LegalizeRuleSet &maxScalar(unsigned TypeIdx, const LLT &Ty) {
using namespace LegalityPredicates;
using namespace LegalizeMutations;
- return narrowScalarIf(widerThan(TypeIdx, Ty.getSizeInBits()),
- changeTo(TypeIdx, Ty));
+ return actionIf(LegalizeAction::NarrowScalar,
+ widerThan(TypeIdx, Ty.getSizeInBits()),
+ changeTo(typeIdx(TypeIdx), Ty));
}
/// Conditionally limit the maximum size of the scalar.
/// For example, when the maximum size of one type depends on the size of
/// another such as extracting N bits from an M bit container.
- LegalizeRuleSet &maxScalarIf(LegalityPredicate Predicate, unsigned TypeIdx, const LLT &Ty) {
+ LegalizeRuleSet &maxScalarIf(LegalityPredicate Predicate, unsigned TypeIdx,
+ const LLT &Ty) {
using namespace LegalityPredicates;
using namespace LegalizeMutations;
- return narrowScalarIf(
- [=](const LegalityQuery &Query) {
- return widerThan(TypeIdx, Ty.getSizeInBits()) &&
- Predicate(Query);
- },
- changeTo(TypeIdx, Ty));
+ return actionIf(LegalizeAction::NarrowScalar,
+ [=](const LegalityQuery &Query) {
+ return widerThan(TypeIdx, Ty.getSizeInBits()) &&
+ Predicate(Query);
+ },
+ changeTo(typeIdx(TypeIdx), Ty));
}
/// Limit the range of scalar sizes to MinTy and MaxTy.
- LegalizeRuleSet &clampScalar(unsigned TypeIdx, const LLT &MinTy, const LLT &MaxTy) {
+ LegalizeRuleSet &clampScalar(unsigned TypeIdx, const LLT &MinTy,
+ const LLT &MaxTy) {
assert(MinTy.isScalar() && MaxTy.isScalar() && "Expected scalar types");
-
- return minScalar(TypeIdx, MinTy)
- .maxScalar(TypeIdx, MaxTy);
+ return minScalar(TypeIdx, MinTy).maxScalar(TypeIdx, MaxTy);
}
/// Add more elements to the vector to reach the next power of two.
@@ -442,17 +656,21 @@
/// two.
LegalizeRuleSet &moreElementsToNextPow2(unsigned TypeIdx) {
using namespace LegalityPredicates;
- return moreElementsIf(numElementsNotPow2(TypeIdx),
- LegalizeMutations::moreElementsToNextPow2(TypeIdx));
+ return actionIf(LegalizeAction::MoreElements,
+ numElementsNotPow2(typeIdx(TypeIdx)),
+ LegalizeMutations::moreElementsToNextPow2(TypeIdx));
}
/// Limit the number of elements in EltTy vectors to at least MinElements.
LegalizeRuleSet &clampMinNumElements(unsigned TypeIdx, const LLT &EltTy,
unsigned MinElements) {
- return moreElementsIf(
+ // Mark the type index as covered:
+ typeIdx(TypeIdx);
+ return actionIf(
+ LegalizeAction::MoreElements,
[=](const LegalityQuery &Query) {
LLT VecTy = Query.Types[TypeIdx];
- return VecTy.getElementType() == EltTy &&
+ return VecTy.isVector() && VecTy.getElementType() == EltTy &&
VecTy.getNumElements() < MinElements;
},
[=](const LegalityQuery &Query) {
@@ -464,10 +682,13 @@
/// Limit the number of elements in EltTy vectors to at most MaxElements.
LegalizeRuleSet &clampMaxNumElements(unsigned TypeIdx, const LLT &EltTy,
unsigned MaxElements) {
- return fewerElementsIf(
+ // Mark the type index as covered:
+ typeIdx(TypeIdx);
+ return actionIf(
+ LegalizeAction::FewerElements,
[=](const LegalityQuery &Query) {
LLT VecTy = Query.Types[TypeIdx];
- return VecTy.getElementType() == EltTy &&
+ return VecTy.isVector() && VecTy.getElementType() == EltTy &&
VecTy.getNumElements() > MaxElements;
},
[=](const LegalityQuery &Query) {
@@ -499,6 +720,11 @@
return *this;
}
+ /// Check that no type index is obviously left unhandled by this
+ /// LegalizeRuleSet in any way at all.
+ /// \pre Type indices of the opcode form a dense [0, \p NumTypeIdxs) set.
+ bool verifyTypeIdxsCoverage(unsigned NumTypeIdxs) const;
+
/// Apply the ruleset to the given LegalityQuery.
LegalizeActionStep apply(const LegalityQuery &Query) const;
};
@@ -516,6 +742,10 @@
/// before any query is made or incorrect results may be returned.
void computeTables();
+ /// Perform simple self-diagnostic and assert if there is anything obviously
+ /// wrong with the actions set up.
+ void verify(const MCInstrInfo &MII) const;
+
static bool needsLegalizingToDifferentSize(const LegalizeAction Action) {
using namespace LegalizeActions;
switch (Action) {
@@ -556,7 +786,7 @@
/// setAction ({G_ADD, 0, LLT::scalar(32)}, Legal);
/// setLegalizeScalarToDifferentSizeStrategy(
/// G_ADD, 0, widenToLargerTypesAndNarrowToLargest);
- /// will end up defining getAction({G_ADD, 0, T}) to return the following
+ /// will end up defining getAction({G_ADD, 0, T}) to return the following
/// actions for different scalar types T:
/// LLT::scalar(1)..LLT::scalar(31): {WidenScalar, 0, LLT::scalar(32)}
/// LLT::scalar(32): {Legal, 0, LLT::scalar(32)}
@@ -584,7 +814,7 @@
VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx] = S;
}
- /// A SizeChangeStrategy for the common case where legalization for a
+ /// A SizeChangeStrategy for the common case where legalization for a
/// particular operation consists of only supporting a specific set of type
/// sizes. E.g.
/// setAction ({G_DIV, 0, LLT::scalar(32)}, Legal);
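The additions in this file compose into the fluent rule-definition style; a representative, hypothetical ruleset for a load-like opcode, assuming the usual getActionDefinitionsBuilder entry point (the type and size choices are illustrative, not taken from a target):

    const LLT s32 = LLT::scalar(32);
    const LLT s64 = LLT::scalar(64);
    const LLT p0 = LLT::pointer(0, 64);
    getActionDefinitionsBuilder(TargetOpcode::G_LOAD)
        .legalForTypesWithMemSize({{s32, p0, 4}, {s64, p0, 8}})
        .lowerIf(LegalityPredicates::atomicOrderingAtLeastOrStrongerThan(
            0, AtomicOrdering::Unordered))
        .clampScalar(0, s32, s64)
        .unsupportedIfMemSizeNotPow2();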
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
index 0a46eb9..1e2d476 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
@@ -70,6 +70,8 @@
.set(MachineFunctionProperties::Property::RegBankSelected);
}
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
bool runOnMachineFunction(MachineFunction &MF) override;
};
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
index 797f5e5..f77f9a8 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -222,6 +222,12 @@
}
template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_FSUB, false>
+m_GFSub(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_FSUB, false>(L, R);
+}
+
+template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_AND, true>
m_GAnd(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, TargetOpcode::G_AND, true>(L, R);
@@ -305,6 +311,11 @@
}
template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_FNEG> m_GFNeg(const SrcTy &Src) {
+ return UnaryOp_match<SrcTy, TargetOpcode::G_FNEG>(Src);
+}
+
+template <typename SrcTy>
inline UnaryOp_match<SrcTy, TargetOpcode::COPY> m_Copy(SrcTy &&Src) {
return UnaryOp_match<SrcTy, TargetOpcode::COPY>(std::forward<SrcTy>(Src));
}
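The new matchers slot into the existing pattern-match machinery; a sketch assuming the header's usual mi_match entry point and m_Reg capture helper:

    // Match %Dst = G_FNEG (G_FSUB %X, %Y) and capture the G_FSUB operands.
    unsigned X, Y;
    if (mi_match(DstReg, MRI, m_GFNeg(m_GFSub(m_Reg(X), m_Reg(Y))))) {
      // e.g. rewrite as G_FSUB %Y, %X (illustrative follow-up only)
    }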
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index ef4e0ad..ac1673d 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -31,11 +31,10 @@
class MachineInstr;
class TargetInstrInfo;
-/// Helper class to build MachineInstr.
-/// It keeps internally the insertion point and debug location for all
-/// the new instructions we want to create.
-/// This information can be modify via the related setters.
-class MachineIRBuilder {
+/// Class which stores all the state required in a MachineIRBuilder.
+/// Since MachineIRBuilders will only store state in this object, this makes
+/// it possible to transfer the builder state between different kinds of
+/// MachineIRBuilders.
+struct MachineIRBuilderState {
/// MachineFunction under construction.
MachineFunction *MF;
/// Information used to access the description of the opcodes.
@@ -52,15 +51,23 @@
/// @}
std::function<void(MachineInstr *)> InsertedInstr;
+};
+/// Helper class to build MachineInstr.
+/// It keeps internally the insertion point and debug location for all
+/// the new instructions we want to create.
+/// This information can be modified via the related setters.
+class MachineIRBuilderBase {
+
+ MachineIRBuilderState State;
const TargetInstrInfo &getTII() {
- assert(TII && "TargetInstrInfo is not set");
- return *TII;
+ assert(State.TII && "TargetInstrInfo is not set");
+ return *State.TII;
}
void validateTruncExt(unsigned Dst, unsigned Src, bool IsExtend);
- MachineInstrBuilder buildBinaryOp(unsigned Opcode, unsigned Res, unsigned Op0, unsigned Op1);
+protected:
unsigned getDestFromArg(unsigned Reg) { return Reg; }
unsigned getDestFromArg(LLT Ty) {
return getMF().getRegInfo().createGenericVirtualRegister(Ty);
@@ -88,30 +95,41 @@
return MIB->getOperand(0).getReg();
}
+ void validateBinaryOp(unsigned Res, unsigned Op0, unsigned Op1);
+
public:
/// Some constructors for easy use.
- MachineIRBuilder() = default;
- MachineIRBuilder(MachineFunction &MF) { setMF(MF); }
- MachineIRBuilder(MachineInstr &MI) : MachineIRBuilder(*MI.getMF()) {
+ MachineIRBuilderBase() = default;
+ MachineIRBuilderBase(MachineFunction &MF) { setMF(MF); }
+ MachineIRBuilderBase(MachineInstr &MI) : MachineIRBuilderBase(*MI.getMF()) {
setInstr(MI);
}
+ MachineIRBuilderBase(const MachineIRBuilderState &BState) : State(BState) {}
+
/// Getter for the function we currently build.
MachineFunction &getMF() {
- assert(MF && "MachineFunction is not set");
- return *MF;
+ assert(State.MF && "MachineFunction is not set");
+ return *State.MF;
}
+ /// Getter for DebugLoc
+ const DebugLoc &getDL() { return State.DL; }
+
+ /// Getter for MRI
+ MachineRegisterInfo *getMRI() { return State.MRI; }
+
+ /// Getter for the State
+ MachineIRBuilderState &getState() { return State; }
+
/// Getter for the basic block we currently build.
MachineBasicBlock &getMBB() {
- assert(MBB && "MachineBasicBlock is not set");
- return *MBB;
+ assert(State.MBB && "MachineBasicBlock is not set");
+ return *State.MBB;
}
/// Current insertion point for new instructions.
- MachineBasicBlock::iterator getInsertPt() {
- return II;
- }
+ MachineBasicBlock::iterator getInsertPt() { return State.II; }
/// Set the insertion point before the specified position.
/// \pre MBB must be in getMF().
@@ -136,15 +154,16 @@
/// \name Control where instructions we create are recorded (typically for
/// visiting again later during legalization).
/// @{
+ void recordInsertion(MachineInstr *InsertedInstr) const;
void recordInsertions(std::function<void(MachineInstr *)> InsertedInstr);
void stopRecordingInsertions();
/// @}
/// Set the debug location to \p DL for all the next build instructions.
- void setDebugLoc(const DebugLoc &DL) { this->DL = DL; }
+ void setDebugLoc(const DebugLoc &DL) { this->State.DL = DL; }
/// Get the current instruction's debug location.
- DebugLoc getDebugLoc() { return DL; }
+ DebugLoc getDebugLoc() { return State.DL; }
/// Build and insert <empty> = \p Opcode <empty>.
/// The insertion point is the one set by the last call of either
@@ -155,20 +174,6 @@
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildInstr(unsigned Opcode);
- /// DAG like Generic method for building arbitrary instructions as above.
- /// \Opc opcode for the instruction.
- /// \Ty Either LLT/TargetRegisterClass/unsigned types for Dst
- /// \Args Variadic list of uses of types(unsigned/MachineInstrBuilder)
- /// Uses of type MachineInstrBuilder will perform
- /// getOperand(0).getReg() to convert to register.
- template <typename DstTy, typename... UseArgsTy>
- MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty,
- UseArgsTy &&... Args) {
- auto MIB = buildInstr(Opc).addDef(getDestFromArg(Ty));
- addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
- return MIB;
- }
-
/// Build but don't insert <empty> = \p Opcode <empty>.
///
/// \pre setMF, setBasicBlock or setMI must have been called.
@@ -226,59 +231,6 @@
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildGlobalValue(unsigned Res, const GlobalValue *GV);
- /// Build and insert \p Res = G_ADD \p Op0, \p Op1
- ///
- /// G_ADD sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
- /// truncated to their width.
- ///
- /// \pre setBasicBlock or setMI must have been called.
- /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
- /// with the same (scalar or vector) type).
- ///
- /// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildAdd(unsigned Res, unsigned Op0,
- unsigned Op1);
- template <typename DstTy, typename... UseArgsTy>
- MachineInstrBuilder buildAdd(DstTy &&Ty, UseArgsTy &&... UseArgs) {
- unsigned Res = getDestFromArg(Ty);
- return buildAdd(Res, (getRegFromArg(UseArgs))...);
- }
-
- /// Build and insert \p Res = G_SUB \p Op0, \p Op1
- ///
- /// G_SUB sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
- /// truncated to their width.
- ///
- /// \pre setBasicBlock or setMI must have been called.
- /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
- /// with the same (scalar or vector) type).
- ///
- /// \return a MachineInstrBuilder for the newly created instruction.
- template <typename DstTy, typename... UseArgsTy>
- MachineInstrBuilder buildSub(DstTy &&Ty, UseArgsTy &&... UseArgs) {
- unsigned Res = getDestFromArg(Ty);
- return buildSub(Res, (getRegFromArg(UseArgs))...);
- }
- MachineInstrBuilder buildSub(unsigned Res, unsigned Op0,
- unsigned Op1);
-
- /// Build and insert \p Res = G_MUL \p Op0, \p Op1
- ///
- /// G_MUL sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
- /// truncated to their width.
- ///
- /// \pre setBasicBlock or setMI must have been called.
- /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
- /// with the same (scalar or vector) type).
- ///
- /// \return a MachineInstrBuilder for the newly created instruction.
- template <typename DstTy, typename... UseArgsTy>
- MachineInstrBuilder buildMul(DstTy &&Ty, UseArgsTy &&... UseArgs) {
- unsigned Res = getDestFromArg(Ty);
- return buildMul(Res, (getRegFromArg(UseArgs))...);
- }
- MachineInstrBuilder buildMul(unsigned Res, unsigned Op0,
- unsigned Op1);
/// Build and insert \p Res = G_GEP \p Op0, \p Op1
///
@@ -347,38 +299,6 @@
MachineInstrBuilder buildUAdde(unsigned Res, unsigned CarryOut, unsigned Op0,
unsigned Op1, unsigned CarryIn);
- /// Build and insert \p Res = G_AND \p Op0, \p Op1
- ///
- /// G_AND sets \p Res to the bitwise and of integer parameters \p Op0 and \p
- /// Op1.
- ///
- /// \pre setBasicBlock or setMI must have been called.
- /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
- /// with the same (scalar or vector) type).
- ///
- /// \return a MachineInstrBuilder for the newly created instruction.
- template <typename DstTy, typename... UseArgsTy>
- MachineInstrBuilder buildAnd(DstTy &&Dst, UseArgsTy &&... UseArgs) {
- return buildAnd(getDestFromArg(Dst), getRegFromArg(UseArgs)...);
- }
- MachineInstrBuilder buildAnd(unsigned Res, unsigned Op0,
- unsigned Op1);
-
- /// Build and insert \p Res = G_OR \p Op0, \p Op1
- ///
- /// G_OR sets \p Res to the bitwise or of integer parameters \p Op0 and \p
- /// Op1.
- ///
- /// \pre setBasicBlock or setMI must have been called.
- /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
- /// with the same (scalar or vector) type).
- ///
- /// \return a MachineInstrBuilder for the newly created instruction.
- template <typename DstTy, typename... UseArgsTy>
- MachineInstrBuilder buildOr(DstTy &&Dst, UseArgsTy &&... UseArgs) {
- return buildOr(getDestFromArg(Dst), getRegFromArg(UseArgs)...);
- }
- MachineInstrBuilder buildOr(unsigned Res, unsigned Op0, unsigned Op1);
/// Build and insert \p Res = G_ANYEXT \p Op0
///
@@ -504,7 +424,7 @@
/// \pre setBasicBlock or setMI must have been called.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildBr(MachineBasicBlock &BB);
+ MachineInstrBuilder buildBr(MachineBasicBlock &Dest);
/// Build and insert G_BRCOND \p Tst, \p Dest
///
@@ -518,7 +438,7 @@
/// depend on bit 0 (for now).
///
/// \return The newly created instruction.
- MachineInstrBuilder buildBrCond(unsigned Tst, MachineBasicBlock &BB);
+ MachineInstrBuilder buildBrCond(unsigned Tst, MachineBasicBlock &Dest);
/// Build and insert G_BRINDIRECT \p Tgt
///
@@ -602,6 +522,18 @@
MachineInstrBuilder buildLoad(unsigned Res, unsigned Addr,
MachineMemOperand &MMO);
+ /// Build and insert `Res = <opcode> Addr, MMO`.
+ ///
+ /// Loads the value stored at \p Addr. Puts the result in \p Res.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildLoadInstr(unsigned Opcode, unsigned Res,
+ unsigned Addr, MachineMemOperand &MMO);
+
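
A minimal usage sketch for buildLoadInstr, assuming a configured MachineIRBuilder; MIRBuilder, WantSignExtend, Res, Addr, and MMO are hypothetical names, and G_SEXTLOAD/G_ZEXTLOAD are the extending-load opcodes this overload is typically called with.

// Sketch: pick the extending-load opcode at the call site.
unsigned Opc =
    WantSignExtend ? TargetOpcode::G_SEXTLOAD : TargetOpcode::G_ZEXTLOAD;
MachineInstrBuilder MIB = MIRBuilder.buildLoadInstr(Opc, Res, Addr, MMO);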
/// Build and insert `G_STORE Val, Addr, MMO`.
///
/// Stores the value \p Val to \p Addr.
@@ -626,7 +558,7 @@
template <typename DstType> MachineInstrBuilder buildUndef(DstType &&Res) {
return buildUndef(getDestFromArg(Res));
}
- MachineInstrBuilder buildUndef(unsigned Dst);
+ MachineInstrBuilder buildUndef(unsigned Res);
  /// Build and insert instructions to put \p Ops together at the specified \p
/// Indices to form a larger register.
@@ -785,7 +717,28 @@
MachineInstrBuilder buildExtractVectorElement(unsigned Res, unsigned Val,
unsigned Idx);
- /// Build and insert `OldValRes = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal,
+ /// Build and insert `OldValRes<def>, SuccessRes<def> =
+ /// G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with \p NewVal if it is currently
+  /// \p CmpVal, otherwise leaves it unchanged. Puts the original value from \p
+  /// Addr in \p OldValRes, along with an s1 in \p SuccessRes indicating whether
+  /// it was replaced.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register of scalar type.
+ /// \pre \p SuccessRes must be a generic virtual register of scalar type. It
+ /// will be assigned 0 on failure and 1 on success.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, \p CmpVal, and \p NewVal must be generic virtual
+ /// registers of the same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder
+ buildAtomicCmpXchgWithSuccess(unsigned OldValRes, unsigned SuccessRes,
+ unsigned Addr, unsigned CmpVal, unsigned NewVal,
+ MachineMemOperand &MMO);
+
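
A minimal usage sketch, assuming a configured builder; all registers and the MMO here are hypothetical values produced by the surrounding lowering.

// SuccessReg is an s1: assigned 1 if the exchange happened, 0 otherwise.
MIRBuilder.buildAtomicCmpXchgWithSuccess(OldValReg, SuccessReg, AddrReg,
                                         CmpReg, NewReg, MMO);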
+ /// Build and insert `OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal,
/// MMO`.
///
/// Atomically replace the value at \p Addr with \p NewVal if it is currently
@@ -802,6 +755,338 @@
MachineInstrBuilder buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr,
unsigned CmpVal, unsigned NewVal,
MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO`.
+ ///
+ /// Atomically read-modify-update the value at \p Addr with \p Val. Puts the
+ /// original value from \p Addr in \p OldValRes. The modification is
+ /// determined by the opcode.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMW(unsigned Opcode, unsigned OldValRes,
+ unsigned Addr, unsigned Val,
+ MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with \p Val. Puts the original
+ /// value from \p Addr in \p OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWXchg(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the addition of \p Val and
+ /// the original value. Puts the original value from \p Addr in \p OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWAdd(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the subtraction of \p Val and
+ /// the original value. Puts the original value from \p Addr in \p OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWSub(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the bitwise and of \p Val and
+ /// the original value. Puts the original value from \p Addr in \p OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWAnd(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the bitwise nand of \p Val
+ /// and the original value. Puts the original value from \p Addr in \p
+ /// OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWNand(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the bitwise or of \p Val and
+ /// the original value. Puts the original value from \p Addr in \p OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWOr(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the bitwise xor of \p Val and
+ /// the original value. Puts the original value from \p Addr in \p OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWXor(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the signed maximum of \p
+ /// Val and the original value. Puts the original value from \p Addr in \p
+ /// OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWMax(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the signed minimum of \p
+ /// Val and the original value. Puts the original value from \p Addr in \p
+ /// OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWMin(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the unsigned maximum of \p
+ /// Val and the original value. Puts the original value from \p Addr in \p
+ /// OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWUmax(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the unsigned minimum of \p
+ /// Val and the original value. Puts the original value from \p Addr in \p
+ /// OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWUmin(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO);
+
+ /// Build and insert \p Res = G_BLOCK_ADDR \p BA
+ ///
+ /// G_BLOCK_ADDR computes the address of a basic block.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register of a pointer type.
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildBlockAddress(unsigned Res, const BlockAddress *BA);
+};
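
For the G_ATOMICRMW_* helpers above, a usage sketch under the same assumptions (hypothetical registers and memory operand); the generic form suits a dynamically chosen opcode.

// Typed helper for an IR `atomicrmw add`; OldValReg receives the old value.
MIRBuilder.buildAtomicRMWAdd(OldValReg, AddrReg, ValReg, MMO);
// Generic form, equivalent here; OldValReg2 is another hypothetical vreg.
MIRBuilder.buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValReg2, AddrReg,
                          ValReg, MMO);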
+
+/// A CRTP class that contains methods for building instructions that can
+/// be constant folded. MachineIRBuilders that want to inherit from this will
+/// need to implement buildBinaryOp (for constant folding binary ops).
+/// Alternatively, they can implement buildInstr(Opc, Dst, Uses...) to perform
+/// additional folding for Opc.
+template <typename Base>
+class FoldableInstructionsBuilder : public MachineIRBuilderBase {
+ Base &base() { return static_cast<Base &>(*this); }
+
+public:
+ using MachineIRBuilderBase::MachineIRBuilderBase;
+ /// Build and insert \p Res = G_ADD \p Op0, \p Op1
+ ///
+ /// G_ADD sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
+ /// truncated to their width.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  /// with the same (scalar or vector) type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAdd(unsigned Dst, unsigned Src0, unsigned Src1) {
+ return base().buildBinaryOp(TargetOpcode::G_ADD, Dst, Src0, Src1);
+ }
+ template <typename DstTy, typename... UseArgsTy>
+ MachineInstrBuilder buildAdd(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+ unsigned Res = base().getDestFromArg(Ty);
+ return base().buildAdd(Res, (base().getRegFromArg(UseArgs))...);
+ }
+
+ /// Build and insert \p Res = G_SUB \p Op0, \p Op1
+ ///
+  /// G_SUB sets \p Res to the difference of integer parameters \p Op0 and \p Op1,
+ /// truncated to their width.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  /// with the same (scalar or vector) type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildSub(unsigned Dst, unsigned Src0, unsigned Src1) {
+ return base().buildBinaryOp(TargetOpcode::G_SUB, Dst, Src0, Src1);
+ }
+ template <typename DstTy, typename... UseArgsTy>
+ MachineInstrBuilder buildSub(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+ unsigned Res = base().getDestFromArg(Ty);
+ return base().buildSub(Res, (base().getRegFromArg(UseArgs))...);
+ }
+
+ /// Build and insert \p Res = G_MUL \p Op0, \p Op1
+ ///
+  /// G_MUL sets \p Res to the product of integer parameters \p Op0 and \p Op1,
+ /// truncated to their width.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  /// with the same (scalar or vector) type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildMul(unsigned Dst, unsigned Src0, unsigned Src1) {
+ return base().buildBinaryOp(TargetOpcode::G_MUL, Dst, Src0, Src1);
+ }
+ template <typename DstTy, typename... UseArgsTy>
+ MachineInstrBuilder buildMul(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+ unsigned Res = base().getDestFromArg(Ty);
+ return base().buildMul(Res, (base().getRegFromArg(UseArgs))...);
+ }
+
+ /// Build and insert \p Res = G_AND \p Op0, \p Op1
+ ///
+ /// G_AND sets \p Res to the bitwise and of integer parameters \p Op0 and \p
+ /// Op1.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  /// with the same (scalar or vector) type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAnd(unsigned Dst, unsigned Src0, unsigned Src1) {
+ return base().buildBinaryOp(TargetOpcode::G_AND, Dst, Src0, Src1);
+ }
+ template <typename DstTy, typename... UseArgsTy>
+ MachineInstrBuilder buildAnd(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+ unsigned Res = base().getDestFromArg(Ty);
+ return base().buildAnd(Res, (base().getRegFromArg(UseArgs))...);
+ }
+
+ /// Build and insert \p Res = G_OR \p Op0, \p Op1
+ ///
+ /// G_OR sets \p Res to the bitwise or of integer parameters \p Op0 and \p
+ /// Op1.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  /// with the same (scalar or vector) type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildOr(unsigned Dst, unsigned Src0, unsigned Src1) {
+ return base().buildBinaryOp(TargetOpcode::G_OR, Dst, Src0, Src1);
+ }
+ template <typename DstTy, typename... UseArgsTy>
+ MachineInstrBuilder buildOr(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+ unsigned Res = base().getDestFromArg(Ty);
+ return base().buildOr(Res, (base().getRegFromArg(UseArgs))...);
+ }
+};
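
To make the CRTP contract concrete, a hedged sketch of a folding builder: the class name is hypothetical, getConstantVRegVal comes from GlobalISel/Utils.h, and everything else mirrors the plain MachineIRBuilder defined next (the in-tree ConstantFoldingMIRBuilder plays a similar role).

class MyFoldingIRBuilder
    : public FoldableInstructionsBuilder<MyFoldingIRBuilder> {
public:
  using FoldableInstructionsBuilder<
      MyFoldingIRBuilder>::FoldableInstructionsBuilder;

  MachineInstrBuilder buildBinaryOp(unsigned Opcode, unsigned Dst,
                                    unsigned Src0, unsigned Src1) {
    validateBinaryOp(Dst, Src0, Src1);
    MachineRegisterInfo &MRI = getMF().getRegInfo();
    if (Opcode == TargetOpcode::G_ADD) {
      // Fold when both sources are G_CONSTANT-defined vregs.
      auto C0 = getConstantVRegVal(Src0, MRI);
      auto C1 = getConstantVRegVal(Src1, MRI);
      if (C0 && C1)
        return buildConstant(Dst, *C0 + *C1);
    }
    return buildInstr(Opcode).addDef(Dst).addUse(Src0).addUse(Src1);
  }
};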
+
+class MachineIRBuilder : public FoldableInstructionsBuilder<MachineIRBuilder> {
+public:
+ using FoldableInstructionsBuilder<
+ MachineIRBuilder>::FoldableInstructionsBuilder;
+ MachineInstrBuilder buildBinaryOp(unsigned Opcode, unsigned Dst,
+ unsigned Src0, unsigned Src1) {
+ validateBinaryOp(Dst, Src0, Src1);
+ return buildInstr(Opcode).addDef(Dst).addUse(Src0).addUse(Src1);
+ }
+ using FoldableInstructionsBuilder<MachineIRBuilder>::buildInstr;
+  /// DAG-like generic method for building arbitrary instructions as above.
+  /// \p Opc is the opcode for the instruction.
+  /// \p Ty is either an LLT, a TargetRegisterClass, or an unsigned register,
+  /// and names or creates the destination.
+  /// \p Args is a variadic list of uses (unsigned register or
+  /// MachineInstrBuilder); a MachineInstrBuilder use is converted to its
+  /// register via getOperand(0).getReg().
+ template <typename DstTy, typename... UseArgsTy>
+ MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty,
+ UseArgsTy &&... Args) {
+ auto MIB = buildInstr(Opc).addDef(getDestFromArg(Ty));
+ addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
+ return MIB;
+ }
};
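
A short sketch of the variadic form, assuming a configured builder `Builder`, a MachineRegisterInfo `MRI`, and a hypothetical source register `SrcReg`.

LLT S32 = LLT::scalar(32);
unsigned CstReg = MRI.createGenericVirtualRegister(S32);
auto Cst = Builder.buildConstant(CstReg, 42); // a MachineInstrBuilder
// The LLT names a fresh destination vreg; the MachineInstrBuilder use is
// unwrapped via getOperand(0).getReg(), while SrcReg is used as-is.
auto Sum = Builder.buildInstr(TargetOpcode::G_ADD, S32, Cst, SrcReg);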
} // End namespace llvm.
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h
index 5d75842..d5612e1 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h
@@ -42,7 +42,7 @@
public:
RegisterBank(unsigned ID, const char *Name, unsigned Size,
- const uint32_t *ContainedRegClasses, unsigned NumRegClasses);
+ const uint32_t *CoveredClasses, unsigned NumRegClasses);
/// Get the identifier of this register bank.
unsigned getID() const { return ID; }
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
index 837035f..51e3a27 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -19,6 +19,7 @@
namespace llvm {
+class AnalysisUsage;
class MachineFunction;
class MachineInstr;
class MachineOperand;
@@ -102,5 +103,10 @@
/// Returns an APFloat from Val converted to the appropriate size.
APFloat getAPFloatFromSize(double Val, unsigned Size);
+
+/// Modify analysis usage so it preserves passes required for the SelectionDAG
+/// fallback.
+void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);
+
} // End namespace llvm.
#endif
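
The intended call pattern for getSelectionDAGFallbackAnalysisUsage, sketched with a hypothetical pass name:

void MyGISelPass::getAnalysisUsage(AnalysisUsage &AU) const {
  // Preserve the analyses the SelectionDAG fallback path will need if
  // GlobalISel aborts on this function.
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}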
diff --git a/linux-x64/clang/include/llvm/CodeGen/ISDOpcodes.h b/linux-x64/clang/include/llvm/CodeGen/ISDOpcodes.h
index ea94871..80bd796 100644
--- a/linux-x64/clang/include/llvm/CodeGen/ISDOpcodes.h
+++ b/linux-x64/clang/include/llvm/CodeGen/ISDOpcodes.h
@@ -377,6 +377,8 @@
/// When the 1st operand is a vector, the shift amount must be in the same
/// type. (TLI.getShiftAmountTy() will return the same type when the input
/// type is a vector.)
+ /// For rotates, the shift amount is treated as an unsigned amount modulo
+ /// the element size of the first operand.
SHL, SRA, SRL, ROTL, ROTR,
/// Byte Swap and Counting operators.
@@ -412,19 +414,11 @@
/// then the result type must also be a vector type.
SETCC,
- /// Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, and
- /// op #2 is a *carry value*. This operator checks the result of
- /// "LHS - RHS - Carry", and can be used to compare two wide integers:
- /// (setcce lhshi rhshi (subc lhslo rhslo) cc). Only valid for integers.
- /// FIXME: This node is deprecated in favor of SETCCCARRY.
- /// It is kept around for now to provide a smooth transition path
- /// toward the use of SETCCCARRY and will eventually be removed.
- SETCCE,
-
/// Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but
/// op #2 is a boolean indicating if there is an incoming carry. This
/// operator checks the result of "LHS - RHS - Carry", and can be used to
- /// compare two wide integers: (setcce lhshi rhshi (subc lhslo rhslo) cc).
+ /// compare two wide integers:
+ /// (setcccarry lhshi rhshi (subcarry lhslo rhslo) cc).
/// Only valid for integers.
SETCCCARRY,
@@ -495,7 +489,8 @@
ZERO_EXTEND_VECTOR_INREG,
/// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
- /// integer.
+ /// integer. These have the same semantics as fptosi and fptoui in IR. If
+ /// the FP value cannot fit in the integer type, the results are undefined.
FP_TO_SINT,
FP_TO_UINT,
diff --git a/linux-x64/clang/include/llvm/CodeGen/LatencyPriorityQueue.h b/linux-x64/clang/include/llvm/CodeGen/LatencyPriorityQueue.h
index 988e6d6..9b8d83c 100644
--- a/linux-x64/clang/include/llvm/CodeGen/LatencyPriorityQueue.h
+++ b/linux-x64/clang/include/llvm/CodeGen/LatencyPriorityQueue.h
@@ -17,6 +17,7 @@
#define LLVM_CODEGEN_LATENCYPRIORITYQUEUE_H
#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/Config/llvm-config.h"
namespace llvm {
class LatencyPriorityQueue;
@@ -26,7 +27,7 @@
LatencyPriorityQueue *PQ;
explicit latency_sort(LatencyPriorityQueue *pq) : PQ(pq) {}
- bool operator()(const SUnit* left, const SUnit* right) const;
+ bool operator()(const SUnit* LHS, const SUnit* RHS) const;
};
class LatencyPriorityQueue : public SchedulingPriorityQueue {
@@ -83,11 +84,15 @@
void remove(SUnit *SU) override;
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump(ScheduleDAG *DAG) const override;
+#endif
+
// scheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor. If so, that
// single predecessor has a higher priority, since scheduling it will make
// the node available.
- void scheduledNode(SUnit *Node) override;
+ void scheduledNode(SUnit *SU) override;
private:
void AdjustPriorityOfUnscheduledPreds(SUnit *SU);
diff --git a/linux-x64/clang/include/llvm/CodeGen/LazyMachineBlockFrequencyInfo.h b/linux-x64/clang/include/llvm/CodeGen/LazyMachineBlockFrequencyInfo.h
index 848ee1d..221f16a 100644
--- a/linux-x64/clang/include/llvm/CodeGen/LazyMachineBlockFrequencyInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/LazyMachineBlockFrequencyInfo.h
@@ -23,7 +23,7 @@
#include "llvm/CodeGen/MachineLoopInfo.h"
namespace llvm {
-/// \brief This is an alternative analysis pass to MachineBlockFrequencyInfo.
+/// This is an alternative analysis pass to MachineBlockFrequencyInfo.
/// The difference is that with this pass, the block frequencies are not
/// computed when the analysis pass is executed but rather when the BFI result
/// is explicitly requested by the analysis client.
@@ -49,7 +49,7 @@
/// The function.
MachineFunction *MF = nullptr;
- /// \brief Calculate MBFI and all other analyses that's not available and
+  /// Calculate MBFI and all other analyses that are not available and
/// required by BFI.
MachineBlockFrequencyInfo &calculateIfNotAvailable() const;
@@ -58,10 +58,10 @@
LazyMachineBlockFrequencyInfoPass();
- /// \brief Compute and return the block frequencies.
+ /// Compute and return the block frequencies.
MachineBlockFrequencyInfo &getBFI() { return calculateIfNotAvailable(); }
- /// \brief Compute and return the block frequencies.
+ /// Compute and return the block frequencies.
const MachineBlockFrequencyInfo &getBFI() const {
return calculateIfNotAvailable();
}
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveInterval.h b/linux-x64/clang/include/llvm/CodeGen/LiveInterval.h
index f4fa872..cdf9ad2 100644
--- a/linux-x64/clang/include/llvm/CodeGen/LiveInterval.h
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveInterval.h
@@ -326,7 +326,7 @@
/// createDeadDef - Make sure the range has a value defined at Def.
/// If one already exists, return it. Otherwise allocate a new value and
/// add liveness for a dead def.
- VNInfo *createDeadDef(SlotIndex Def, VNInfo::Allocator &VNInfoAllocator);
+ VNInfo *createDeadDef(SlotIndex Def, VNInfo::Allocator &VNIAlloc);
/// Create a def of value @p VNI. Return @p VNI. If there already exists
/// a definition at VNI->def, the value defined there must be @p VNI.
@@ -454,7 +454,7 @@
/// overlapsFrom - Return true if the intersection of the two live ranges
/// is not empty. The specified iterator is a hint that we can begin
/// scanning the Other range starting at I.
- bool overlapsFrom(const LiveRange &Other, const_iterator I) const;
+ bool overlapsFrom(const LiveRange &Other, const_iterator StartPos) const;
/// Returns true if all segments of the @p Other live range are completely
/// covered by this live range.
@@ -482,7 +482,7 @@
/// @p Use, return {nullptr, false}. If there is an "undef" before @p Use,
/// return {nullptr, true}.
std::pair<VNInfo*,bool> extendInBlock(ArrayRef<SlotIndex> Undefs,
- SlotIndex StartIdx, SlotIndex Use);
+ SlotIndex StartIdx, SlotIndex Kill);
/// Simplified version of the above "extendInBlock", which assumes that
/// no register lanes are undefined by <def,read-undef> operands.
@@ -609,7 +609,7 @@
void print(raw_ostream &OS) const;
void dump() const;
- /// \brief Walk the range and assert if any invariants fail to hold.
+ /// Walk the range and assert if any invariants fail to hold.
///
/// Note that this is a no-op when asserts are disabled.
#ifdef NDEBUG
@@ -791,7 +791,7 @@
/// L00E0 and L0010 and the L000F lane into L0007 and L0008. The Mod
/// function will be applied to the L0010 and L0008 subranges.
void refineSubRanges(BumpPtrAllocator &Allocator, LaneBitmask LaneMask,
- std::function<void(LiveInterval::SubRange&)> Mod);
+ std::function<void(LiveInterval::SubRange&)> Apply);
bool operator<(const LiveInterval& other) const {
const SlotIndex &thisIndex = beginIndex();
@@ -802,7 +802,7 @@
void print(raw_ostream &OS) const;
void dump() const;
- /// \brief Walks the interval and assert if any invariants fail to hold.
+  /// Walks the interval and asserts if any invariants fail to hold.
///
/// Note that this is a no-op when asserts are disabled.
#ifdef NDEBUG
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveIntervalUnion.h b/linux-x64/clang/include/llvm/CodeGen/LiveIntervalUnion.h
index b922e54..9e2799b 100644
--- a/linux-x64/clang/include/llvm/CodeGen/LiveIntervalUnion.h
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveIntervalUnion.h
@@ -154,7 +154,7 @@
unsigned MaxInterferingRegs = std::numeric_limits<unsigned>::max());
// Was this virtual register visited during collectInterferingVRegs?
- bool isSeenInterference(LiveInterval *VReg) const;
+ bool isSeenInterference(LiveInterval *VirtReg) const;
// Did collectInterferingVRegs collect all interferences?
bool seenAllInterferences() const { return SeenAllInterferences; }
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveIntervals.h b/linux-x64/clang/include/llvm/CodeGen/LiveIntervals.h
index 1150f3c..291a07a 100644
--- a/linux-x64/clang/include/llvm/CodeGen/LiveIntervals.h
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveIntervals.h
@@ -105,7 +105,7 @@
/// Calculate the spill weight to assign to a single instruction.
static float getSpillWeight(bool isDef, bool isUse,
const MachineBlockFrequencyInfo *MBFI,
- const MachineInstr &Instr);
+ const MachineInstr &MI);
/// Calculate the spill weight to assign to a single instruction.
static float getSpillWeight(bool isDef, bool isUse,
@@ -462,6 +462,10 @@
void computeRegUnitRange(LiveRange&, unsigned Unit);
void computeVirtRegInterval(LiveInterval&);
+ using ShrinkToUsesWorkList = SmallVector<std::pair<SlotIndex, VNInfo*>, 16>;
+ void extendSegmentsToUses(LiveRange &Segments,
+ ShrinkToUsesWorkList &WorkList, unsigned Reg,
+ LaneBitmask LaneMask);
/// Helper function for repairIntervalsInRange(), walks backwards and
/// creates/modifies live segments in \p LR to match the operands found.
diff --git a/linux-x64/clang/include/llvm/CodeGen/LivePhysRegs.h b/linux-x64/clang/include/llvm/CodeGen/LivePhysRegs.h
index f9aab0d..301a450 100644
--- a/linux-x64/clang/include/llvm/CodeGen/LivePhysRegs.h
+++ b/linux-x64/clang/include/llvm/CodeGen/LivePhysRegs.h
@@ -44,7 +44,7 @@
class MachineRegisterInfo;
class raw_ostream;
-/// \brief A set of physical registers with utility functions to track liveness
+/// A set of physical registers with utility functions to track liveness
/// when walking backward/forward through a basic block.
class LivePhysRegs {
const TargetRegisterInfo *TRI = nullptr;
@@ -84,7 +84,7 @@
LiveRegs.insert(*SubRegs);
}
- /// \brief Removes a physical register, all its sub-registers, and all its
+ /// Removes a physical register, all its sub-registers, and all its
/// super-registers from the set.
void removeReg(unsigned Reg) {
assert(TRI && "LivePhysRegs is not initialized.");
@@ -98,7 +98,7 @@
SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> *Clobbers =
nullptr);
- /// \brief Returns true if register \p Reg is contained in the set. This also
+ /// Returns true if register \p Reg is contained in the set. This also
/// works if only the super register of \p Reg has been defined, because
/// addReg() always adds all sub-registers to the set as well.
/// Note: Returns false if just some sub registers are live, use available()
@@ -155,7 +155,7 @@
void dump() const;
private:
- /// \brief Adds live-in registers from basic block \p MBB, taking associated
+ /// Adds live-in registers from basic block \p MBB, taking associated
/// lane masks into consideration.
void addBlockLiveIns(const MachineBasicBlock &MBB);
@@ -169,7 +169,7 @@
return OS;
}
-/// \brief Computes registers live-in to \p MBB assuming all of its successors
+/// Computes registers live-in to \p MBB assuming all of its successors
/// live-in lists are up-to-date. Puts the result into the given LivePhysReg
/// instance \p LiveRegs.
void computeLiveIns(LivePhysRegs &LiveRegs, const MachineBasicBlock &MBB);
@@ -185,6 +185,13 @@
void computeAndAddLiveIns(LivePhysRegs &LiveRegs,
MachineBasicBlock &MBB);
+/// Convenience function for recomputing live-ins for \p MBB.
+static inline void recomputeLiveIns(MachineBasicBlock &MBB) {
+ LivePhysRegs LPR;
+ MBB.clearLiveIns();
+ computeAndAddLiveIns(LPR, MBB);
+}
+
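
Typical call site, sketched with a hypothetical block pointer; the successor live-in lists must already be up to date.

// After splitting a block or rewriting its contents:
recomputeLiveIns(*NewMBB); // NewMBB: hypothetical MachineBasicBlock *.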
} // end namespace llvm
#endif // LLVM_CODEGEN_LIVEPHYSREGS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveRangeEdit.h b/linux-x64/clang/include/llvm/CodeGen/LiveRangeEdit.h
index 82b1f0b..5383029 100644
--- a/linux-x64/clang/include/llvm/CodeGen/LiveRangeEdit.h
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveRangeEdit.h
@@ -117,7 +117,7 @@
/// registers are created.
void MRI_NoteNewVirtualRegister(unsigned VReg) override;
- /// \brief Check if MachineOperand \p MO is a last use/kill either in the
+ /// Check if MachineOperand \p MO is a last use/kill either in the
/// main live range of \p LI or in one of the matching subregister ranges.
bool useIsKill(const LiveInterval &LI, const MachineOperand &MO) const;
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveRegUnits.h b/linux-x64/clang/include/llvm/CodeGen/LiveRegUnits.h
index dc4956d..2495459 100644
--- a/linux-x64/clang/include/llvm/CodeGen/LiveRegUnits.h
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveRegUnits.h
@@ -16,6 +16,7 @@
#define LLVM_CODEGEN_LIVEREGUNITS_H
#include "llvm/ADT/BitVector.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
@@ -40,6 +41,36 @@
init(TRI);
}
+  /// For a machine instruction \p MI, adds all register units used to
+  /// \p UsedRegUnits and all register units defined or clobbered to
+  /// \p ModifiedRegUnits. This is useful when walking over a range of
+  /// instructions to track registers used and defined separately.
+ static void accumulateUsedDefed(const MachineInstr &MI,
+ LiveRegUnits &ModifiedRegUnits,
+ LiveRegUnits &UsedRegUnits,
+ const TargetRegisterInfo *TRI) {
+ for (ConstMIBundleOperands O(MI); O.isValid(); ++O) {
+ if (O->isRegMask())
+ ModifiedRegUnits.addRegsInMask(O->getRegMask());
+ if (!O->isReg())
+ continue;
+ unsigned Reg = O->getReg();
+ if (!TargetRegisterInfo::isPhysicalRegister(Reg))
+ continue;
+ if (O->isDef()) {
+ // Some architectures (e.g. AArch64 XZR/WZR) have registers that are
+ // constant and may be used as destinations to indicate the generated
+ // value is discarded. No need to track such case as a def.
+ if (!TRI->isConstantPhysReg(Reg))
+ ModifiedRegUnits.addReg(Reg);
+ } else {
+ assert(O->isUse() && "Reg operand not a def and not a use");
+ UsedRegUnits.addReg(Reg);
+ }
+ }
+ }
+
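
The usual pattern for accumulateUsedDefed, sketched with hypothetical names: a forward walk that stops once a tracked base register is read or clobbered, as a load/store optimizer might do.

LiveRegUnits ModifiedRegUnits(*TRI), UsedRegUnits(*TRI);
for (MachineInstr &Cur :
     make_range(std::next(MI.getIterator()), MBB.end())) {
  LiveRegUnits::accumulateUsedDefed(Cur, ModifiedRegUnits, UsedRegUnits, TRI);
  if (!ModifiedRegUnits.available(BaseReg) ||
      !UsedRegUnits.available(BaseReg))
    break; // BaseReg was written or read; stop the search here.
}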
/// Initialize and clear the set.
void init(const TargetRegisterInfo &TRI) {
this->TRI = &TRI;
@@ -59,7 +90,7 @@
Units.set(*Unit);
}
- /// \brief Adds register units covered by physical register \p Reg that are
+ /// Adds register units covered by physical register \p Reg that are
/// part of the lanemask \p Mask.
void addRegMasked(unsigned Reg, LaneBitmask Mask) {
for (MCRegUnitMaskIterator Unit(Reg, TRI); Unit.isValid(); ++Unit) {
diff --git a/linux-x64/clang/include/llvm/CodeGen/LoopTraversal.h b/linux-x64/clang/include/llvm/CodeGen/LoopTraversal.h
index a816f6d..750da01 100644
--- a/linux-x64/clang/include/llvm/CodeGen/LoopTraversal.h
+++ b/linux-x64/clang/include/llvm/CodeGen/LoopTraversal.h
@@ -101,7 +101,7 @@
};
LoopTraversal() {}
- /// \brief Identifies basic blocks that are part of loops and should to be
+  /// Identifies basic blocks that are part of loops and should be
/// visited twice and returns efficient traversal order for all the blocks.
typedef SmallVector<TraversedMBBInfo, 4> TraversalOrder;
TraversalOrder traverse(MachineFunction &MF);
diff --git a/linux-x64/clang/include/llvm/CodeGen/MIRParser/MIRParser.h b/linux-x64/clang/include/llvm/CodeGen/MIRParser/MIRParser.h
index b631a8c..e199a1f 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MIRParser/MIRParser.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MIRParser/MIRParser.h
@@ -45,7 +45,7 @@
/// \returns nullptr if a parsing error occurred.
std::unique_ptr<Module> parseIRModule();
- /// \brief Parses MachineFunctions in the MIR file and add them to the given
+  /// Parses MachineFunctions in the MIR file and adds them to the given
/// MachineModuleInfo \p MMI.
///
/// \returns true if an error occurred.
diff --git a/linux-x64/clang/include/llvm/CodeGen/MIRPrinter.h b/linux-x64/clang/include/llvm/CodeGen/MIRPrinter.h
index c73adc3..078c4b2 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MIRPrinter.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MIRPrinter.h
@@ -38,7 +38,7 @@
  /// this function and the parser will use this function to construct a list if
/// it is missing.
void guessSuccessors(const MachineBasicBlock &MBB,
- SmallVectorImpl<MachineBasicBlock*> &Successors,
+ SmallVectorImpl<MachineBasicBlock*> &Result,
bool &IsFallthrough);
} // end namespace llvm
diff --git a/linux-x64/clang/include/llvm/CodeGen/MIRYamlMapping.h b/linux-x64/clang/include/llvm/CodeGen/MIRYamlMapping.h
index b75f9c8..7f46406 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MIRYamlMapping.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MIRYamlMapping.h
@@ -258,11 +258,11 @@
YamlIO.mapOptional("callee-saved-restored", Object.CalleeSavedRestored,
true);
YamlIO.mapOptional("local-offset", Object.LocalOffset, Optional<int64_t>());
- YamlIO.mapOptional("di-variable", Object.DebugVar,
+ YamlIO.mapOptional("debug-info-variable", Object.DebugVar,
StringValue()); // Don't print it out when it's empty.
- YamlIO.mapOptional("di-expression", Object.DebugExpr,
+ YamlIO.mapOptional("debug-info-expression", Object.DebugExpr,
StringValue()); // Don't print it out when it's empty.
- YamlIO.mapOptional("di-location", Object.DebugLoc,
+ YamlIO.mapOptional("debug-info-location", Object.DebugLoc,
StringValue()); // Don't print it out when it's empty.
}
@@ -283,6 +283,9 @@
bool IsAliased = false;
StringValue CalleeSavedRegister;
bool CalleeSavedRestored = true;
+ StringValue DebugVar;
+ StringValue DebugExpr;
+ StringValue DebugLoc;
bool operator==(const FixedMachineStackObject &Other) const {
return ID == Other.ID && Type == Other.Type && Offset == Other.Offset &&
@@ -290,7 +293,9 @@
StackID == Other.StackID &&
IsImmutable == Other.IsImmutable && IsAliased == Other.IsAliased &&
CalleeSavedRegister == Other.CalleeSavedRegister &&
- CalleeSavedRestored == Other.CalleeSavedRestored;
+ CalleeSavedRestored == Other.CalleeSavedRestored &&
+           DebugVar == Other.DebugVar && DebugExpr == Other.DebugExpr &&
+           DebugLoc == Other.DebugLoc;
}
};
@@ -321,6 +326,12 @@
StringValue()); // Don't print it out when it's empty.
YamlIO.mapOptional("callee-saved-restored", Object.CalleeSavedRestored,
true);
+ YamlIO.mapOptional("debug-info-variable", Object.DebugVar,
+ StringValue()); // Don't print it out when it's empty.
+ YamlIO.mapOptional("debug-info-expression", Object.DebugExpr,
+ StringValue()); // Don't print it out when it's empty.
+ YamlIO.mapOptional("debug-info-location", Object.DebugLoc,
+ StringValue()); // Don't print it out when it's empty.
}
static const bool flow = true;
@@ -417,6 +428,7 @@
bool HasOpaqueSPAdjustment = false;
bool HasVAStart = false;
bool HasMustTailInVarArgFunc = false;
+ unsigned LocalFrameSize = 0;
StringValue SavePoint;
StringValue RestorePoint;
@@ -434,6 +446,7 @@
HasOpaqueSPAdjustment == Other.HasOpaqueSPAdjustment &&
HasVAStart == Other.HasVAStart &&
HasMustTailInVarArgFunc == Other.HasMustTailInVarArgFunc &&
+ LocalFrameSize == Other.LocalFrameSize &&
SavePoint == Other.SavePoint && RestorePoint == Other.RestorePoint;
}
};
@@ -457,6 +470,7 @@
YamlIO.mapOptional("hasVAStart", MFI.HasVAStart, false);
YamlIO.mapOptional("hasMustTailInVarArgFunc", MFI.HasMustTailInVarArgFunc,
false);
+ YamlIO.mapOptional("localFrameSize", MFI.LocalFrameSize, (unsigned)0);
YamlIO.mapOptional("savePoint", MFI.SavePoint,
StringValue()); // Don't print it out when it's empty.
YamlIO.mapOptional("restorePoint", MFI.RestorePoint,
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachORelocation.h b/linux-x64/clang/include/llvm/CodeGen/MachORelocation.h
index 8c9b7a8..cbb4969 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachORelocation.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachORelocation.h
@@ -27,15 +27,15 @@
uint32_t r_symbolnum; // symbol index if r_extern == 1 else section index
bool r_pcrel; // was relocated pc-relative already
uint8_t r_length; // length = 2 ^ r_length
- bool r_extern; //
+ bool r_extern; //
uint8_t r_type; // if not 0, machine-specific relocation type.
bool r_scattered; // 1 = scattered, 0 = non-scattered
int32_t r_value; // the value the item to be relocated is referring
// to.
- public:
+ public:
uint32_t getPackedFields() const {
if (r_scattered)
- return (1 << 31) | (r_pcrel << 30) | ((r_length & 3) << 28) |
+ return (1 << 31) | (r_pcrel << 30) | ((r_length & 3) << 28) |
((r_type & 15) << 24) | (r_address & 0x00FFFFFF);
else
return (r_symbolnum << 8) | (r_pcrel << 7) | ((r_length & 3) << 5) |
@@ -45,8 +45,8 @@
uint32_t getRawAddress() const { return r_address; }
MachORelocation(uint32_t addr, uint32_t index, bool pcrel, uint8_t len,
- bool ext, uint8_t type, bool scattered = false,
- int32_t value = 0) :
+ bool ext, uint8_t type, bool scattered = false,
+ int32_t value = 0) :
r_address(addr), r_symbolnum(index), r_pcrel(pcrel), r_length(len),
r_extern(ext), r_type(type), r_scattered(scattered), r_value(value) {}
};
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineBasicBlock.h b/linux-x64/clang/include/llvm/CodeGen/MachineBasicBlock.h
index f3130b6..ace33ef 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineBasicBlock.h
@@ -58,7 +58,7 @@
public:
void addNodeToList(MachineInstr *N);
void removeNodeFromList(MachineInstr *N);
- void transferNodesFromList(ilist_traits &OldList, instr_iterator First,
+ void transferNodesFromList(ilist_traits &FromList, instr_iterator First,
instr_iterator Last);
void deleteNode(MachineInstr *MI);
};
@@ -115,13 +115,18 @@
/// branch.
bool AddressTaken = false;
+ /// Indicate that this basic block is the entry block of an EH scope, i.e.,
+ /// the block that used to have a catchpad or cleanuppad instruction in the
+ /// LLVM IR.
+ bool IsEHScopeEntry = false;
+
/// Indicate that this basic block is the entry block of an EH funclet.
bool IsEHFuncletEntry = false;
/// Indicate that this basic block is the entry block of a cleanup funclet.
bool IsCleanupFuncletEntry = false;
- /// \brief since getSymbol is a relatively heavy-weight operation, the symbol
+  /// Since getSymbol is a relatively heavy-weight operation, the symbol
/// is only computed once and is cached.
mutable MCSymbol *CachedMCSymbol = nullptr;
@@ -375,6 +380,14 @@
bool hasEHPadSuccessor() const;
+ /// Returns true if this is the entry block of an EH scope, i.e., the block
+ /// that used to have a catchpad or cleanuppad instruction in the LLVM IR.
+ bool isEHScopeEntry() const { return IsEHScopeEntry; }
+
+ /// Indicates if this is the entry block of an EH scope, i.e., the block that
+  /// used to have a catchpad or cleanuppad instruction in the LLVM IR.
+ void setIsEHScopeEntry(bool V = true) { IsEHScopeEntry = V; }
+
/// Returns true if this is the entry block of an EH funclet.
bool isEHFuncletEntry() const { return IsEHFuncletEntry; }
@@ -464,6 +477,11 @@
/// probabilities may need to be normalized.
void copySuccessor(MachineBasicBlock *Orig, succ_iterator I);
+  /// Splits the old successor into old plus new and updates the probability
+ /// info.
+ void splitSuccessor(MachineBasicBlock *Old, MachineBasicBlock *New,
+ bool NormalizeSuccProbs = false);
+
/// Transfers all the successors from MBB to this machine basic block (i.e.,
  /// copies all the successors from FromMBB and removes all the successors from
/// FromMBB).
@@ -700,7 +718,7 @@
bool IsCond);
/// Find the next valid DebugLoc starting at MBBI, skipping any DBG_VALUE
- /// instructions. Return UnknownLoc if there is none.
+ /// and DBG_LABEL instructions. Return UnknownLoc if there is none.
DebugLoc findDebugLoc(instr_iterator MBBI);
DebugLoc findDebugLoc(iterator MBBI) {
return findDebugLoc(MBBI.getInstrIterator());
@@ -897,7 +915,7 @@
/// const_instr_iterator} and the respective reverse iterators.
template<typename IterT>
inline IterT skipDebugInstructionsForward(IterT It, IterT End) {
- while (It != End && It->isDebugValue())
+ while (It != End && It->isDebugInstr())
It++;
return It;
}
@@ -908,7 +926,7 @@
/// const_instr_iterator} and the respective reverse iterators.
template<class IterT>
inline IterT skipDebugInstructionsBackward(IterT It, IterT Begin) {
- while (It != Begin && It->isDebugValue())
+ while (It != Begin && It->isDebugInstr())
It--;
return It;
}
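
A sketch of the forward variant in use; `examine` is a hypothetical callback on the first non-debug instruction.

MachineBasicBlock::iterator It =
    skipDebugInstructionsForward(MBB.begin(), MBB.end());
if (It != MBB.end())
  examine(*It); // first instruction that is not a debug instruction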
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineConstantPool.h b/linux-x64/clang/include/llvm/CodeGen/MachineConstantPool.h
index 1705a0f..b0b5420 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineConstantPool.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineConstantPool.h
@@ -63,7 +63,7 @@
/// This class is a data container for one entry in a MachineConstantPool.
/// It contains a pointer to the value and an offset from the start of
/// the constant pool.
-/// @brief An entry in a MachineConstantPool
+/// An entry in a MachineConstantPool
class MachineConstantPoolEntry {
public:
/// The constant itself.
@@ -117,7 +117,7 @@
/// the use of MO_ConstantPoolIndex values. When emitting assembly or machine
/// code, these virtual address references are converted to refer to the
/// address of the function constant pool values.
-/// @brief The machine constant pool.
+/// The machine constant pool.
class MachineConstantPool {
unsigned PoolAlignment; ///< The alignment for the pool.
std::vector<MachineConstantPoolEntry> Constants; ///< The pool of constants.
@@ -128,7 +128,7 @@
const DataLayout &getDataLayout() const { return DL; }
public:
- /// @brief The only constructor.
+ /// The only constructor.
explicit MachineConstantPool(const DataLayout &DL)
: PoolAlignment(1), DL(DL) {}
~MachineConstantPool();
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineDominanceFrontier.h b/linux-x64/clang/include/llvm/CodeGen/MachineDominanceFrontier.h
index ffbcc62..75d75bc 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineDominanceFrontier.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineDominanceFrontier.h
@@ -37,9 +37,9 @@
MachineDominanceFrontier();
- DominanceFrontierBase<MachineBasicBlock, false> &getBase() { return Base; }
+ ForwardDominanceFrontierBase<MachineBasicBlock> &getBase() { return Base; }
- const SmallVectorImpl<MachineBasicBlock *> &getRoots() const {
+ const SmallVectorImpl<MachineBasicBlock *> &getRoots() const {
return Base.getRoots();
}
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineDominators.h b/linux-x64/clang/include/llvm/CodeGen/MachineDominators.h
index af642d9..e3d3d16 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineDominators.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineDominators.h
@@ -45,7 +45,7 @@
/// compute a normal dominator tree.
///
class MachineDominatorTree : public MachineFunctionPass {
- /// \brief Helper structure used to hold all the basic blocks
+ /// Helper structure used to hold all the basic blocks
/// involved in the split of a critical edge.
struct CriticalEdge {
MachineBasicBlock *FromBB;
@@ -53,12 +53,12 @@
MachineBasicBlock *NewBB;
};
- /// \brief Pile up all the critical edges to be split.
+ /// Pile up all the critical edges to be split.
/// The splitting of a critical edge is local and thus, it is possible
/// to apply several of those changes at the same time.
mutable SmallVector<CriticalEdge, 32> CriticalEdgesToSplit;
- /// \brief Remember all the basic blocks that are inserted during
+ /// Remember all the basic blocks that are inserted during
/// edge splitting.
/// Invariant: NewBBs == all the basic blocks contained in the NewBB
/// field of all the elements of CriticalEdgesToSplit.
@@ -69,7 +69,7 @@
/// The DominatorTreeBase that is used to compute a normal dominator tree
std::unique_ptr<DomTreeBase<MachineBasicBlock>> DT;
- /// \brief Apply all the recorded critical edges to the DT.
+ /// Apply all the recorded critical edges to the DT.
/// This updates the underlying DT information in a way that uses
/// the fast query path of DT as much as possible.
///
@@ -228,7 +228,7 @@
void print(raw_ostream &OS, const Module*) const override;
- /// \brief Record that the critical edge (FromBB, ToBB) has been
+ /// Record that the critical edge (FromBB, ToBB) has been
/// split with NewBB.
/// This is best to use this method instead of directly update the
/// underlying information, because this helps mitigating the
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineFrameInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineFrameInfo.h
index f887517..2d6081f 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineFrameInfo.h
@@ -85,9 +85,23 @@
/// stack offsets of the object, eliminating all MO_FrameIndex operands from
/// the program.
///
-/// @brief Abstract Stack Frame Information
+/// Abstract Stack Frame Information
class MachineFrameInfo {
+public:
+ /// Stack Smashing Protection (SSP) rules require that vulnerable stack
+  /// allocations are located close to the stack protector.
+ enum SSPLayoutKind {
+ SSPLK_None, ///< Did not trigger a stack protector. No effect on data
+ ///< layout.
+ SSPLK_LargeArray, ///< Array or nested array >= SSP-buffer-size. Closest
+ ///< to the stack protector.
+ SSPLK_SmallArray, ///< Array or nested array < SSP-buffer-size. 2nd closest
+ ///< to the stack protector.
+ SSPLK_AddrOf ///< The address of this allocation is exposed and
+ ///< triggered protection. 3rd closest to the protector.
+ };
+private:
// Represent a single object allocated on the stack.
struct StackObject {
// The offset of this object from the stack pointer on entry to
@@ -123,6 +137,9 @@
/// necessarily reside in the same contiguous memory block as other stack
/// objects. Objects with differing stack IDs should not be merged or
  /// substituted for each other.
+ //
+ /// It is assumed a target uses consecutive, increasing stack IDs starting
+ /// from 1.
uint8_t StackID;
  /// If this stack object originated from an Alloca instruction
@@ -145,12 +162,15 @@
  /// If true, the object has been sign-extended.
bool isSExt = false;
+ uint8_t SSPLayout;
+
StackObject(uint64_t Size, unsigned Alignment, int64_t SPOffset,
bool IsImmutable, bool IsSpillSlot, const AllocaInst *Alloca,
bool IsAliased, uint8_t StackID = 0)
: SPOffset(SPOffset), Size(Size), Alignment(Alignment),
isImmutable(IsImmutable), isSpillSlot(IsSpillSlot),
- StackID(StackID), Alloca(Alloca), isAliased(IsAliased) {}
+ StackID(StackID), Alloca(Alloca), isAliased(IsAliased),
+ SSPLayout(SSPLK_None) {}
};
/// The alignment of the stack.
@@ -485,6 +505,20 @@
Objects[ObjectIdx+NumFixedObjects].SPOffset = SPOffset;
}
+ SSPLayoutKind getObjectSSPLayout(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return (SSPLayoutKind)Objects[ObjectIdx+NumFixedObjects].SSPLayout;
+ }
+
+ void setObjectSSPLayout(int ObjectIdx, SSPLayoutKind Kind) {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ assert(!isDeadObjectIndex(ObjectIdx) &&
+ "Setting SSP layout for a dead object?");
+ Objects[ObjectIdx+NumFixedObjects].SSPLayout = Kind;
+ }
+
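
Sketched use from a stack-protector-style analysis; MF is the current MachineFunction and FI is a hypothetical frame index for an address-taken allocation.

MachineFrameInfo &MFI = MF.getFrameInfo();
if (MFI.getObjectSSPLayout(FI) == MachineFrameInfo::SSPLK_None)
  MFI.setObjectSSPLayout(FI, MachineFrameInfo::SSPLK_AddrOf);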
/// Return the number of bytes that must be allocated to hold
/// all of the fixed size frame objects. This is only valid after
/// Prolog/Epilog code insertion has finalized the stack frame layout.
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineFunction.h b/linux-x64/clang/include/llvm/CodeGen/MachineFunction.h
index 7d8b7eb..e8a4d52 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineFunction.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineFunction.h
@@ -73,6 +73,7 @@
class TargetMachine;
class TargetRegisterClass;
class TargetSubtargetInfo;
+struct WasmEHFuncInfo;
struct WinEHFuncInfo;
template <> struct ilist_alloc_traits<MachineBasicBlock> {
@@ -80,8 +81,8 @@
};
template <> struct ilist_callback_traits<MachineBasicBlock> {
- void addNodeToList(MachineBasicBlock* MBB);
- void removeNodeFromList(MachineBasicBlock* MBB);
+ void addNodeToList(MachineBasicBlock* N);
+ void removeNodeFromList(MachineBasicBlock* N);
template <class Iterator>
void transferNodesFromList(ilist_callback_traits &OldList, Iterator, Iterator) {
@@ -96,7 +97,7 @@
struct MachineFunctionInfo {
virtual ~MachineFunctionInfo();
- /// \brief Factory function: default behavior is to call new using the
+ /// Factory function: default behavior is to call new using the
/// supplied allocator.
///
  /// This function can be overridden in a derived class.
@@ -245,6 +246,10 @@
// Keep track of jump tables for switch instructions
MachineJumpTableInfo *JumpTableInfo;
+ // Keeps track of Wasm exception handling related data. This will be null for
+ // functions that aren't using a wasm EH personality.
+ WasmEHFuncInfo *WasmEHInfo = nullptr;
+
// Keeps track of Windows exception handling related data. This will be null
// for functions that aren't using a funclet-based EH personality.
WinEHFuncInfo *WinEHInfo = nullptr;
@@ -319,6 +324,7 @@
bool CallsEHReturn = false;
bool CallsUnwindInit = false;
+ bool HasEHScopes = false;
bool HasEHFunclets = false;
/// List of C++ TypeInfo used.
@@ -349,17 +355,18 @@
struct VariableDbgInfo {
const DILocalVariable *Var;
const DIExpression *Expr;
- unsigned Slot;
+ // The Slot can be negative for fixed stack objects.
+ int Slot;
const DILocation *Loc;
VariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
- unsigned Slot, const DILocation *Loc)
+ int Slot, const DILocation *Loc)
: Var(Var), Expr(Expr), Slot(Slot), Loc(Loc) {}
};
using VariableDbgInfoMapTy = SmallVector<VariableDbgInfo, 4>;
VariableDbgInfoMapTy VariableDbgInfos;
- MachineFunction(const Function &F, const TargetMachine &TM,
+ MachineFunction(const Function &F, const TargetMachine &Target,
const TargetSubtargetInfo &STI, unsigned FunctionNum,
MachineModuleInfo &MMI);
MachineFunction(const MachineFunction &) = delete;
@@ -430,6 +437,12 @@
MachineConstantPool *getConstantPool() { return ConstantPool; }
const MachineConstantPool *getConstantPool() const { return ConstantPool; }
+ /// getWasmEHFuncInfo - Return information about how the current function uses
+ /// Wasm exception handling. Returns null for functions that don't use wasm
+ /// exception handling.
+ const WasmEHFuncInfo *getWasmEHFuncInfo() const { return WasmEHInfo; }
+ WasmEHFuncInfo *getWasmEHFuncInfo() { return WasmEHInfo; }
+
/// getWinEHFuncInfo - Return information about how the current function uses
/// Windows exception handling. Returns null for functions that don't use
/// funclets for exception handling.
@@ -609,7 +622,7 @@
//===--------------------------------------------------------------------===//
// Internal functions used to automatically number MachineBasicBlocks
- /// \brief Adds the MBB to the internal numbering. Returns the unique number
+ /// Adds the MBB to the internal numbering. Returns the unique number
/// assigned to the MBB.
unsigned addToMBBNumbering(MachineBasicBlock *MBB) {
MBBNumbering.push_back(MBB);
@@ -695,14 +708,8 @@
OperandRecycler.deallocate(Cap, Array);
}
- /// \brief Allocate and initialize a register mask with @p NumRegister bits.
- uint32_t *allocateRegisterMask(unsigned NumRegister) {
- unsigned Size = (NumRegister + 31) / 32;
- uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
- for (unsigned i = 0; i != Size; ++i)
- Mask[i] = 0;
- return Mask;
- }
+  /// Allocate and initialize a register mask covering all physical registers.
+ uint32_t *allocateRegMask();
/// allocateMemRefsArray - Allocate an array to hold MachineMemOperand
/// pointers. This array is owned by the MachineFunction.
@@ -759,6 +766,9 @@
bool callsUnwindInit() const { return CallsUnwindInit; }
void setCallsUnwindInit(bool b) { CallsUnwindInit = b; }
+ bool hasEHScopes() const { return HasEHScopes; }
+ void setHasEHScopes(bool V) { HasEHScopes = V; }
+
bool hasEHFunclets() const { return HasEHFunclets; }
void setHasEHFunclets(bool V) { HasEHFunclets = V; }
@@ -793,7 +803,7 @@
void addCleanup(MachineBasicBlock *LandingPad);
void addSEHCatchHandler(MachineBasicBlock *LandingPad, const Function *Filter,
- const BlockAddress *RecoverLabel);
+ const BlockAddress *RecoverBA);
void addSEHCleanupHandler(MachineBasicBlock *LandingPad,
const Function *Cleanup);
@@ -860,7 +870,7 @@
/// Collect information used to emit debugging information of a variable.
void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
- unsigned Slot, const DILocation *Loc) {
+ int Slot, const DILocation *Loc) {
VariableDbgInfos.emplace_back(Var, Expr, Slot, Loc);
}
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineInstr.h b/linux-x64/clang/include/llvm/CodeGen/MachineInstr.h
index ea94be0..88e13cd 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineInstr.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineInstr.h
@@ -80,7 +80,21 @@
FrameDestroy = 1 << 1, // Instruction is used as a part of
// function frame destruction code.
BundledPred = 1 << 2, // Instruction has bundled predecessors.
- BundledSucc = 1 << 3 // Instruction has bundled successors.
+ BundledSucc = 1 << 3, // Instruction has bundled successors.
+ FmNoNans = 1 << 4, // Instruction does not support Fast
+ // math nan values.
+ FmNoInfs = 1 << 5, // Instruction does not support Fast
+ // math infinity values.
+ FmNsz = 1 << 6, // Instruction is not required to retain
+ // signed zero values.
+ FmArcp = 1 << 7, // Instruction supports Fast math
+ // reciprocal approximations.
+ FmContract = 1 << 8, // Instruction supports Fast math
+ // contraction operations like fma.
+ FmAfn = 1 << 9, // Instruction may map to Fast math
+ // intrinsic approximation.
+ FmReassoc = 1 << 10 // Instruction supports Fast math
+ // reassociation of operand order.
};
private:
@@ -93,7 +107,7 @@
using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity;
OperandCapacity CapOperands; // Capacity of the Operands array.
- uint8_t Flags = 0; // Various bits of additional
+ uint16_t Flags = 0; // Various bits of additional
// information about machine
// instruction.
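
With FmReassoc at 1 << 10, the flag word needs eleven bits, which is why Flags widens from uint8_t to uint16_t. A minimal standalone sketch of the same layout (the enum mirrors the names above but is not LLVM's definition):

#include <cassert>
#include <cstdint>

// Mirror of the widened MIFlag layout: the highest bit, 1 << 10, no longer
// fits in a uint8_t (whose top bit is 1 << 7) but fits in a uint16_t.
enum MIFlag : unsigned {
  FrameSetup  = 1 << 0,
  BundledSucc = 1 << 3,
  FmNoNans    = 1 << 4,
  FmReassoc   = 1 << 10, // needs at least 11 bits of storage
};

int main() {
  uint16_t Flags = 0;
  Flags |= (uint16_t)FmReassoc;    // setFlag
  assert(Flags & FmReassoc);       // getFlag-style query
  Flags &= ~((uint16_t)FmReassoc); // clearFlag
  assert(Flags == 0);
  static_assert(FmReassoc > 0xFF, "a uint8_t would silently drop this bit");
  return 0;
}
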
@@ -127,7 +141,7 @@
/// This constructor creates a MachineInstr and adds the implicit operands.
/// It reserves space for number of operands specified by
/// MCInstrDesc. An explicit DebugLoc is supplied.
- MachineInstr(MachineFunction &, const MCInstrDesc &MCID, DebugLoc dl,
+ MachineInstr(MachineFunction &, const MCInstrDesc &tid, DebugLoc dl,
bool NoImp = false);
// MachineInstrs are pool-allocated and owned by MachineFunction.
@@ -175,7 +189,7 @@
}
/// Return the MI flags bitvector.
- uint8_t getFlags() const {
+ uint16_t getFlags() const {
return Flags;
}
@@ -186,7 +200,7 @@
/// Set a MI flag.
void setFlag(MIFlag Flag) {
- Flags |= (uint8_t)Flag;
+ Flags |= (uint16_t)Flag;
}
void setFlags(unsigned flags) {
@@ -197,7 +211,7 @@
/// clearFlag - Clear a MI flag.
void clearFlag(MIFlag Flag) {
- Flags &= ~((uint8_t)Flag);
+ Flags &= ~((uint16_t)Flag);
}
/// Return true if MI is in a bundle (but not the first MI in a bundle).
@@ -278,6 +292,10 @@
/// this DBG_VALUE instruction.
const DIExpression *getDebugExpression() const;
+ /// Return the debug label referenced by
+ /// this DBG_LABEL instruction.
+ const DILabel *getDebugLabel() const;
+
/// Emit an error referring to the source location of this instruction.
/// This should only be used for inline assembly that is somehow
/// impossible to compile. Other errors should have been handled much
@@ -304,6 +322,11 @@
return Operands[i];
}
+ /// Returns the total number of definitions.
+ unsigned getNumDefs() const {
+ return getNumExplicitDefs() + MCID->getNumImplicitDefs();
+ }
+
/// Return true if operand \p OpIdx is a subregister index.
bool isOperandSubregIdx(unsigned OpIdx) const {
assert(getOperand(OpIdx).getType() == MachineOperand::MO_Immediate &&
@@ -322,6 +345,9 @@
/// Returns the number of non-implicit operands.
unsigned getNumExplicitOperands() const;
+ /// Returns the number of non-implicit definitions.
+ unsigned getNumExplicitDefs() const;
+
/// iterator/begin/end - Iterate over all operands of a machine instruction.
using mop_iterator = MachineOperand *;
using const_mop_iterator = const MachineOperand *;
@@ -356,31 +382,29 @@
/// Implicit definition are not included!
iterator_range<mop_iterator> defs() {
return make_range(operands_begin(),
- operands_begin() + getDesc().getNumDefs());
+ operands_begin() + getNumExplicitDefs());
}
/// \copydoc defs()
iterator_range<const_mop_iterator> defs() const {
return make_range(operands_begin(),
- operands_begin() + getDesc().getNumDefs());
+ operands_begin() + getNumExplicitDefs());
}
/// Returns a range that includes all operands that are register uses.
/// This may include unrelated operands which are not register uses.
iterator_range<mop_iterator> uses() {
- return make_range(operands_begin() + getDesc().getNumDefs(),
- operands_end());
+ return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
}
/// \copydoc uses()
iterator_range<const_mop_iterator> uses() const {
- return make_range(operands_begin() + getDesc().getNumDefs(),
- operands_end());
+ return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
}
iterator_range<mop_iterator> explicit_uses() {
- return make_range(operands_begin() + getDesc().getNumDefs(),
- operands_begin() + getNumExplicitOperands() );
+ return make_range(operands_begin() + getNumExplicitDefs(),
+ operands_begin() + getNumExplicitOperands());
}
iterator_range<const_mop_iterator> explicit_uses() const {
- return make_range(operands_begin() + getDesc().getNumDefs(),
- operands_begin() + getNumExplicitOperands() );
+ return make_range(operands_begin() + getNumExplicitDefs(),
+ operands_begin() + getNumExplicitOperands());
}
/// Returns the number of the operand iterator \p I points to.
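
The ranges above now split the operand list at the computed explicit-def count rather than the descriptor's static def count. A small standalone analogue of that partitioning (hypothetical Operand type, not LLVM's):

#include <cstdio>
#include <vector>

struct Operand { const char *Name; bool IsDef; };

int main() {
  // Explicit defs come first in the operand list, then the uses.
  std::vector<Operand> Ops = {{"d0", true}, {"u0", false}, {"u1", false}};
  size_t NumExplicitDefs = 1;

  // defs() == [begin, begin + NumExplicitDefs)
  for (size_t I = 0; I != NumExplicitDefs; ++I)
    std::printf("def: %s\n", Ops[I].Name);

  // uses() == [begin + NumExplicitDefs, end)
  for (size_t I = NumExplicitDefs; I != Ops.size(); ++I)
    std::printf("use: %s\n", Ops[I].Name);
  return 0;
}
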
@@ -529,6 +553,12 @@
return hasProperty(MCID::MoveImm, Type);
}
+ /// Return true if this instruction is a register move.
+ /// (including moving values from subreg to reg)
+ bool isMoveReg(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::MoveReg, Type);
+ }
+
/// Return true if this instruction is a bitcast instruction.
bool isBitcast(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::Bitcast, Type);
@@ -576,7 +606,7 @@
return hasProperty(MCID::FoldableAsLoad, Type);
}
- /// \brief Return true if this instruction behaves
+ /// Return true if this instruction behaves
/// the same way as the generic REG_SEQUENCE instructions.
/// E.g., on ARM,
/// dX VMOVDRR rY, rZ
@@ -590,7 +620,7 @@
return hasProperty(MCID::RegSequence, Type);
}
- /// \brief Return true if this instruction behaves
+ /// Return true if this instruction behaves
/// the same way as the generic EXTRACT_SUBREG instructions.
/// E.g., on ARM,
/// rX, rY VMOVRRD dZ
@@ -605,7 +635,7 @@
return hasProperty(MCID::ExtractSubreg, Type);
}
- /// \brief Return true if this instruction behaves
+ /// Return true if this instruction behaves
/// the same way as the generic INSERT_SUBREG instructions.
/// E.g., on ARM,
/// dX = VSETLNi32 dY, rZ, Imm
@@ -817,6 +847,8 @@
bool isPosition() const { return isLabel() || isCFIInstruction(); }
bool isDebugValue() const { return getOpcode() == TargetOpcode::DBG_VALUE; }
+ bool isDebugLabel() const { return getOpcode() == TargetOpcode::DBG_LABEL; }
+ bool isDebugInstr() const { return isDebugValue() || isDebugLabel(); }
/// A DBG_VALUE is indirect iff the first operand is a register and
/// the second operand is an immediate.
@@ -893,6 +925,7 @@
case TargetOpcode::EH_LABEL:
case TargetOpcode::GC_LABEL:
case TargetOpcode::DBG_VALUE:
+ case TargetOpcode::DBG_LABEL:
case TargetOpcode::LIFETIME_START:
case TargetOpcode::LIFETIME_END:
return true;
@@ -1049,7 +1082,7 @@
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) const;
- /// \brief Applies the constraints (def/use) implied by this MI on \p Reg to
+ /// Applies the constraints (def/use) implied by this MI on \p Reg to
/// the given \p CurRC.
/// If \p ExploreBundle is set and MI is part of a bundle, all the
/// instructions inside the bundle will be taken into account. In other words,
@@ -1066,7 +1099,7 @@
const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
bool ExploreBundle = false) const;
- /// \brief Applies the constraints (def/use) implied by the \p OpIdx operand
+ /// Applies the constraints (def/use) implied by the \p OpIdx operand
/// to the given \p CurRC.
///
/// Returns the register class that satisfies both \p CurRC and the
@@ -1244,10 +1277,11 @@
/// \p TII is used to print the opcode name. If it's not present, but the
/// MI is in a function, the opcode will be printed using the function's TII.
void print(raw_ostream &OS, bool IsStandalone = true, bool SkipOpers = false,
- bool SkipDebugLoc = false,
+ bool SkipDebugLoc = false, bool AddNewLine = true,
const TargetInstrInfo *TII = nullptr) const;
void print(raw_ostream &OS, ModuleSlotTracker &MST, bool IsStandalone = true,
bool SkipOpers = false, bool SkipDebugLoc = false,
+ bool AddNewLine = true,
const TargetInstrInfo *TII = nullptr) const;
void dump() const;
/// @}
@@ -1287,7 +1321,7 @@
/// Erase an operand from an instruction, leaving it with one
/// fewer operand than it started with.
- void RemoveOperand(unsigned i);
+ void RemoveOperand(unsigned OpNo);
/// Add a MachineMemOperand to the machine instruction.
/// This function should be used only occasionally. The setMemRefs function
@@ -1320,7 +1354,7 @@
/// Return the MIFlags which represent both MachineInstrs. This
/// should be used when merging two MachineInstrs into one. This routine does
/// not modify the MIFlags of this MachineInstr.
- uint8_t mergeFlagsWith(const MachineInstr& Other) const;
+ uint16_t mergeFlagsWith(const MachineInstr& Other) const;
/// Clear this MachineInstr's memory reference descriptor list. This resets
/// the memrefs to their most conservative state. This should be used only
@@ -1362,7 +1396,7 @@
/// Slow path for hasProperty when we're dealing with a bundle.
bool hasPropertyInBundle(unsigned Mask, QueryType Type) const;
- /// \brief Implements the logic of getRegClassConstraintEffectForVReg for the
+ /// Implements the logic of getRegClassConstraintEffectForVReg for
/// this MI and the given operand index \p OpIdx.
/// If the related operand does not constrain Reg, this returns CurRC.
const TargetRegisterClass *getRegClassConstraintEffectForVRegImpl(
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineInstrBuilder.h b/linux-x64/clang/include/llvm/CodeGen/MachineInstrBuilder.h
index 2df89b1..6656087 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -220,6 +220,9 @@
assert((MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable())
: true) &&
"first MDNode argument of a DBG_VALUE not a variable");
+ assert((MI->isDebugLabel() ? static_cast<bool>(MI->getDebugLabel())
+ : true) &&
+ "first MDNode argument of a DBG_LABEL not a label");
return *this;
}
@@ -415,6 +418,13 @@
const MDNode *Expr);
/// This version of the builder builds a DBG_VALUE intrinsic
+/// for a MachineOperand.
+MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
+ const MCInstrDesc &MCID, bool IsIndirect,
+ MachineOperand &MO, const MDNode *Variable,
+ const MDNode *Expr);
+
+/// This version of the builder builds a DBG_VALUE intrinsic
/// for either a value in a register or a register-indirect
/// address and inserts it at position I.
MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
@@ -423,6 +433,14 @@
unsigned Reg, const MDNode *Variable,
const MDNode *Expr);
+/// This version of the builder builds a DBG_VALUE intrinsic
+/// for a machine operand and inserts it at position I.
+MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+ MachineBasicBlock::iterator I, const DebugLoc &DL,
+ const MCInstrDesc &MCID, bool IsIndirect,
+ MachineOperand &MO, const MDNode *Variable,
+ const MDNode *Expr);
+
/// Clone a DBG_VALUE whose value has been spilled to FrameIndex.
MachineInstr *buildDbgValueForSpill(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
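
A hedged usage sketch of the new MachineOperand overload declared above; only the BuildMI signature comes from this header, while the wrapper function and its arguments are illustrative placeholders:

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"

// Emit a DBG_VALUE that describes MO directly, rather than routing the value
// through a register number as the older overloads require.
void emitDbgValueFor(llvm::MachineFunction &MF, const llvm::DebugLoc &DL,
                     const llvm::TargetInstrInfo &TII, llvm::MachineOperand &MO,
                     const llvm::MDNode *Variable, const llvm::MDNode *Expr) {
  llvm::BuildMI(MF, DL, TII.get(llvm::TargetOpcode::DBG_VALUE),
                /*IsIndirect=*/false, MO, Variable, Expr);
}
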
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineLoopInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineLoopInfo.h
index 104655e..917fb90 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineLoopInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineLoopInfo.h
@@ -54,7 +54,7 @@
/// that contains the header.
MachineBasicBlock *getBottomBlock();
- /// \brief Find the block that contains the loop control variable and the
+ /// Find the block that contains the loop control variable and the
/// loop test. This will return the latch block if it's one of the exiting
/// blocks. Otherwise, return the exiting block. Return 'null' when
/// multiple exiting blocks are present.
@@ -97,7 +97,7 @@
LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }
- /// \brief Find the block that either is the loop preheader, or could
+ /// Find the block that either is the loop preheader, or could
/// speculatively be used as the preheader. This is e.g. useful to place
/// loop setup code. Code that cannot be speculated should not be placed
/// here. SpeculativePreheader controls whether it also tries to
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineMemOperand.h b/linux-x64/clang/include/llvm/CodeGen/MachineMemOperand.h
index dea0d80..078ef7c 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineMemOperand.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineMemOperand.h
@@ -184,7 +184,7 @@
/// atomic operations the atomic ordering requirements when store does not
/// occur must also be specified.
MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
- unsigned base_alignment,
+ uint64_t a,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr,
SyncScope::ID SSID = SyncScope::System,
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfo.h
index 6be304f..554e890 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfo.h
@@ -105,7 +105,7 @@
/// basic block's address of label.
MMIAddrLabelMap *AddrLabelSymbols;
- // TODO: Ideally, what we'd like is to have a switch that allows emitting
+ // TODO: Ideally, what we'd like is to have a switch that allows emitting
// synchronous (precise at call-sites only) CFA into .eh_frame. However,
// even under this switch, we'd like .debug_frame to be precise when using
// -g. At this moment, there's no way to specify that some CFI directives
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineOperand.h b/linux-x64/clang/include/llvm/CodeGen/MachineOperand.h
index 4f0db1c..53e8889 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineOperand.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineOperand.h
@@ -295,6 +295,12 @@
unsigned TiedOperandIdx, const TargetRegisterInfo *TRI,
const TargetIntrinsicInfo *IntrinsicInfo) const;
+ /// Same as print(os, TRI, IntrinsicInfo), but allows specifying the low-level
+ /// type to be printed, the same way the full version of print(...) does.
+ void print(raw_ostream &os, LLT TypeToPrint,
+ const TargetRegisterInfo *TRI = nullptr,
+ const TargetIntrinsicInfo *IntrinsicInfo = nullptr) const;
+
void dump() const;
//===--------------------------------------------------------------------===//
@@ -610,6 +616,11 @@
return Contents.RegMask;
}
+ /// Returns number of elements needed for a regmask array.
+ static unsigned getRegMaskSize(unsigned NumRegs) {
+ return (NumRegs + 31) / 32;
+ }
+
/// getRegLiveOut - Returns a bit mask of live-out registers.
const uint32_t *getRegLiveOut() const {
assert(isRegLiveOut() && "Wrong MachineOperand accessor");
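
getRegMaskSize is the word count for a one-bit-per-register mask; a standalone sketch of the same arithmetic, including setting and testing one register's bit (the register counts are invented):

#include <cassert>
#include <cstdint>
#include <vector>

// One bit per register, rounded up to whole 32-bit words.
static unsigned getRegMaskSize(unsigned NumRegs) { return (NumRegs + 31) / 32; }

int main() {
  unsigned NumRegs = 100; // a hypothetical target with 100 physical registers
  std::vector<uint32_t> Mask(getRegMaskSize(NumRegs), 0); // zero-initialized

  unsigned Reg = 70;
  Mask[Reg / 32] |= 1u << (Reg % 32);          // mark Reg as preserved
  assert(Mask.size() == 4);                    // (100 + 31) / 32 == 4
  assert(Mask[Reg / 32] & (1u << (Reg % 32))); // query it back
  return 0;
}
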
@@ -630,6 +641,11 @@
Contents.ImmVal = immVal;
}
+ void setCImm(const ConstantInt *CI) {
+ assert(isCImm() && "Wrong MachineOperand mutator");
+ Contents.CI = CI;
+ }
+
void setFPImm(const ConstantFP *CFP) {
assert(isFPImm() && "Wrong MachineOperand mutator");
Contents.CFP = CFP;
@@ -677,7 +693,7 @@
/// should stay in sync with the hash_value overload below.
bool isIdenticalTo(const MachineOperand &Other) const;
- /// \brief MachineOperand hash_value overload.
+ /// MachineOperand hash_value overload.
///
/// Note that this includes the same information in the hash that
/// isIdenticalTo uses for comparison. It is thus suited for use in hash
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h b/linux-x64/clang/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h
index 2fdefbe..a7ce870 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h
@@ -24,7 +24,7 @@
class MachineBlockFrequencyInfo;
class MachineInstr;
-/// \brief Common features for diagnostics dealing with optimization remarks
+/// Common features for diagnostics dealing with optimization remarks
/// that are used by machine passes.
class DiagnosticInfoMIROptimization : public DiagnosticInfoOptimizationBase {
public:
@@ -151,7 +151,7 @@
/// Emit an optimization remark.
void emit(DiagnosticInfoOptimizationBase &OptDiag);
- /// \brief Whether we allow for extra compile-time budget to perform more
+ /// Whether we allow for extra compile-time budget to perform more
/// analysis to be more informative.
///
/// This is useful to enable additional missed optimizations to be reported
@@ -164,7 +164,7 @@
.getDiagHandlerPtr()->isAnyRemarkEnabled(PassName));
}
- /// \brief Take a lambda that returns a remark which will be emitted. Second
+ /// Take a lambda that returns a remark which will be emitted. Second
/// argument is only used to restrict this to functions.
template <typename T>
void emit(T RemarkBuilder, decltype(RemarkBuilder()) * = nullptr) {
@@ -192,7 +192,7 @@
/// Similar but use value from \p OptDiag and update hotness there.
void computeHotness(DiagnosticInfoMIROptimization &Remark);
- /// \brief Only allow verbose messages if we know we're filtering by hotness
+ /// Only allow verbose messages if we know we're filtering by hotness
/// (BFI is only set in this case).
bool shouldEmitVerbose() { return MBFI != nullptr; }
};
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineOutliner.h b/linux-x64/clang/include/llvm/CodeGen/MachineOutliner.h
new file mode 100644
index 0000000..95bfc24
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineOutliner.h
@@ -0,0 +1,240 @@
+//===---- MachineOutliner.h - Outliner data structures ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Contains all data structures shared between the outliner implemented in
+/// MachineOutliner.cpp and target implementations of the outliner.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MACHINEOUTLINER_H
+#define LLVM_MACHINEOUTLINER_H
+
+#include "llvm/CodeGen/LiveRegUnits.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
+
+namespace llvm {
+namespace outliner {
+
+/// Represents how an instruction should be mapped by the outliner.
+/// \p Legal instructions are those which are safe to outline.
+/// \p LegalTerminator instructions are safe to outline, but only as the
+/// last instruction in a sequence.
+/// \p Illegal instructions are those which cannot be outlined.
+/// \p Invisible instructions can be outlined, but shouldn't actually impact
+/// the outlining result.
+enum InstrType { Legal, LegalTerminator, Illegal, Invisible };
+
+/// An individual sequence of instructions to be replaced with a call to
+/// an outlined function.
+struct Candidate {
+private:
+ /// The start index of this \p Candidate in the instruction list.
+ unsigned StartIdx;
+
+ /// The number of instructions in this \p Candidate.
+ unsigned Len;
+
+ /// The first instruction in this \p Candidate.
+ MachineBasicBlock::iterator FirstInst;
+
+ /// The last instruction in this \p Candidate.
+ MachineBasicBlock::iterator LastInst;
+
+ /// The basic block that contains this \p Candidate.
+ MachineBasicBlock *MBB;
+
+ /// Cost of calling an outlined function from this point as defined by the
+ /// target.
+ unsigned CallOverhead;
+
+public:
+ /// The index of this \p Candidate's \p OutlinedFunction in the list of
+ /// \p OutlinedFunctions.
+ unsigned FunctionIdx;
+
+ /// Set to false if the candidate overlapped with another candidate.
+ bool InCandidateList = true;
+
+ /// Identifier denoting the instructions to emit to call an outlined function
+ /// from this point. Defined by the target.
+ unsigned CallConstructionID;
+
+ /// Contains physical register liveness information for the MBB containing
+ /// this \p Candidate.
+ ///
+ /// This is optionally used by the target to calculate more fine-grained
+ /// cost model information.
+ LiveRegUnits LRU;
+
+ /// Contains the accumulated register liveness information for the
+ /// instructions in this \p Candidate.
+ ///
+ /// This is optionally used by the target to determine which registers have
+ /// been used across the sequence.
+ LiveRegUnits UsedInSequence;
+
+ /// Return the number of instructions in this Candidate.
+ unsigned getLength() const { return Len; }
+
+ /// Return the start index of this candidate.
+ unsigned getStartIdx() const { return StartIdx; }
+
+ /// Return the end index of this candidate.
+ unsigned getEndIdx() const { return StartIdx + Len - 1; }
+
+ /// Set the CallConstructionID and CallOverhead of this candidate to CID and
+ /// CO respectively.
+ void setCallInfo(unsigned CID, unsigned CO) {
+ CallConstructionID = CID;
+ CallOverhead = CO;
+ }
+
+ /// Returns the call overhead of this candidate if it is in the list.
+ unsigned getCallOverhead() const {
+ return InCandidateList ? CallOverhead : 0;
+ }
+
+ MachineBasicBlock::iterator &front() { return FirstInst; }
+ MachineBasicBlock::iterator &back() { return LastInst; }
+ MachineFunction *getMF() const { return MBB->getParent(); }
+ MachineBasicBlock *getMBB() const { return MBB; }
+
+ /// The number of bytes that would be saved by outlining every
+ /// candidate of this type.
+ ///
+ /// This is a fixed value which is not updated during the candidate pruning
+ /// process. It is only used for deciding which candidate to keep if two
+ /// candidates overlap. The true benefit is stored in the OutlinedFunction
+ /// for some given candidate.
+ unsigned Benefit = 0;
+
+ Candidate(unsigned StartIdx, unsigned Len,
+ MachineBasicBlock::iterator &FirstInst,
+ MachineBasicBlock::iterator &LastInst, MachineBasicBlock *MBB,
+ unsigned FunctionIdx)
+ : StartIdx(StartIdx), Len(Len), FirstInst(FirstInst), LastInst(LastInst),
+ MBB(MBB), FunctionIdx(FunctionIdx) {}
+ Candidate() {}
+
+ /// Used to ensure that \p Candidates are outlined in an order that
+ /// preserves the start and end indices of other \p Candidates.
+ bool operator<(const Candidate &RHS) const {
+ return getStartIdx() > RHS.getStartIdx();
+ }
+
+ /// Compute the registers that are live across this Candidate.
+ /// Used by targets that need this information for cost model calculation.
+ /// If a target does not need this information, then this should not be
+ /// called.
+ void initLRU(const TargetRegisterInfo &TRI) {
+ assert(MBB->getParent()->getRegInfo().tracksLiveness() &&
+ "Candidate's Machine Function must track liveness");
+ LRU.init(TRI);
+ LRU.addLiveOuts(*MBB);
+
+ // Compute liveness from the end of the block up to the beginning of the
+ // outlining candidate.
+ std::for_each(MBB->rbegin(), (MachineBasicBlock::reverse_iterator)front(),
+ [this](MachineInstr &MI) { LRU.stepBackward(MI); });
+
+ // Walk over the sequence itself and figure out which registers were used
+ // in the sequence.
+ UsedInSequence.init(TRI);
+ std::for_each(front(), std::next(back()),
+ [this](MachineInstr &MI) { UsedInSequence.accumulate(MI); });
+ }
+};
+
+/// The information necessary to create an outlined function for some
+/// class of candidate.
+struct OutlinedFunction {
+
+private:
+ /// The number of candidates for this \p OutlinedFunction.
+ unsigned OccurrenceCount = 0;
+
+public:
+ std::vector<std::shared_ptr<Candidate>> Candidates;
+
+ /// The actual outlined function created.
+ /// This is initialized after we go through and create the actual function.
+ MachineFunction *MF = nullptr;
+
+ /// A number assigned to this function which appears at the end of its name.
+ unsigned Name;
+
+ /// The sequence of integers corresponding to the instructions in this
+ /// function.
+ std::vector<unsigned> Sequence;
+
+ /// Represents the size of a sequence in bytes. (Some instructions vary
+ /// widely in size, so just counting the instructions isn't very useful.)
+ unsigned SequenceSize;
+
+ /// Target-defined overhead of constructing a frame for this function.
+ unsigned FrameOverhead;
+
+ /// Target-defined identifier for constructing a frame for this function.
+ unsigned FrameConstructionID;
+
+ /// Return the number of candidates for this \p OutlinedFunction.
+ unsigned getOccurrenceCount() { return OccurrenceCount; }
+
+ /// Decrement the occurrence count of this OutlinedFunction and return the
+ /// new count.
+ unsigned decrement() {
+ assert(OccurrenceCount > 0 && "Can't decrement an empty function!");
+ OccurrenceCount--;
+ return getOccurrenceCount();
+ }
+
+ /// Return the number of bytes it would take to outline this
+ /// function.
+ unsigned getOutliningCost() {
+ unsigned CallOverhead = 0;
+ for (std::shared_ptr<Candidate> &C : Candidates)
+ CallOverhead += C->getCallOverhead();
+ return CallOverhead + SequenceSize + FrameOverhead;
+ }
+
+ /// Return the size in bytes of the unoutlined sequences.
+ unsigned getNotOutlinedCost() { return OccurrenceCount * SequenceSize; }
+
+ /// Return the number of bytes that would be saved by outlining
+ /// this function.
+ unsigned getBenefit() {
+ unsigned NotOutlinedCost = getNotOutlinedCost();
+ unsigned OutlinedCost = getOutliningCost();
+ return (NotOutlinedCost < OutlinedCost) ? 0
+ : NotOutlinedCost - OutlinedCost;
+ }
+
+ OutlinedFunction(std::vector<Candidate> &Cands,
+ unsigned SequenceSize, unsigned FrameOverhead,
+ unsigned FrameConstructionID)
+ : SequenceSize(SequenceSize), FrameOverhead(FrameOverhead),
+ FrameConstructionID(FrameConstructionID) {
+ OccurrenceCount = Cands.size();
+ for (Candidate &C : Cands)
+ Candidates.push_back(std::make_shared<outliner::Candidate>(C));
+
+ unsigned B = getBenefit();
+ for (std::shared_ptr<Candidate> &C : Candidates)
+ C->Benefit = B;
+ }
+
+ OutlinedFunction() {}
+};
+} // namespace outliner
+} // namespace llvm
+
+#endif
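
The cost model above is plain byte arithmetic; a standalone sketch with toy numbers (the overheads are target-defined in LLVM and invented here):

#include <cstdio>
#include <vector>

int main() {
  // Three occurrences of a 12-byte sequence; each call to the outlined
  // function costs 4 bytes, and its frame costs 8 bytes.
  unsigned SequenceSize = 12, FrameOverhead = 8;
  std::vector<unsigned> CallOverheads = {4, 4, 4};

  // getNotOutlinedCost(): every occurrence keeps its own copy.
  unsigned NotOutlinedCost = unsigned(CallOverheads.size()) * SequenceSize; // 36

  // getOutliningCost(): one shared body plus a frame plus all the calls.
  unsigned OutliningCost = SequenceSize + FrameOverhead;
  for (unsigned CO : CallOverheads)
    OutliningCost += CO;                                                    // 32

  // getBenefit(): clamped at zero, so unprofitable functions score 0.
  unsigned Benefit =
      NotOutlinedCost < OutliningCost ? 0 : NotOutlinedCost - OutliningCost;
  std::printf("benefit = %u bytes\n", Benefit);                             // 4
  return 0;
}
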
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineRegisterInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineRegisterInfo.h
index b0dfd02..5bf4a49 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -136,9 +136,9 @@
/// started.
BitVector ReservedRegs;
- using VRegToTypeMap = DenseMap<unsigned, LLT>;
- /// Map generic virtual registers to their actual size.
- mutable std::unique_ptr<VRegToTypeMap> VRegToType;
+ using VRegToTypeMap = IndexedMap<LLT, VirtReg2IndexFunctor>;
+ /// Map generic virtual registers to their low-level type.
+ VRegToTypeMap VRegToType;
/// Keep track of the physical registers that are live in to the function.
/// Live in values are typically arguments in registers. LiveIn values are
@@ -714,26 +714,23 @@
/// createVirtualRegister - Create and return a new virtual register in the
/// function with the specified register class.
- unsigned createVirtualRegister(const TargetRegisterClass *RegClass);
+ unsigned createVirtualRegister(const TargetRegisterClass *RegClass,
+ StringRef Name = "");
- /// Accessor for VRegToType. This accessor should only be used
- /// by global-isel related work.
- VRegToTypeMap &getVRegToType() const {
- if (!VRegToType)
- VRegToType.reset(new VRegToTypeMap);
- return *VRegToType.get();
- }
-
- /// Get the low-level type of \p VReg or LLT{} if VReg is not a generic
+ /// Get the low-level type of \p Reg or LLT{} if Reg is not a generic
/// (target independent) virtual register.
- LLT getType(unsigned VReg) const;
+ LLT getType(unsigned Reg) const {
+ if (TargetRegisterInfo::isVirtualRegister(Reg) && VRegToType.inBounds(Reg))
+ return VRegToType[Reg];
+ return LLT{};
+ }
/// Set the low-level type of \p VReg to \p Ty.
void setType(unsigned VReg, LLT Ty);
/// Create and return a new generic virtual register with low-level
/// type \p Ty.
- unsigned createGenericVirtualRegister(LLT Ty);
+ unsigned createGenericVirtualRegister(LLT Ty, StringRef Name = "");
/// Remove all types associated to virtual registers (after instruction
/// selection and constraining of all generic virtual registers).
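
Replacing the lazily allocated DenseMap with an IndexedMap makes getType a bounds-checked array lookup that can be inlined; a rough standalone analogue (a plain vector stands in for IndexedMap, and a stub for LLT):

#include <vector>

struct LLT {
  int SizeInBits = 0; // 0 plays the role of the invalid LLT{}
  bool isValid() const { return SizeInBits != 0; }
};

struct VRegTypeMap {
  std::vector<LLT> Types; // virtual registers map to a dense index

  LLT getType(unsigned Idx) const {
    if (Idx < Types.size()) // the IndexedMap::inBounds() check
      return Types[Idx];
    return LLT{};           // non-generic vregs report an invalid type
  }

  void setType(unsigned Idx, LLT Ty) {
    if (Idx >= Types.size())
      Types.resize(Idx + 1); // IndexedMap grows on demand
    Types[Idx] = Ty;
  }
};

int main() {
  VRegTypeMap Map;
  Map.setType(3, LLT{32});
  return Map.getType(3).isValid() && !Map.getType(7).isValid() ? 0 : 1;
}
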
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineSSAUpdater.h b/linux-x64/clang/include/llvm/CodeGen/MachineSSAUpdater.h
index b5ea208..5e91246 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineSSAUpdater.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineSSAUpdater.h
@@ -56,7 +56,7 @@
/// MachineSSAUpdater constructor. If InsertedPHIs is specified, it will be
/// filled in with all PHI Nodes created by rewriting.
explicit MachineSSAUpdater(MachineFunction &MF,
- SmallVectorImpl<MachineInstr*> *InsertedPHIs = nullptr);
+ SmallVectorImpl<MachineInstr*> *NewPHI = nullptr);
MachineSSAUpdater(const MachineSSAUpdater &) = delete;
MachineSSAUpdater &operator=(const MachineSSAUpdater &) = delete;
~MachineSSAUpdater();
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineScheduler.h b/linux-x64/clang/include/llvm/CodeGen/MachineScheduler.h
index e327881..85ffa4e 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MachineScheduler.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineScheduler.h
@@ -237,7 +237,7 @@
/// be scheduled at the bottom.
virtual SUnit *pickNode(bool &IsTopNode) = 0;
- /// \brief Scheduler callback to notify that a new subtree is scheduled.
+ /// Scheduler callback to notify that a new subtree is scheduled.
virtual void scheduleTree(unsigned SubtreeID) {}
/// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an
@@ -318,11 +318,11 @@
Mutations.push_back(std::move(Mutation));
}
- /// \brief True if an edge can be added from PredSU to SuccSU without creating
+ /// True if an edge can be added from PredSU to SuccSU without creating
/// a cycle.
bool canAddEdge(SUnit *SuccSU, SUnit *PredSU);
- /// \brief Add a DAG edge to the given SU with the given predecessor
+ /// Add a DAG edge to the given SU with the given predecessor
/// dependence data.
///
/// \returns true if the edge may be added without creating a cycle OR if an
@@ -374,7 +374,7 @@
/// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
void placeDebugValues();
- /// \brief dump the scheduled Sequence.
+ /// Dump the scheduled Sequence.
void dumpSchedule() const;
// Lesser helpers...
@@ -445,7 +445,7 @@
/// Return true if this DAG supports VReg liveness and RegPressure.
bool hasVRegLiveness() const override { return true; }
- /// \brief Return true if register pressure tracking is enabled.
+ /// Return true if register pressure tracking is enabled.
bool isTrackingPressure() const { return ShouldTrackPressure; }
/// Get current register pressure for the top scheduled instructions.
@@ -897,6 +897,28 @@
#endif
};
+// Utility functions used by heuristics in tryCandidate().
+bool tryLess(int TryVal, int CandVal,
+ GenericSchedulerBase::SchedCandidate &TryCand,
+ GenericSchedulerBase::SchedCandidate &Cand,
+ GenericSchedulerBase::CandReason Reason);
+bool tryGreater(int TryVal, int CandVal,
+ GenericSchedulerBase::SchedCandidate &TryCand,
+ GenericSchedulerBase::SchedCandidate &Cand,
+ GenericSchedulerBase::CandReason Reason);
+bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
+ GenericSchedulerBase::SchedCandidate &Cand,
+ SchedBoundary &Zone);
+bool tryPressure(const PressureChange &TryP,
+ const PressureChange &CandP,
+ GenericSchedulerBase::SchedCandidate &TryCand,
+ GenericSchedulerBase::SchedCandidate &Cand,
+ GenericSchedulerBase::CandReason Reason,
+ const TargetRegisterInfo *TRI,
+ const MachineFunction &MF);
+unsigned getWeakLeft(const SUnit *SU, bool isTop);
+int biasPhysRegCopy(const SUnit *SU, bool isTop);
+
/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class GenericScheduler : public GenericSchedulerBase {
@@ -963,9 +985,8 @@
const RegPressureTracker &RPTracker,
RegPressureTracker &TempTracker);
- void tryCandidate(SchedCandidate &Cand,
- SchedCandidate &TryCand,
- SchedBoundary *Zone);
+ virtual void tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
+ SchedBoundary *Zone) const;
SUnit *pickNodeBidirectional(bool &IsTopNode);
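
Making tryCandidate virtual (and exporting the helpers above) turns the generic scheduler's candidate comparison into an overridable hook; a standalone analogue of the pattern (toy types rather than the LLVM classes, with an invented bias heuristic):

#include <cstdio>

struct SchedCandidate { int Priority = 0; };

struct GenericSchedulerLike {
  virtual ~GenericSchedulerLike() = default;
  // Default comparison: keep the higher-priority candidate in Cand.
  virtual void tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand) const {
    if (TryCand.Priority > Cand.Priority)
      Cand = TryCand;
  }
};

struct MyTargetScheduler : GenericSchedulerLike {
  // A target-specific heuristic layered in front of the generic rules.
  void tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand) const override {
    TryCand.Priority += 1; // e.g. bias some class of instructions
    GenericSchedulerLike::tryCandidate(Cand, TryCand);
  }
};

int main() {
  MyTargetScheduler S;
  SchedCandidate Best, Next; // tied at priority 0; the bias breaks the tie
  S.tryCandidate(Best, Next);
  std::printf("winning priority: %d\n", Best.Priority); // 1
  return 0;
}
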
diff --git a/linux-x64/clang/include/llvm/CodeGen/MacroFusion.h b/linux-x64/clang/include/llvm/CodeGen/MacroFusion.h
index dc105fd..a77226d 100644
--- a/linux-x64/clang/include/llvm/CodeGen/MacroFusion.h
+++ b/linux-x64/clang/include/llvm/CodeGen/MacroFusion.h
@@ -25,7 +25,7 @@
class TargetInstrInfo;
class TargetSubtargetInfo;
-/// \brief Check if the instr pair, FirstMI and SecondMI, should be fused
+/// Check if the instr pair, FirstMI and SecondMI, should be fused
/// together. Given SecondMI, when FirstMI is unspecified, check whether
/// SecondMI may be part of a fused pair at all.
using ShouldSchedulePredTy = std::function<bool(const TargetInstrInfo &TII,
@@ -33,13 +33,13 @@
const MachineInstr *FirstMI,
const MachineInstr &SecondMI)>;
-/// \brief Create a DAG scheduling mutation to pair instructions back to back
+/// Create a DAG scheduling mutation to pair instructions back to back
/// for instructions that benefit according to the target-specific
/// shouldScheduleAdjacent predicate function.
std::unique_ptr<ScheduleDAGMutation>
createMacroFusionDAGMutation(ShouldSchedulePredTy shouldScheduleAdjacent);
-/// \brief Create a DAG scheduling mutation to pair branch instructions with one
+/// Create a DAG scheduling mutation to pair branch instructions with one
/// of their predecessors back to back for instructions that benefit according
/// to the target-specific shouldScheduleAdjacent predicate function.
std::unique_ptr<ScheduleDAGMutation>
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQP/Graph.h b/linux-x64/clang/include/llvm/CodeGen/PBQP/Graph.h
index e94878c..a6d88b0 100644
--- a/linux-x64/clang/include/llvm/CodeGen/PBQP/Graph.h
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQP/Graph.h
@@ -29,12 +29,12 @@
using NodeId = unsigned;
using EdgeId = unsigned;
- /// @brief Returns a value representing an invalid (non-existent) node.
+ /// Returns a value representing an invalid (non-existent) node.
static NodeId invalidNodeId() {
return std::numeric_limits<NodeId>::max();
}
- /// @brief Returns a value representing an invalid (non-existent) edge.
+ /// Returns a value representing an invalid (non-existent) edge.
static EdgeId invalidEdgeId() {
return std::numeric_limits<EdgeId>::max();
}
@@ -338,19 +338,19 @@
const NodeEntry &NE;
};
- /// @brief Construct an empty PBQP graph.
+ /// Construct an empty PBQP graph.
Graph() = default;
- /// @brief Construct an empty PBQP graph with the given graph metadata.
+ /// Construct an empty PBQP graph with the given graph metadata.
Graph(GraphMetadata Metadata) : Metadata(std::move(Metadata)) {}
- /// @brief Get a reference to the graph metadata.
+ /// Get a reference to the graph metadata.
GraphMetadata& getMetadata() { return Metadata; }
- /// @brief Get a const-reference to the graph metadata.
+ /// Get a const-reference to the graph metadata.
const GraphMetadata& getMetadata() const { return Metadata; }
- /// @brief Lock this graph to the given solver instance in preparation
+ /// Lock this graph to the given solver instance in preparation
/// for running the solver. This method will call solver.handleAddNode for
/// each node in the graph, and handleAddEdge for each edge, to give the
/// solver an opportunity to set up any required metadata.
@@ -363,13 +363,13 @@
Solver->handleAddEdge(EId);
}
- /// @brief Release from solver instance.
+ /// Release from solver instance.
void unsetSolver() {
assert(Solver && "Solver not set.");
Solver = nullptr;
}
- /// @brief Add a node with the given costs.
+ /// Add a node with the given costs.
/// @param Costs Cost vector for the new node.
/// @return Node iterator for the added node.
template <typename OtherVectorT>
@@ -382,7 +382,7 @@
return NId;
}
- /// @brief Add a node bypassing the cost allocator.
+ /// Add a node bypassing the cost allocator.
/// @param Costs Cost vector ptr for the new node (must be convertible to
/// VectorPtr).
/// @return Node iterator for the added node.
@@ -401,7 +401,7 @@
return NId;
}
- /// @brief Add an edge between the given nodes with the given costs.
+ /// Add an edge between the given nodes with the given costs.
/// @param N1Id First node.
/// @param N2Id Second node.
/// @param Costs Cost matrix for new edge.
@@ -419,7 +419,7 @@
return EId;
}
- /// @brief Add an edge bypassing the cost allocator.
+ /// Add an edge bypassing the cost allocator.
/// @param N1Id First node.
/// @param N2Id Second node.
/// @param Costs Cost matrix for new edge.
@@ -444,7 +444,7 @@
return EId;
}
- /// @brief Returns true if the graph is empty.
+ /// Returns true if the graph is empty.
bool empty() const { return NodeIdSet(*this).empty(); }
NodeIdSet nodeIds() const { return NodeIdSet(*this); }
@@ -452,15 +452,15 @@
AdjEdgeIdSet adjEdgeIds(NodeId NId) { return AdjEdgeIdSet(getNode(NId)); }
- /// @brief Get the number of nodes in the graph.
+ /// Get the number of nodes in the graph.
/// @return Number of nodes in the graph.
unsigned getNumNodes() const { return NodeIdSet(*this).size(); }
- /// @brief Get the number of edges in the graph.
+ /// Get the number of edges in the graph.
/// @return Number of edges in the graph.
unsigned getNumEdges() const { return EdgeIdSet(*this).size(); }
- /// @brief Set a node's cost vector.
+ /// Set a node's cost vector.
/// @param NId Node to update.
/// @param Costs New costs to set.
template <typename OtherVectorT>
@@ -471,7 +471,7 @@
getNode(NId).Costs = AllocatedCosts;
}
- /// @brief Get a VectorPtr to a node's cost vector. Rarely useful - use
+ /// Get a VectorPtr to a node's cost vector. Rarely useful - use
/// getNodeCosts where possible.
/// @param NId Node id.
/// @return VectorPtr to node cost vector.
@@ -483,7 +483,7 @@
return getNode(NId).Costs;
}
- /// @brief Get a node's cost vector.
+ /// Get a node's cost vector.
/// @param NId Node id.
/// @return Node cost vector.
const Vector& getNodeCosts(NodeId NId) const {
@@ -502,7 +502,7 @@
return getNode(NId).getAdjEdgeIds().size();
}
- /// @brief Update an edge's cost matrix.
+ /// Update an edge's cost matrix.
/// @param EId Edge id.
/// @param Costs New cost matrix.
template <typename OtherMatrixT>
@@ -513,7 +513,7 @@
getEdge(EId).Costs = AllocatedCosts;
}
- /// @brief Get a MatrixPtr to a node's cost matrix. Rarely useful - use
+ /// Get a MatrixPtr to an edge's cost matrix. Rarely useful - use
/// getEdgeCosts where possible.
/// @param EId Edge id.
/// @return MatrixPtr to edge cost matrix.
@@ -525,7 +525,7 @@
return getEdge(EId).Costs;
}
- /// @brief Get an edge's cost matrix.
+ /// Get an edge's cost matrix.
/// @param EId Edge id.
/// @return Edge cost matrix.
const Matrix& getEdgeCosts(EdgeId EId) const {
@@ -540,21 +540,21 @@
return getEdge(EId).Metadata;
}
- /// @brief Get the first node connected to this edge.
+ /// Get the first node connected to this edge.
/// @param EId Edge id.
/// @return The first node connected to the given edge.
NodeId getEdgeNode1Id(EdgeId EId) const {
return getEdge(EId).getN1Id();
}
- /// @brief Get the second node connected to this edge.
+ /// Get the second node connected to this edge.
/// @param EId Edge id.
/// @return The second node connected to the given edge.
NodeId getEdgeNode2Id(EdgeId EId) const {
return getEdge(EId).getN2Id();
}
- /// @brief Get the "other" node connected to this edge.
+ /// Get the "other" node connected to this edge.
/// @param EId Edge id.
/// @param NId Node id for the "given" node.
/// @return The id of the "other" node connected to this edge.
@@ -566,7 +566,7 @@
return E.getN1Id();
}
- /// @brief Get the edge connecting two nodes.
+ /// Get the edge connecting two nodes.
/// @param N1Id First node id.
/// @param N2Id Second node id.
/// @return An id for edge (N1Id, N2Id) if such an edge exists,
@@ -581,7 +581,7 @@
return invalidEdgeId();
}
- /// @brief Remove a node from the graph.
+ /// Remove a node from the graph.
/// @param NId Node id.
void removeNode(NodeId NId) {
if (Solver)
@@ -598,7 +598,7 @@
FreeNodeIds.push_back(NId);
}
- /// @brief Disconnect an edge from the given node.
+ /// Disconnect an edge from the given node.
///
/// Removes the given edge from the adjacency list of the given node.
/// This operation leaves the edge in an 'asymmetric' state: It will no
@@ -631,14 +631,14 @@
E.disconnectFrom(*this, NId);
}
- /// @brief Convenience method to disconnect all neighbours from the given
+ /// Convenience method to disconnect all neighbours from the given
/// node.
void disconnectAllNeighborsFromNode(NodeId NId) {
for (auto AEId : adjEdgeIds(NId))
disconnectEdge(AEId, getEdgeOtherNodeId(AEId, NId));
}
- /// @brief Re-attach an edge to its nodes.
+ /// Re-attach an edge to its nodes.
///
/// Adds an edge that had been previously disconnected back into the
/// adjacency set of the nodes that the edge connects.
@@ -649,7 +649,7 @@
Solver->handleReconnectEdge(EId, NId);
}
- /// @brief Remove an edge from the graph.
+ /// Remove an edge from the graph.
/// @param EId Edge id.
void removeEdge(EdgeId EId) {
if (Solver)
@@ -660,7 +660,7 @@
Edges[EId].invalidate();
}
- /// @brief Remove all nodes and edges from the graph.
+ /// Remove all nodes and edges from the graph.
void clear() {
Nodes.clear();
FreeNodeIds.clear();
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQP/Math.h b/linux-x64/clang/include/llvm/CodeGen/PBQP/Math.h
index ba405e8..d1432a3 100644
--- a/linux-x64/clang/include/llvm/CodeGen/PBQP/Math.h
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQP/Math.h
@@ -22,34 +22,34 @@
using PBQPNum = float;
-/// \brief PBQP Vector class.
+/// PBQP Vector class.
class Vector {
friend hash_code hash_value(const Vector &);
public:
- /// \brief Construct a PBQP vector of the given size.
+ /// Construct a PBQP vector of the given size.
explicit Vector(unsigned Length)
: Length(Length), Data(llvm::make_unique<PBQPNum []>(Length)) {}
- /// \brief Construct a PBQP vector with initializer.
+ /// Construct a PBQP vector with initializer.
Vector(unsigned Length, PBQPNum InitVal)
: Length(Length), Data(llvm::make_unique<PBQPNum []>(Length)) {
std::fill(Data.get(), Data.get() + Length, InitVal);
}
- /// \brief Copy construct a PBQP vector.
+ /// Copy construct a PBQP vector.
Vector(const Vector &V)
: Length(V.Length), Data(llvm::make_unique<PBQPNum []>(Length)) {
std::copy(V.Data.get(), V.Data.get() + Length, Data.get());
}
- /// \brief Move construct a PBQP vector.
+ /// Move construct a PBQP vector.
Vector(Vector &&V)
: Length(V.Length), Data(std::move(V.Data)) {
V.Length = 0;
}
- /// \brief Comparison operator.
+ /// Comparison operator.
bool operator==(const Vector &V) const {
assert(Length != 0 && Data && "Invalid vector");
if (Length != V.Length)
@@ -57,27 +57,27 @@
return std::equal(Data.get(), Data.get() + Length, V.Data.get());
}
- /// \brief Return the length of the vector
+ /// Return the length of the vector.
unsigned getLength() const {
assert(Length != 0 && Data && "Invalid vector");
return Length;
}
- /// \brief Element access.
+ /// Element access.
PBQPNum& operator[](unsigned Index) {
assert(Length != 0 && Data && "Invalid vector");
assert(Index < Length && "Vector element access out of bounds.");
return Data[Index];
}
- /// \brief Const element access.
+ /// Const element access.
const PBQPNum& operator[](unsigned Index) const {
assert(Length != 0 && Data && "Invalid vector");
assert(Index < Length && "Vector element access out of bounds.");
return Data[Index];
}
- /// \brief Add another vector to this one.
+ /// Add another vector to this one.
Vector& operator+=(const Vector &V) {
assert(Length != 0 && Data && "Invalid vector");
assert(Length == V.Length && "Vector length mismatch.");
@@ -86,7 +86,7 @@
return *this;
}
- /// \brief Returns the index of the minimum value in this vector
+ /// Returns the index of the minimum value in this vector.
unsigned minIndex() const {
assert(Length != 0 && Data && "Invalid vector");
return std::min_element(Data.get(), Data.get() + Length) - Data.get();
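
minIndex is the core of how a PBQP solution is read back: each node takes the option with the smallest cost in its vector. A standalone sketch of the idiom (in PBQP register allocation, option 0 is the spill option and the rest are physical registers; the costs are invented):

#include <algorithm>
#include <cassert>

int main() {
  float Costs[] = {5.0f, 2.0f, 7.0f}; // one node's cost vector
  unsigned MinIdx = unsigned(std::min_element(Costs, Costs + 3) - Costs);
  assert(MinIdx == 1); // option 1 (a physical register) is cheapest
  return 0;
}
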
@@ -97,14 +97,14 @@
std::unique_ptr<PBQPNum []> Data;
};
-/// \brief Return a hash_value for the given vector.
+/// Return a hash_value for the given vector.
inline hash_code hash_value(const Vector &V) {
unsigned *VBegin = reinterpret_cast<unsigned*>(V.Data.get());
unsigned *VEnd = reinterpret_cast<unsigned*>(V.Data.get() + V.Length);
return hash_combine(V.Length, hash_combine_range(VBegin, VEnd));
}
-/// \brief Output a textual representation of the given vector on the given
+/// Output a textual representation of the given vector on the given
/// output stream.
template <typename OStream>
OStream& operator<<(OStream &OS, const Vector &V) {
@@ -118,18 +118,18 @@
return OS;
}
-/// \brief PBQP Matrix class
+/// PBQP Matrix class
class Matrix {
private:
friend hash_code hash_value(const Matrix &);
public:
- /// \brief Construct a PBQP Matrix with the given dimensions.
+ /// Construct a PBQP Matrix with the given dimensions.
Matrix(unsigned Rows, unsigned Cols) :
Rows(Rows), Cols(Cols), Data(llvm::make_unique<PBQPNum []>(Rows * Cols)) {
}
- /// \brief Construct a PBQP Matrix with the given dimensions and initial
+ /// Construct a PBQP Matrix with the given dimensions and initial
/// value.
Matrix(unsigned Rows, unsigned Cols, PBQPNum InitVal)
: Rows(Rows), Cols(Cols),
@@ -137,20 +137,20 @@
std::fill(Data.get(), Data.get() + (Rows * Cols), InitVal);
}
- /// \brief Copy construct a PBQP matrix.
+ /// Copy construct a PBQP matrix.
Matrix(const Matrix &M)
: Rows(M.Rows), Cols(M.Cols),
Data(llvm::make_unique<PBQPNum []>(Rows * Cols)) {
std::copy(M.Data.get(), M.Data.get() + (Rows * Cols), Data.get());
}
- /// \brief Move construct a PBQP matrix.
+ /// Move construct a PBQP matrix.
Matrix(Matrix &&M)
: Rows(M.Rows), Cols(M.Cols), Data(std::move(M.Data)) {
M.Rows = M.Cols = 0;
}
- /// \brief Comparison operator.
+ /// Comparison operator.
bool operator==(const Matrix &M) const {
assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
if (Rows != M.Rows || Cols != M.Cols)
@@ -158,33 +158,33 @@
return std::equal(Data.get(), Data.get() + (Rows * Cols), M.Data.get());
}
- /// \brief Return the number of rows in this matrix.
+ /// Return the number of rows in this matrix.
unsigned getRows() const {
assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
return Rows;
}
- /// \brief Return the number of cols in this matrix.
+ /// Return the number of cols in this matrix.
unsigned getCols() const {
assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
return Cols;
}
- /// \brief Matrix element access.
+ /// Matrix element access.
PBQPNum* operator[](unsigned R) {
assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
assert(R < Rows && "Row out of bounds.");
return Data.get() + (R * Cols);
}
- /// \brief Matrix element access.
+ /// Matrix element access.
const PBQPNum* operator[](unsigned R) const {
assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
assert(R < Rows && "Row out of bounds.");
return Data.get() + (R * Cols);
}
- /// \brief Returns the given row as a vector.
+ /// Returns the given row as a vector.
Vector getRowAsVector(unsigned R) const {
assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
Vector V(Cols);
@@ -193,7 +193,7 @@
return V;
}
- /// \brief Returns the given column as a vector.
+ /// Returns the given column as a vector.
Vector getColAsVector(unsigned C) const {
assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
Vector V(Rows);
@@ -202,7 +202,7 @@
return V;
}
- /// \brief Matrix transpose.
+ /// Matrix transpose.
Matrix transpose() const {
assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
Matrix M(Cols, Rows);
@@ -212,7 +212,7 @@
return M;
}
- /// \brief Add the given matrix to this one.
+ /// Add the given matrix to this one.
Matrix& operator+=(const Matrix &M) {
assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
assert(Rows == M.Rows && Cols == M.Cols &&
@@ -234,7 +234,7 @@
std::unique_ptr<PBQPNum []> Data;
};
-/// \brief Return a hash_code for the given matrix.
+/// Return a hash_code for the given matrix.
inline hash_code hash_value(const Matrix &M) {
unsigned *MBegin = reinterpret_cast<unsigned*>(M.Data.get());
unsigned *MEnd =
@@ -242,7 +242,7 @@
return hash_combine(M.Rows, M.Cols, hash_combine_range(MBegin, MEnd));
}
-/// \brief Output a textual representation of the given matrix on the given
+/// Output a textual representation of the given matrix on the given
/// output stream.
template <typename OStream>
OStream& operator<<(OStream &OS, const Matrix &M) {
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQP/ReductionRules.h b/linux-x64/clang/include/llvm/CodeGen/PBQP/ReductionRules.h
index 8aeb519..21b9902 100644
--- a/linux-x64/clang/include/llvm/CodeGen/PBQP/ReductionRules.h
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQP/ReductionRules.h
@@ -23,7 +23,7 @@
namespace llvm {
namespace PBQP {
- /// \brief Reduce a node of degree one.
+ /// Reduce a node of degree one.
///
/// Propagate costs from the given node, which must be of degree one, to its
/// neighbor. Notify the problem domain.
@@ -166,7 +166,7 @@
}
#endif
- // \brief Find a solution to a fully reduced graph by backpropagation.
+ // Find a solution to a fully reduced graph by backpropagation.
//
// Given a graph and a reduction order, pop each node from the reduction
// order and greedily compute a minimum solution based on the node costs, and
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQP/Solution.h b/linux-x64/clang/include/llvm/CodeGen/PBQP/Solution.h
index 6a24727..4d4379f 100644
--- a/linux-x64/clang/include/llvm/CodeGen/PBQP/Solution.h
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQP/Solution.h
@@ -21,7 +21,7 @@
namespace llvm {
namespace PBQP {
- /// \brief Represents a solution to a PBQP problem.
+ /// Represents a solution to a PBQP problem.
///
/// To get the selection for each node in the problem, use the getSelection method.
class Solution {
@@ -30,17 +30,17 @@
SelectionsMap selections;
public:
- /// \brief Initialise an empty solution.
+ /// Initialise an empty solution.
Solution() = default;
- /// \brief Set the selection for a given node.
+ /// Set the selection for a given node.
/// @param nodeId Node id.
/// @param selection Selection for nodeId.
void setSelection(GraphBase::NodeId nodeId, unsigned selection) {
selections[nodeId] = selection;
}
- /// \brief Get a node's selection.
+ /// Get a node's selection.
/// @param nodeId Node id.
/// @return The selection for nodeId.
unsigned getSelection(GraphBase::NodeId nodeId) const {
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQPRAConstraint.h b/linux-x64/clang/include/llvm/CodeGen/PBQPRAConstraint.h
index 269b7a7..995467d 100644
--- a/linux-x64/clang/include/llvm/CodeGen/PBQPRAConstraint.h
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQPRAConstraint.h
@@ -33,7 +33,7 @@
using PBQPRAGraph = PBQP::RegAlloc::PBQPRAGraph;
-/// @brief Abstract base for classes implementing PBQP register allocation
+/// Abstract base for classes implementing PBQP register allocation
/// constraints (e.g. Spill-costs, interference, coalescing).
class PBQPRAConstraint {
public:
@@ -44,7 +44,7 @@
virtual void anchor();
};
-/// @brief PBQP register allocation constraint composer.
+/// PBQP register allocation constraint composer.
///
/// Constraints added to this list will be applied, in the order that they are
/// added, to the PBQP graph.
diff --git a/linux-x64/clang/include/llvm/CodeGen/ParallelCG.h b/linux-x64/clang/include/llvm/CodeGen/ParallelCG.h
index 14ef0ec..dbf09ea 100644
--- a/linux-x64/clang/include/llvm/CodeGen/ParallelCG.h
+++ b/linux-x64/clang/include/llvm/CodeGen/ParallelCG.h
@@ -40,7 +40,7 @@
splitCodeGen(std::unique_ptr<Module> M, ArrayRef<raw_pwrite_stream *> OSs,
ArrayRef<llvm::raw_pwrite_stream *> BCOSs,
const std::function<std::unique_ptr<TargetMachine>()> &TMFactory,
- TargetMachine::CodeGenFileType FT = TargetMachine::CGFT_ObjectFile,
+ TargetMachine::CodeGenFileType FileType = TargetMachine::CGFT_ObjectFile,
bool PreserveLocals = false);
} // namespace llvm
diff --git a/linux-x64/clang/include/llvm/CodeGen/Passes.h b/linux-x64/clang/include/llvm/CodeGen/Passes.h
index 68fd04b..cb12b14 100644
--- a/linux-x64/clang/include/llvm/CodeGen/Passes.h
+++ b/linux-x64/clang/include/llvm/CodeGen/Passes.h
@@ -301,7 +301,7 @@
/// StackSlotColoring - This pass performs stack slot coloring.
extern char &StackSlotColoringID;
- /// \brief This pass lays out funclets contiguously.
+ /// This pass lays out funclets contiguously.
extern char &FuncletLayoutID;
/// This pass inserts the XRay instrumentation sleds if they are supported by
@@ -311,7 +311,7 @@
/// This pass inserts FEntry calls
extern char &FEntryInserterID;
- /// \brief This pass implements the "patchable-function" attribute.
+ /// This pass implements the "patchable-function" attribute.
extern char &PatchableFunctionID;
/// createStackProtectorPass - This pass adds stack protectors to functions.
@@ -329,13 +329,17 @@
/// createWinEHPass - Prepares personality functions used by MSVC on Windows,
/// in addition to the Itanium LSDA based personalities.
- FunctionPass *createWinEHPass();
+ FunctionPass *createWinEHPass(bool DemoteCatchSwitchPHIOnly = false);
/// createSjLjEHPreparePass - This pass adapts exception handling code to use
/// the GCC-style builtin setjmp/longjmp (sjlj) to handle EH control flow.
///
FunctionPass *createSjLjEHPreparePass();
+ /// createWasmEHPass - This pass adapts exception handling code to use
+ /// WebAssembly's exception handling scheme.
+ FunctionPass *createWasmEHPass();
+
/// LocalStackSlotAllocation - This pass assigns local frame indices to stack
/// slots relative to one another and allocates base registers to access them
/// when it is estimated by the target to be out of range of normal frame
@@ -380,7 +384,7 @@
///
ModulePass *createLowerEmuTLSPass();
- /// This pass lowers the @llvm.load.relative intrinsic to instructions.
+ /// This pass lowers the \@llvm.load.relative intrinsic to instructions.
/// This is unsafe to do earlier because a pass may combine the constant
/// initializer into the load, which may result in an overflowing evaluation.
ModulePass *createPreISelIntrinsicLoweringPass();
@@ -419,7 +423,7 @@
/// This pass performs outlining on machine instructions directly before
/// printing assembly.
- ModulePass *createMachineOutlinerPass(bool OutlineFromLinkOnceODRs = false);
+ ModulePass *createMachineOutlinerPass(bool RunOnAllFunctions = true);
/// This pass expands the experimental reduction intrinsics into sequences of
/// shuffles.
@@ -434,6 +438,9 @@
/// This pass expands indirectbr instructions.
FunctionPass *createIndirectBrExpandPass();
+ /// Creates CFI Instruction Inserter pass. \see CFIInstrInserter.cpp
+ FunctionPass *createCFIInstrInserter();
+
} // End llvm namespace
#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/RegAllocPBQP.h b/linux-x64/clang/include/llvm/CodeGen/RegAllocPBQP.h
index 5b34286..ba97630 100644
--- a/linux-x64/clang/include/llvm/CodeGen/RegAllocPBQP.h
+++ b/linux-x64/clang/include/llvm/CodeGen/RegAllocPBQP.h
@@ -43,10 +43,10 @@
namespace PBQP {
namespace RegAlloc {
-/// @brief Spill option index.
+/// Spill option index.
inline unsigned getSpillOptionIdx() { return 0; }
-/// \brief Metadata to speed allocatability test.
+/// Metadata to speed allocatability test.
///
/// Keeps track of the number of infinities in each row and column.
class MatrixMetadata {
@@ -89,7 +89,7 @@
std::unique_ptr<bool[]> UnsafeCols;
};
-/// \brief Holds a vector of the allowed physical regs for a vreg.
+/// Holds a vector of the allowed physical regs for a vreg.
class AllowedRegVector {
friend hash_code hash_value(const AllowedRegVector &);
@@ -127,7 +127,7 @@
hash_combine_range(OStart, OEnd));
}
-/// \brief Holds graph-level metadata relevant to PBQP RA problems.
+/// Holds graph-level metadata relevant to PBQP RA problems.
class GraphMetadata {
private:
using AllowedRegVecPool = ValuePool<AllowedRegVector>;
@@ -164,7 +164,7 @@
AllowedRegVecPool AllowedRegVecs;
};
-/// \brief Holds solver state and other metadata relevant to each PBQP RA node.
+/// Holds solver state and other metadata relevant to each PBQP RA node.
class NodeMetadata {
public:
using AllowedRegVector = RegAlloc::AllowedRegVector;
@@ -505,14 +505,14 @@
public:
PBQPRAGraph(GraphMetadata Metadata) : BaseT(std::move(Metadata)) {}
- /// @brief Dump this graph to dbgs().
+ /// Dump this graph to dbgs().
void dump() const;
- /// @brief Dump this graph to an output stream.
+ /// Dump this graph to an output stream.
/// @param OS Output stream to print on.
void dump(raw_ostream &OS) const;
- /// @brief Print a representation of this graph in DOT format.
+ /// Print a representation of this graph in DOT format.
/// @param OS Output stream to print on.
void printDot(raw_ostream &OS) const;
};
@@ -527,7 +527,7 @@
} // end namespace RegAlloc
} // end namespace PBQP
-/// @brief Create a PBQP register allocator instance.
+/// Create a PBQP register allocator instance.
FunctionPass *
createPBQPRegisterAllocator(char *customPassID = nullptr);
diff --git a/linux-x64/clang/include/llvm/CodeGen/RegisterPressure.h b/linux-x64/clang/include/llvm/CodeGen/RegisterPressure.h
index 2b14b78..79054b9 100644
--- a/linux-x64/clang/include/llvm/CodeGen/RegisterPressure.h
+++ b/linux-x64/clang/include/llvm/CodeGen/RegisterPressure.h
@@ -171,10 +171,10 @@
public:
/// List of virtual registers and register units read by the instruction.
SmallVector<RegisterMaskPair, 8> Uses;
- /// \brief List of virtual registers and register units defined by the
+ /// List of virtual registers and register units defined by the
/// instruction which are not dead.
SmallVector<RegisterMaskPair, 8> Defs;
- /// \brief List of virtual registers and register units defined by the
+ /// List of virtual registers and register units defined by the
/// instruction but dead.
SmallVector<RegisterMaskPair, 8> DeadDefs;
@@ -219,7 +219,7 @@
return const_cast<PressureDiffs*>(this)->operator[](Idx);
}
- /// \brief Record pressure difference induced by the given operand list to
+ /// Record pressure difference induced by the given operand list to
/// node with index \p Idx.
void addInstruction(unsigned Idx, const RegisterOperands &RegOpers,
const MachineRegisterInfo &MRI);
@@ -546,7 +546,7 @@
/// Add Reg to the live in set and increase max pressure.
void discoverLiveIn(RegisterMaskPair Pair);
- /// \brief Get the SlotIndex for the first nondebug instruction including or
+ /// Get the SlotIndex for the first nondebug instruction including or
/// after the current position.
SlotIndex getCurrSlot() const;
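
A sketch of how addInstruction is typically fed: the operands of a MachineInstr are collected first, then recorded against the instruction's node index. RegisterOperands::collect and the in-scope names MI, TRI, MRI, PDiffs, and SU are assumptions here, not declarations from this header:

    llvm::RegisterOperands RegOpers;
    // Gather uses/defs/dead-defs of MI (lane masks not tracked in this sketch).
    RegOpers.collect(MI, *TRI, MRI, /*TrackLaneMasks=*/false,
                     /*IgnoreDead=*/false);
    // Record the pressure change MI induces at its node index.
    PDiffs.addInstruction(SU->NodeNum, RegOpers, MRI);
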
diff --git a/linux-x64/clang/include/llvm/CodeGen/RegisterScavenging.h b/linux-x64/clang/include/llvm/CodeGen/RegisterScavenging.h
index 489c72b..b6bd028 100644
--- a/linux-x64/clang/include/llvm/CodeGen/RegisterScavenging.h
+++ b/linux-x64/clang/include/llvm/CodeGen/RegisterScavenging.h
@@ -127,7 +127,7 @@
/// Find an unused register of the specified register class.
/// Return 0 if none is found.
- unsigned FindUnusedReg(const TargetRegisterClass *RegClass) const;
+ unsigned FindUnusedReg(const TargetRegisterClass *RC) const;
/// Add a scavenging frame index.
void addScavengingFrameIndex(int FI) {
@@ -158,7 +158,7 @@
/// Returns the scavenged register.
/// This is deprecated as it depends on the quality of the kill flags being
/// present; Use scavengeRegisterBackwards() instead!
- unsigned scavengeRegister(const TargetRegisterClass *RegClass,
+ unsigned scavengeRegister(const TargetRegisterClass *RC,
MachineBasicBlock::iterator I, int SPAdj);
unsigned scavengeRegister(const TargetRegisterClass *RegClass, int SPAdj) {
return scavengeRegister(RegClass, MBBI, SPAdj);
@@ -218,7 +218,7 @@
/// Spill a register after position \p Before and reload it before position
/// \p UseMI.
ScavengedInfo &spill(unsigned Reg, const TargetRegisterClass &RC, int SPAdj,
- MachineBasicBlock::iterator After,
+ MachineBasicBlock::iterator Before,
MachineBasicBlock::iterator &UseMI);
};
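
The header deprecates kill-flag-based scavenging, but the classic eliminateFrameIndex pattern still reads as below; RS and RC are assumed to be in scope (a sketch, not from the patch):

    unsigned Reg = RS->FindUnusedReg(RC); // cheap: no spill; 0 on failure
    if (!Reg)
      // Falls back to spilling around the current position; prefer
      // scavengeRegisterBackwards() in new code, as the comment above notes.
      Reg = RS->scavengeRegister(RC, /*SPAdj=*/0);
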
diff --git a/linux-x64/clang/include/llvm/CodeGen/RegisterUsageInfo.h b/linux-x64/clang/include/llvm/CodeGen/RegisterUsageInfo.h
index eabadd8..efd175e 100644
--- a/linux-x64/clang/include/llvm/CodeGen/RegisterUsageInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/RegisterUsageInfo.h
@@ -19,6 +19,7 @@
#ifndef LLVM_CODEGEN_PHYSICALREGISTERUSAGEINFO_H
#define LLVM_CODEGEN_PHYSICALREGISTERUSAGEINFO_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Pass.h"
@@ -31,8 +32,6 @@
class TargetMachine;
class PhysicalRegisterUsageInfo : public ImmutablePass {
- virtual void anchor();
-
public:
static char ID;
@@ -41,25 +40,20 @@
initializePhysicalRegisterUsageInfoPass(Registry);
}
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.setPreservesAll();
- }
-
- /// To set TargetMachine *, which is used to print
- /// analysis when command line option -print-regusage is used.
- void setTargetMachine(const TargetMachine *TM_) { TM = TM_; }
+ /// Set the TargetMachine, which is used when printing the analysis.
+ void setTargetMachine(const TargetMachine &TM);
bool doInitialization(Module &M) override;
bool doFinalization(Module &M) override;
/// To store RegMask for given Function *.
- void storeUpdateRegUsageInfo(const Function *FP,
- std::vector<uint32_t> RegMask);
+ void storeUpdateRegUsageInfo(const Function &FP,
+ ArrayRef<uint32_t> RegMask);
- /// To query stored RegMask for given Function *, it will return nullptr if
- /// function is not known.
- const std::vector<uint32_t> *getRegUsageInfo(const Function *FP);
+ /// To query the stored RegMask for a given Function; returns an empty
+ /// array if the function is not known.
+ ArrayRef<uint32_t> getRegUsageInfo(const Function &FP);
void print(raw_ostream &OS, const Module *M = nullptr) const override;
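
With the switch from pointers and std::vector to references and ArrayRef, a use of the analysis now looks roughly like this (PRUI and F are assumed in scope; computeRegMask is a hypothetical helper, not part of this interface):

    std::vector<uint32_t> RegMask = computeRegMask(F); // hypothetical helper
    PRUI.storeUpdateRegUsageInfo(F, RegMask);          // ArrayRef binds to the vector
    llvm::ArrayRef<uint32_t> Stored = PRUI.getRegUsageInfo(F);
    if (Stored.empty()) {
      // Function is not known to the analysis.
    }
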
diff --git a/linux-x64/clang/include/llvm/CodeGen/ResourcePriorityQueue.h b/linux-x64/clang/include/llvm/CodeGen/ResourcePriorityQueue.h
index 03166cc..8d582ee 100644
--- a/linux-x64/clang/include/llvm/CodeGen/ResourcePriorityQueue.h
+++ b/linux-x64/clang/include/llvm/CodeGen/ResourcePriorityQueue.h
@@ -32,7 +32,7 @@
ResourcePriorityQueue *PQ;
explicit resource_sort(ResourcePriorityQueue *pq) : PQ(pq) {}
- bool operator()(const SUnit* left, const SUnit* right) const;
+ bool operator()(const SUnit* LHS, const SUnit* RHS) const;
};
class ResourcePriorityQueue : public SchedulingPriorityQueue {
@@ -121,7 +121,7 @@
void remove(SUnit *SU) override;
/// scheduledNode - Main resource tracking point.
- void scheduledNode(SUnit *Node) override;
+ void scheduledNode(SUnit *SU) override;
bool isResourceAvailable(SUnit *SU);
void reserveResources(SUnit *SU);
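
resource_sort is a standard strict-weak-ordering functor; the renamed parameters only align it with LLVM's LHS/RHS convention. A sketch of the general shape such a comparator enables, here with std::priority_queue (the queue in this header manages its own container; PQ is an assumed ResourcePriorityQueue*):

    #include <queue>
    #include <vector>

    std::priority_queue<llvm::SUnit *, std::vector<llvm::SUnit *>,
                        llvm::resource_sort>
        Ready{llvm::resource_sort(PQ)};
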
diff --git a/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.def b/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.def
deleted file mode 100644
index 7ed90d9..0000000
--- a/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.def
+++ /dev/null
@@ -1,527 +0,0 @@
-//===-- llvm/RuntimeLibcalls.def - File that describes libcalls -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines all of the runtime library calls the backend can emit.
-// The various long double types cannot be merged, because 80-bit library
-// functions use "xf" and 128-bit use "tf".
-//
-// When adding PPCF128 functions here, note that their names generally need
-// to be overridden for Darwin with the xxx$LDBL128 form. See
-// PPCISelLowering.cpp.
-//
-//===----------------------------------------------------------------------===//
-
-// NOTE: NO INCLUDE GUARD DESIRED!
-
-// Provide definitions of macros so that users of this file do not have to
-// define everything to use it...
-
-// Declare the enumerator for each libcall, along with its default name. Some
-// libcalls have different names on particular OSes or architectures. These
-// are set in InitLibcallNames() in TargetLoweringBase.cpp and/or by targets
-// using TargetLoweringBase::setLibcallName()
-#ifndef HANDLE_LIBCALL
-#error "HANDLE_LIBCALL must be defined"
-#endif
-
-// Integer
-HANDLE_LIBCALL(SHL_I16, "__ashlhi3")
-HANDLE_LIBCALL(SHL_I32, "__ashlsi3")
-HANDLE_LIBCALL(SHL_I64, "__ashldi3")
-HANDLE_LIBCALL(SHL_I128, "__ashlti3")
-HANDLE_LIBCALL(SRL_I16, "__lshrhi3")
-HANDLE_LIBCALL(SRL_I32, "__lshrsi3")
-HANDLE_LIBCALL(SRL_I64, "__lshrdi3")
-HANDLE_LIBCALL(SRL_I128, "__lshrti3")
-HANDLE_LIBCALL(SRA_I16, "__ashrhi3")
-HANDLE_LIBCALL(SRA_I32, "__ashrsi3")
-HANDLE_LIBCALL(SRA_I64, "__ashrdi3")
-HANDLE_LIBCALL(SRA_I128, "__ashrti3")
-HANDLE_LIBCALL(MUL_I8, "__mulqi3")
-HANDLE_LIBCALL(MUL_I16, "__mulhi3")
-HANDLE_LIBCALL(MUL_I32, "__mulsi3")
-HANDLE_LIBCALL(MUL_I64, "__muldi3")
-HANDLE_LIBCALL(MUL_I128, "__multi3")
-HANDLE_LIBCALL(MULO_I32, "__mulosi4")
-HANDLE_LIBCALL(MULO_I64, "__mulodi4")
-HANDLE_LIBCALL(MULO_I128, "__muloti4")
-HANDLE_LIBCALL(SDIV_I8, "__divqi3")
-HANDLE_LIBCALL(SDIV_I16, "__divhi3")
-HANDLE_LIBCALL(SDIV_I32, "__divsi3")
-HANDLE_LIBCALL(SDIV_I64, "__divdi3")
-HANDLE_LIBCALL(SDIV_I128, "__divti3")
-HANDLE_LIBCALL(UDIV_I8, "__udivqi3")
-HANDLE_LIBCALL(UDIV_I16, "__udivhi3")
-HANDLE_LIBCALL(UDIV_I32, "__udivsi3")
-HANDLE_LIBCALL(UDIV_I64, "__udivdi3")
-HANDLE_LIBCALL(UDIV_I128, "__udivti3")
-HANDLE_LIBCALL(SREM_I8, "__modqi3")
-HANDLE_LIBCALL(SREM_I16, "__modhi3")
-HANDLE_LIBCALL(SREM_I32, "__modsi3")
-HANDLE_LIBCALL(SREM_I64, "__moddi3")
-HANDLE_LIBCALL(SREM_I128, "__modti3")
-HANDLE_LIBCALL(UREM_I8, "__umodqi3")
-HANDLE_LIBCALL(UREM_I16, "__umodhi3")
-HANDLE_LIBCALL(UREM_I32, "__umodsi3")
-HANDLE_LIBCALL(UREM_I64, "__umoddi3")
-HANDLE_LIBCALL(UREM_I128, "__umodti3")
-HANDLE_LIBCALL(SDIVREM_I8, nullptr)
-HANDLE_LIBCALL(SDIVREM_I16, nullptr)
-HANDLE_LIBCALL(SDIVREM_I32, nullptr)
-HANDLE_LIBCALL(SDIVREM_I64, nullptr)
-HANDLE_LIBCALL(SDIVREM_I128, nullptr)
-HANDLE_LIBCALL(UDIVREM_I8, nullptr)
-HANDLE_LIBCALL(UDIVREM_I16, nullptr)
-HANDLE_LIBCALL(UDIVREM_I32, nullptr)
-HANDLE_LIBCALL(UDIVREM_I64, nullptr)
-HANDLE_LIBCALL(UDIVREM_I128, nullptr)
-HANDLE_LIBCALL(NEG_I32, "__negsi2")
-HANDLE_LIBCALL(NEG_I64, "__negdi2")
-
-// Floating-point
-HANDLE_LIBCALL(ADD_F32, "__addsf3")
-HANDLE_LIBCALL(ADD_F64, "__adddf3")
-HANDLE_LIBCALL(ADD_F80, "__addxf3")
-HANDLE_LIBCALL(ADD_F128, "__addtf3")
-HANDLE_LIBCALL(ADD_PPCF128, "__gcc_qadd")
-HANDLE_LIBCALL(SUB_F32, "__subsf3")
-HANDLE_LIBCALL(SUB_F64, "__subdf3")
-HANDLE_LIBCALL(SUB_F80, "__subxf3")
-HANDLE_LIBCALL(SUB_F128, "__subtf3")
-HANDLE_LIBCALL(SUB_PPCF128, "__gcc_qsub")
-HANDLE_LIBCALL(MUL_F32, "__mulsf3")
-HANDLE_LIBCALL(MUL_F64, "__muldf3")
-HANDLE_LIBCALL(MUL_F80, "__mulxf3")
-HANDLE_LIBCALL(MUL_F128, "__multf3")
-HANDLE_LIBCALL(MUL_PPCF128, "__gcc_qmul")
-HANDLE_LIBCALL(DIV_F32, "__divsf3")
-HANDLE_LIBCALL(DIV_F64, "__divdf3")
-HANDLE_LIBCALL(DIV_F80, "__divxf3")
-HANDLE_LIBCALL(DIV_F128, "__divtf3")
-HANDLE_LIBCALL(DIV_PPCF128, "__gcc_qdiv")
-HANDLE_LIBCALL(REM_F32, "fmodf")
-HANDLE_LIBCALL(REM_F64, "fmod")
-HANDLE_LIBCALL(REM_F80, "fmodl")
-HANDLE_LIBCALL(REM_F128, "fmodl")
-HANDLE_LIBCALL(REM_PPCF128, "fmodl")
-HANDLE_LIBCALL(FMA_F32, "fmaf")
-HANDLE_LIBCALL(FMA_F64, "fma")
-HANDLE_LIBCALL(FMA_F80, "fmal")
-HANDLE_LIBCALL(FMA_F128, "fmal")
-HANDLE_LIBCALL(FMA_PPCF128, "fmal")
-HANDLE_LIBCALL(POWI_F32, "__powisf2")
-HANDLE_LIBCALL(POWI_F64, "__powidf2")
-HANDLE_LIBCALL(POWI_F80, "__powixf2")
-HANDLE_LIBCALL(POWI_F128, "__powitf2")
-HANDLE_LIBCALL(POWI_PPCF128, "__powitf2")
-HANDLE_LIBCALL(SQRT_F32, "sqrtf")
-HANDLE_LIBCALL(SQRT_F64, "sqrt")
-HANDLE_LIBCALL(SQRT_F80, "sqrtl")
-HANDLE_LIBCALL(SQRT_F128, "sqrtl")
-HANDLE_LIBCALL(SQRT_PPCF128, "sqrtl")
-HANDLE_LIBCALL(LOG_F32, "logf")
-HANDLE_LIBCALL(LOG_F64, "log")
-HANDLE_LIBCALL(LOG_F80, "logl")
-HANDLE_LIBCALL(LOG_F128, "logl")
-HANDLE_LIBCALL(LOG_PPCF128, "logl")
-HANDLE_LIBCALL(LOG_FINITE_F32, "__logf_finite")
-HANDLE_LIBCALL(LOG_FINITE_F64, "__log_finite")
-HANDLE_LIBCALL(LOG_FINITE_F80, "__logl_finite")
-HANDLE_LIBCALL(LOG_FINITE_F128, "__logl_finite")
-HANDLE_LIBCALL(LOG_FINITE_PPCF128, "__logl_finite")
-HANDLE_LIBCALL(LOG2_F32, "log2f")
-HANDLE_LIBCALL(LOG2_F64, "log2")
-HANDLE_LIBCALL(LOG2_F80, "log2l")
-HANDLE_LIBCALL(LOG2_F128, "log2l")
-HANDLE_LIBCALL(LOG2_PPCF128, "log2l")
-HANDLE_LIBCALL(LOG2_FINITE_F32, "__log2f_finite")
-HANDLE_LIBCALL(LOG2_FINITE_F64, "__log2_finite")
-HANDLE_LIBCALL(LOG2_FINITE_F80, "__log2l_finite")
-HANDLE_LIBCALL(LOG2_FINITE_F128, "__log2l_finite")
-HANDLE_LIBCALL(LOG2_FINITE_PPCF128, "__log2l_finite")
-HANDLE_LIBCALL(LOG10_F32, "log10f")
-HANDLE_LIBCALL(LOG10_F64, "log10")
-HANDLE_LIBCALL(LOG10_F80, "log10l")
-HANDLE_LIBCALL(LOG10_F128, "log10l")
-HANDLE_LIBCALL(LOG10_PPCF128, "log10l")
-HANDLE_LIBCALL(LOG10_FINITE_F32, "__log10f_finite")
-HANDLE_LIBCALL(LOG10_FINITE_F64, "__log10_finite")
-HANDLE_LIBCALL(LOG10_FINITE_F80, "__log10l_finite")
-HANDLE_LIBCALL(LOG10_FINITE_F128, "__log10l_finite")
-HANDLE_LIBCALL(LOG10_FINITE_PPCF128, "__log10l_finite")
-HANDLE_LIBCALL(EXP_F32, "expf")
-HANDLE_LIBCALL(EXP_F64, "exp")
-HANDLE_LIBCALL(EXP_F80, "expl")
-HANDLE_LIBCALL(EXP_F128, "expl")
-HANDLE_LIBCALL(EXP_PPCF128, "expl")
-HANDLE_LIBCALL(EXP_FINITE_F32, "__expf_finite")
-HANDLE_LIBCALL(EXP_FINITE_F64, "__exp_finite")
-HANDLE_LIBCALL(EXP_FINITE_F80, "__expl_finite")
-HANDLE_LIBCALL(EXP_FINITE_F128, "__expl_finite")
-HANDLE_LIBCALL(EXP_FINITE_PPCF128, "__expl_finite")
-HANDLE_LIBCALL(EXP2_F32, "exp2f")
-HANDLE_LIBCALL(EXP2_F64, "exp2")
-HANDLE_LIBCALL(EXP2_F80, "exp2l")
-HANDLE_LIBCALL(EXP2_F128, "exp2l")
-HANDLE_LIBCALL(EXP2_PPCF128, "exp2l")
-HANDLE_LIBCALL(EXP2_FINITE_F32, "__exp2f_finite")
-HANDLE_LIBCALL(EXP2_FINITE_F64, "__exp2_finite")
-HANDLE_LIBCALL(EXP2_FINITE_F80, "__exp2l_finite")
-HANDLE_LIBCALL(EXP2_FINITE_F128, "__exp2l_finite")
-HANDLE_LIBCALL(EXP2_FINITE_PPCF128, "__exp2l_finite")
-HANDLE_LIBCALL(SIN_F32, "sinf")
-HANDLE_LIBCALL(SIN_F64, "sin")
-HANDLE_LIBCALL(SIN_F80, "sinl")
-HANDLE_LIBCALL(SIN_F128, "sinl")
-HANDLE_LIBCALL(SIN_PPCF128, "sinl")
-HANDLE_LIBCALL(COS_F32, "cosf")
-HANDLE_LIBCALL(COS_F64, "cos")
-HANDLE_LIBCALL(COS_F80, "cosl")
-HANDLE_LIBCALL(COS_F128, "cosl")
-HANDLE_LIBCALL(COS_PPCF128, "cosl")
-HANDLE_LIBCALL(SINCOS_F32, nullptr)
-HANDLE_LIBCALL(SINCOS_F64, nullptr)
-HANDLE_LIBCALL(SINCOS_F80, nullptr)
-HANDLE_LIBCALL(SINCOS_F128, nullptr)
-HANDLE_LIBCALL(SINCOS_PPCF128, nullptr)
-HANDLE_LIBCALL(SINCOS_STRET_F32, nullptr)
-HANDLE_LIBCALL(SINCOS_STRET_F64, nullptr)
-HANDLE_LIBCALL(POW_F32, "powf")
-HANDLE_LIBCALL(POW_F64, "pow")
-HANDLE_LIBCALL(POW_F80, "powl")
-HANDLE_LIBCALL(POW_F128, "powl")
-HANDLE_LIBCALL(POW_PPCF128, "powl")
-HANDLE_LIBCALL(POW_FINITE_F32, "__powf_finite")
-HANDLE_LIBCALL(POW_FINITE_F64, "__pow_finite")
-HANDLE_LIBCALL(POW_FINITE_F80, "__powl_finite")
-HANDLE_LIBCALL(POW_FINITE_F128, "__powl_finite")
-HANDLE_LIBCALL(POW_FINITE_PPCF128, "__powl_finite")
-HANDLE_LIBCALL(CEIL_F32, "ceilf")
-HANDLE_LIBCALL(CEIL_F64, "ceil")
-HANDLE_LIBCALL(CEIL_F80, "ceill")
-HANDLE_LIBCALL(CEIL_F128, "ceill")
-HANDLE_LIBCALL(CEIL_PPCF128, "ceill")
-HANDLE_LIBCALL(TRUNC_F32, "truncf")
-HANDLE_LIBCALL(TRUNC_F64, "trunc")
-HANDLE_LIBCALL(TRUNC_F80, "truncl")
-HANDLE_LIBCALL(TRUNC_F128, "truncl")
-HANDLE_LIBCALL(TRUNC_PPCF128, "truncl")
-HANDLE_LIBCALL(RINT_F32, "rintf")
-HANDLE_LIBCALL(RINT_F64, "rint")
-HANDLE_LIBCALL(RINT_F80, "rintl")
-HANDLE_LIBCALL(RINT_F128, "rintl")
-HANDLE_LIBCALL(RINT_PPCF128, "rintl")
-HANDLE_LIBCALL(NEARBYINT_F32, "nearbyintf")
-HANDLE_LIBCALL(NEARBYINT_F64, "nearbyint")
-HANDLE_LIBCALL(NEARBYINT_F80, "nearbyintl")
-HANDLE_LIBCALL(NEARBYINT_F128, "nearbyintl")
-HANDLE_LIBCALL(NEARBYINT_PPCF128, "nearbyintl")
-HANDLE_LIBCALL(ROUND_F32, "roundf")
-HANDLE_LIBCALL(ROUND_F64, "round")
-HANDLE_LIBCALL(ROUND_F80, "roundl")
-HANDLE_LIBCALL(ROUND_F128, "roundl")
-HANDLE_LIBCALL(ROUND_PPCF128, "roundl")
-HANDLE_LIBCALL(FLOOR_F32, "floorf")
-HANDLE_LIBCALL(FLOOR_F64, "floor")
-HANDLE_LIBCALL(FLOOR_F80, "floorl")
-HANDLE_LIBCALL(FLOOR_F128, "floorl")
-HANDLE_LIBCALL(FLOOR_PPCF128, "floorl")
-HANDLE_LIBCALL(COPYSIGN_F32, "copysignf")
-HANDLE_LIBCALL(COPYSIGN_F64, "copysign")
-HANDLE_LIBCALL(COPYSIGN_F80, "copysignl")
-HANDLE_LIBCALL(COPYSIGN_F128, "copysignl")
-HANDLE_LIBCALL(COPYSIGN_PPCF128, "copysignl")
-HANDLE_LIBCALL(FMIN_F32, "fminf")
-HANDLE_LIBCALL(FMIN_F64, "fmin")
-HANDLE_LIBCALL(FMIN_F80, "fminl")
-HANDLE_LIBCALL(FMIN_F128, "fminl")
-HANDLE_LIBCALL(FMIN_PPCF128, "fminl")
-HANDLE_LIBCALL(FMAX_F32, "fmaxf")
-HANDLE_LIBCALL(FMAX_F64, "fmax")
-HANDLE_LIBCALL(FMAX_F80, "fmaxl")
-HANDLE_LIBCALL(FMAX_F128, "fmaxl")
-HANDLE_LIBCALL(FMAX_PPCF128, "fmaxl")
-
-// Conversion
-HANDLE_LIBCALL(FPEXT_F32_PPCF128, "__gcc_stoq")
-HANDLE_LIBCALL(FPEXT_F64_PPCF128, "__gcc_dtoq")
-HANDLE_LIBCALL(FPEXT_F80_F128, "__extendxftf2")
-HANDLE_LIBCALL(FPEXT_F64_F128, "__extenddftf2")
-HANDLE_LIBCALL(FPEXT_F32_F128, "__extendsftf2")
-HANDLE_LIBCALL(FPEXT_F32_F64, "__extendsfdf2")
-HANDLE_LIBCALL(FPEXT_F16_F32, "__gnu_h2f_ieee")
-HANDLE_LIBCALL(FPROUND_F32_F16, "__gnu_f2h_ieee")
-HANDLE_LIBCALL(FPROUND_F64_F16, "__truncdfhf2")
-HANDLE_LIBCALL(FPROUND_F80_F16, "__truncxfhf2")
-HANDLE_LIBCALL(FPROUND_F128_F16, "__trunctfhf2")
-HANDLE_LIBCALL(FPROUND_PPCF128_F16, "__trunctfhf2")
-HANDLE_LIBCALL(FPROUND_F64_F32, "__truncdfsf2")
-HANDLE_LIBCALL(FPROUND_F80_F32, "__truncxfsf2")
-HANDLE_LIBCALL(FPROUND_F128_F32, "__trunctfsf2")
-HANDLE_LIBCALL(FPROUND_PPCF128_F32, "__gcc_qtos")
-HANDLE_LIBCALL(FPROUND_F80_F64, "__truncxfdf2")
-HANDLE_LIBCALL(FPROUND_F128_F64, "__trunctfdf2")
-HANDLE_LIBCALL(FPROUND_PPCF128_F64, "__gcc_qtod")
-HANDLE_LIBCALL(FPROUND_F128_F80, "__trunctfxf2")
-HANDLE_LIBCALL(FPTOSINT_F32_I32, "__fixsfsi")
-HANDLE_LIBCALL(FPTOSINT_F32_I64, "__fixsfdi")
-HANDLE_LIBCALL(FPTOSINT_F32_I128, "__fixsfti")
-HANDLE_LIBCALL(FPTOSINT_F64_I32, "__fixdfsi")
-HANDLE_LIBCALL(FPTOSINT_F64_I64, "__fixdfdi")
-HANDLE_LIBCALL(FPTOSINT_F64_I128, "__fixdfti")
-HANDLE_LIBCALL(FPTOSINT_F80_I32, "__fixxfsi")
-HANDLE_LIBCALL(FPTOSINT_F80_I64, "__fixxfdi")
-HANDLE_LIBCALL(FPTOSINT_F80_I128, "__fixxfti")
-HANDLE_LIBCALL(FPTOSINT_F128_I32, "__fixtfsi")
-HANDLE_LIBCALL(FPTOSINT_F128_I64, "__fixtfdi")
-HANDLE_LIBCALL(FPTOSINT_F128_I128, "__fixtfti")
-HANDLE_LIBCALL(FPTOSINT_PPCF128_I32, "__gcc_qtou")
-HANDLE_LIBCALL(FPTOSINT_PPCF128_I64, "__fixtfdi")
-HANDLE_LIBCALL(FPTOSINT_PPCF128_I128, "__fixtfti")
-HANDLE_LIBCALL(FPTOUINT_F32_I32, "__fixunssfsi")
-HANDLE_LIBCALL(FPTOUINT_F32_I64, "__fixunssfdi")
-HANDLE_LIBCALL(FPTOUINT_F32_I128, "__fixunssfti")
-HANDLE_LIBCALL(FPTOUINT_F64_I32, "__fixunsdfsi")
-HANDLE_LIBCALL(FPTOUINT_F64_I64, "__fixunsdfdi")
-HANDLE_LIBCALL(FPTOUINT_F64_I128, "__fixunsdfti")
-HANDLE_LIBCALL(FPTOUINT_F80_I32, "__fixunsxfsi")
-HANDLE_LIBCALL(FPTOUINT_F80_I64, "__fixunsxfdi")
-HANDLE_LIBCALL(FPTOUINT_F80_I128, "__fixunsxfti")
-HANDLE_LIBCALL(FPTOUINT_F128_I32, "__fixunstfsi")
-HANDLE_LIBCALL(FPTOUINT_F128_I64, "__fixunstfdi")
-HANDLE_LIBCALL(FPTOUINT_F128_I128, "__fixunstfti")
-HANDLE_LIBCALL(FPTOUINT_PPCF128_I32, "__fixunstfsi")
-HANDLE_LIBCALL(FPTOUINT_PPCF128_I64, "__fixunstfdi")
-HANDLE_LIBCALL(FPTOUINT_PPCF128_I128, "__fixunstfti")
-HANDLE_LIBCALL(SINTTOFP_I32_F32, "__floatsisf")
-HANDLE_LIBCALL(SINTTOFP_I32_F64, "__floatsidf")
-HANDLE_LIBCALL(SINTTOFP_I32_F80, "__floatsixf")
-HANDLE_LIBCALL(SINTTOFP_I32_F128, "__floatsitf")
-HANDLE_LIBCALL(SINTTOFP_I32_PPCF128, "__gcc_itoq")
-HANDLE_LIBCALL(SINTTOFP_I64_F32, "__floatdisf")
-HANDLE_LIBCALL(SINTTOFP_I64_F64, "__floatdidf")
-HANDLE_LIBCALL(SINTTOFP_I64_F80, "__floatdixf")
-HANDLE_LIBCALL(SINTTOFP_I64_F128, "__floatditf")
-HANDLE_LIBCALL(SINTTOFP_I64_PPCF128, "__floatditf")
-HANDLE_LIBCALL(SINTTOFP_I128_F32, "__floattisf")
-HANDLE_LIBCALL(SINTTOFP_I128_F64, "__floattidf")
-HANDLE_LIBCALL(SINTTOFP_I128_F80, "__floattixf")
-HANDLE_LIBCALL(SINTTOFP_I128_F128, "__floattitf")
-HANDLE_LIBCALL(SINTTOFP_I128_PPCF128, "__floattitf")
-HANDLE_LIBCALL(UINTTOFP_I32_F32, "__floatunsisf")
-HANDLE_LIBCALL(UINTTOFP_I32_F64, "__floatunsidf")
-HANDLE_LIBCALL(UINTTOFP_I32_F80, "__floatunsixf")
-HANDLE_LIBCALL(UINTTOFP_I32_F128, "__floatunsitf")
-HANDLE_LIBCALL(UINTTOFP_I32_PPCF128, "__gcc_utoq")
-HANDLE_LIBCALL(UINTTOFP_I64_F32, "__floatundisf")
-HANDLE_LIBCALL(UINTTOFP_I64_F64, "__floatundidf")
-HANDLE_LIBCALL(UINTTOFP_I64_F80, "__floatundixf")
-HANDLE_LIBCALL(UINTTOFP_I64_F128, "__floatunditf")
-HANDLE_LIBCALL(UINTTOFP_I64_PPCF128, "__floatunditf")
-HANDLE_LIBCALL(UINTTOFP_I128_F32, "__floatuntisf")
-HANDLE_LIBCALL(UINTTOFP_I128_F64, "__floatuntidf")
-HANDLE_LIBCALL(UINTTOFP_I128_F80, "__floatuntixf")
-HANDLE_LIBCALL(UINTTOFP_I128_F128, "__floatuntitf")
-HANDLE_LIBCALL(UINTTOFP_I128_PPCF128, "__floatuntitf")
-
-// Comparison
-HANDLE_LIBCALL(OEQ_F32, "__eqsf2")
-HANDLE_LIBCALL(OEQ_F64, "__eqdf2")
-HANDLE_LIBCALL(OEQ_F128, "__eqtf2")
-HANDLE_LIBCALL(OEQ_PPCF128, "__gcc_qeq")
-HANDLE_LIBCALL(UNE_F32, "__nesf2")
-HANDLE_LIBCALL(UNE_F64, "__nedf2")
-HANDLE_LIBCALL(UNE_F128, "__netf2")
-HANDLE_LIBCALL(UNE_PPCF128, "__gcc_qne")
-HANDLE_LIBCALL(OGE_F32, "__gesf2")
-HANDLE_LIBCALL(OGE_F64, "__gedf2")
-HANDLE_LIBCALL(OGE_F128, "__getf2")
-HANDLE_LIBCALL(OGE_PPCF128, "__gcc_qge")
-HANDLE_LIBCALL(OLT_F32, "__ltsf2")
-HANDLE_LIBCALL(OLT_F64, "__ltdf2")
-HANDLE_LIBCALL(OLT_F128, "__lttf2")
-HANDLE_LIBCALL(OLT_PPCF128, "__gcc_qlt")
-HANDLE_LIBCALL(OLE_F32, "__lesf2")
-HANDLE_LIBCALL(OLE_F64, "__ledf2")
-HANDLE_LIBCALL(OLE_F128, "__letf2")
-HANDLE_LIBCALL(OLE_PPCF128, "__gcc_qle")
-HANDLE_LIBCALL(OGT_F32, "__gtsf2")
-HANDLE_LIBCALL(OGT_F64, "__gtdf2")
-HANDLE_LIBCALL(OGT_F128, "__gttf2")
-HANDLE_LIBCALL(OGT_PPCF128, "__gcc_qgt")
-HANDLE_LIBCALL(UO_F32, "__unordsf2")
-HANDLE_LIBCALL(UO_F64, "__unorddf2")
-HANDLE_LIBCALL(UO_F128, "__unordtf2")
-HANDLE_LIBCALL(UO_PPCF128, "__gcc_qunord")
-HANDLE_LIBCALL(O_F32, "__unordsf2")
-HANDLE_LIBCALL(O_F64, "__unorddf2")
-HANDLE_LIBCALL(O_F128, "__unordtf2")
-HANDLE_LIBCALL(O_PPCF128, "__gcc_qunord")
-
-// Memory
-HANDLE_LIBCALL(MEMCPY, "memcpy")
-HANDLE_LIBCALL(MEMMOVE, "memmove")
-HANDLE_LIBCALL(MEMSET, "memset")
-HANDLE_LIBCALL(BZERO, nullptr)
-
-// Element-wise unordered-atomic memory of different sizes
-HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_1, "__llvm_memcpy_element_unordered_atomic_1")
-HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_2, "__llvm_memcpy_element_unordered_atomic_2")
-HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_4, "__llvm_memcpy_element_unordered_atomic_4")
-HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_8, "__llvm_memcpy_element_unordered_atomic_8")
-HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_16, "__llvm_memcpy_element_unordered_atomic_16")
-HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1, "__llvm_memmove_element_unordered_atomic_1")
-HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2, "__llvm_memmove_element_unordered_atomic_2")
-HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4, "__llvm_memmove_element_unordered_atomic_4")
-HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8, "__llvm_memmove_element_unordered_atomic_8")
-HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16, "__llvm_memmove_element_unordered_atomic_16")
-HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_1, "__llvm_memset_element_unordered_atomic_1")
-HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_2, "__llvm_memset_element_unordered_atomic_2")
-HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_4, "__llvm_memset_element_unordered_atomic_4")
-HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_8, "__llvm_memset_element_unordered_atomic_8")
-HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_16, "__llvm_memset_element_unordered_atomic_16")
-
-// Exception handling
-HANDLE_LIBCALL(UNWIND_RESUME, "_Unwind_Resume")
-
-// Note: there are two sets of atomics libcalls; see
-// <https://llvm.org/docs/Atomics.html> for more info on the
-// difference between them.
-
-// Atomic '__sync_*' libcalls.
-HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_1, "__sync_val_compare_and_swap_1")
-HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_2, "__sync_val_compare_and_swap_2")
-HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_4, "__sync_val_compare_and_swap_4")
-HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_8, "__sync_val_compare_and_swap_8")
-HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_16, "__sync_val_compare_and_swap_16")
-HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_1, "__sync_lock_test_and_set_1")
-HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_2, "__sync_lock_test_and_set_2")
-HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_4, "__sync_lock_test_and_set_4")
-HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_8, "__sync_lock_test_and_set_8")
-HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_16, "__sync_lock_test_and_set_16")
-HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_1, "__sync_fetch_and_add_1")
-HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_2, "__sync_fetch_and_add_2")
-HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_4, "__sync_fetch_and_add_4")
-HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_8, "__sync_fetch_and_add_8")
-HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_16, "__sync_fetch_and_add_16")
-HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_1, "__sync_fetch_and_sub_1")
-HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_2, "__sync_fetch_and_sub_2")
-HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_4, "__sync_fetch_and_sub_4")
-HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_8, "__sync_fetch_and_sub_8")
-HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_16, "__sync_fetch_and_sub_16")
-HANDLE_LIBCALL(SYNC_FETCH_AND_AND_1, "__sync_fetch_and_and_1")
-HANDLE_LIBCALL(SYNC_FETCH_AND_AND_2, "__sync_fetch_and_and_2")
-HANDLE_LIBCALL(SYNC_FETCH_AND_AND_4, "__sync_fetch_and_and_4")
-HANDLE_LIBCALL(SYNC_FETCH_AND_AND_8, "__sync_fetch_and_and_8")
-HANDLE_LIBCALL(SYNC_FETCH_AND_AND_16, "__sync_fetch_and_and_16")
-HANDLE_LIBCALL(SYNC_FETCH_AND_OR_1, "__sync_fetch_and_or_1")
-HANDLE_LIBCALL(SYNC_FETCH_AND_OR_2, "__sync_fetch_and_or_2")
-HANDLE_LIBCALL(SYNC_FETCH_AND_OR_4, "__sync_fetch_and_or_4")
-HANDLE_LIBCALL(SYNC_FETCH_AND_OR_8, "__sync_fetch_and_or_8")
-HANDLE_LIBCALL(SYNC_FETCH_AND_OR_16, "__sync_fetch_and_or_16")
-HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_1, "__sync_fetch_and_xor_1")
-HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_2, "__sync_fetch_and_xor_2")
-HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_4, "__sync_fetch_and_xor_4")
-HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_8, "__sync_fetch_and_xor_8")
-HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_16, "__sync_fetch_and_xor_16")
-HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_1, "__sync_fetch_and_nand_1")
-HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_2, "__sync_fetch_and_nand_2")
-HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_4, "__sync_fetch_and_nand_4")
-HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_8, "__sync_fetch_and_nand_8")
-HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_16, "__sync_fetch_and_nand_16")
-HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_1, "__sync_fetch_and_max_1")
-HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_2, "__sync_fetch_and_max_2")
-HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_4, "__sync_fetch_and_max_4")
-HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_8, "__sync_fetch_and_max_8")
-HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_16, "__sync_fetch_and_max_16")
-HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_1, "__sync_fetch_and_umax_1")
-HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_2, "__sync_fetch_and_umax_2")
-HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_4, "__sync_fetch_and_umax_4")
-HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_8, "__sync_fetch_and_umax_8")
-HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_16, "__sync_fetch_and_umax_16")
-HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_1, "__sync_fetch_and_min_1")
-HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_2, "__sync_fetch_and_min_2")
-HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_4, "__sync_fetch_and_min_4")
-HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_8, "__sync_fetch_and_min_8")
-HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_16, "__sync_fetch_and_min_16")
-HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_1, "__sync_fetch_and_umin_1")
-HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_2, "__sync_fetch_and_umin_2")
-HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_4, "__sync_fetch_and_umin_4")
-HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_8, "__sync_fetch_and_umin_8")
-HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_16, "__sync_fetch_and_umin_16")
-
-// Atomic `__atomic_*' libcalls.
-HANDLE_LIBCALL(ATOMIC_LOAD, "__atomic_load")
-HANDLE_LIBCALL(ATOMIC_LOAD_1, "__atomic_load_1")
-HANDLE_LIBCALL(ATOMIC_LOAD_2, "__atomic_load_2")
-HANDLE_LIBCALL(ATOMIC_LOAD_4, "__atomic_load_4")
-HANDLE_LIBCALL(ATOMIC_LOAD_8, "__atomic_load_8")
-HANDLE_LIBCALL(ATOMIC_LOAD_16, "__atomic_load_16")
-
-HANDLE_LIBCALL(ATOMIC_STORE, "__atomic_store")
-HANDLE_LIBCALL(ATOMIC_STORE_1, "__atomic_store_1")
-HANDLE_LIBCALL(ATOMIC_STORE_2, "__atomic_store_2")
-HANDLE_LIBCALL(ATOMIC_STORE_4, "__atomic_store_4")
-HANDLE_LIBCALL(ATOMIC_STORE_8, "__atomic_store_8")
-HANDLE_LIBCALL(ATOMIC_STORE_16, "__atomic_store_16")
-
-HANDLE_LIBCALL(ATOMIC_EXCHANGE, "__atomic_exchange")
-HANDLE_LIBCALL(ATOMIC_EXCHANGE_1, "__atomic_exchange_1")
-HANDLE_LIBCALL(ATOMIC_EXCHANGE_2, "__atomic_exchange_2")
-HANDLE_LIBCALL(ATOMIC_EXCHANGE_4, "__atomic_exchange_4")
-HANDLE_LIBCALL(ATOMIC_EXCHANGE_8, "__atomic_exchange_8")
-HANDLE_LIBCALL(ATOMIC_EXCHANGE_16, "__atomic_exchange_16")
-
-HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE, "__atomic_compare_exchange")
-HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_1, "__atomic_compare_exchange_1")
-HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_2, "__atomic_compare_exchange_2")
-HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_4, "__atomic_compare_exchange_4")
-HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_8, "__atomic_compare_exchange_8")
-HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_16, "__atomic_compare_exchange_16")
-
-HANDLE_LIBCALL(ATOMIC_FETCH_ADD_1, "__atomic_fetch_add_1")
-HANDLE_LIBCALL(ATOMIC_FETCH_ADD_2, "__atomic_fetch_add_2")
-HANDLE_LIBCALL(ATOMIC_FETCH_ADD_4, "__atomic_fetch_add_4")
-HANDLE_LIBCALL(ATOMIC_FETCH_ADD_8, "__atomic_fetch_add_8")
-HANDLE_LIBCALL(ATOMIC_FETCH_ADD_16, "__atomic_fetch_add_16")
-HANDLE_LIBCALL(ATOMIC_FETCH_SUB_1, "__atomic_fetch_sub_1")
-HANDLE_LIBCALL(ATOMIC_FETCH_SUB_2, "__atomic_fetch_sub_2")
-HANDLE_LIBCALL(ATOMIC_FETCH_SUB_4, "__atomic_fetch_sub_4")
-HANDLE_LIBCALL(ATOMIC_FETCH_SUB_8, "__atomic_fetch_sub_8")
-HANDLE_LIBCALL(ATOMIC_FETCH_SUB_16, "__atomic_fetch_sub_16")
-HANDLE_LIBCALL(ATOMIC_FETCH_AND_1, "__atomic_fetch_and_1")
-HANDLE_LIBCALL(ATOMIC_FETCH_AND_2, "__atomic_fetch_and_2")
-HANDLE_LIBCALL(ATOMIC_FETCH_AND_4, "__atomic_fetch_and_4")
-HANDLE_LIBCALL(ATOMIC_FETCH_AND_8, "__atomic_fetch_and_8")
-HANDLE_LIBCALL(ATOMIC_FETCH_AND_16, "__atomic_fetch_and_16")
-HANDLE_LIBCALL(ATOMIC_FETCH_OR_1, "__atomic_fetch_or_1")
-HANDLE_LIBCALL(ATOMIC_FETCH_OR_2, "__atomic_fetch_or_2")
-HANDLE_LIBCALL(ATOMIC_FETCH_OR_4, "__atomic_fetch_or_4")
-HANDLE_LIBCALL(ATOMIC_FETCH_OR_8, "__atomic_fetch_or_8")
-HANDLE_LIBCALL(ATOMIC_FETCH_OR_16, "__atomic_fetch_or_16")
-HANDLE_LIBCALL(ATOMIC_FETCH_XOR_1, "__atomic_fetch_xor_1")
-HANDLE_LIBCALL(ATOMIC_FETCH_XOR_2, "__atomic_fetch_xor_2")
-HANDLE_LIBCALL(ATOMIC_FETCH_XOR_4, "__atomic_fetch_xor_4")
-HANDLE_LIBCALL(ATOMIC_FETCH_XOR_8, "__atomic_fetch_xor_8")
-HANDLE_LIBCALL(ATOMIC_FETCH_XOR_16, "__atomic_fetch_xor_16")
-HANDLE_LIBCALL(ATOMIC_FETCH_NAND_1, "__atomic_fetch_nand_1")
-HANDLE_LIBCALL(ATOMIC_FETCH_NAND_2, "__atomic_fetch_nand_2")
-HANDLE_LIBCALL(ATOMIC_FETCH_NAND_4, "__atomic_fetch_nand_4")
-HANDLE_LIBCALL(ATOMIC_FETCH_NAND_8, "__atomic_fetch_nand_8")
-HANDLE_LIBCALL(ATOMIC_FETCH_NAND_16, "__atomic_fetch_nand_16")
-
-// Stack Protector Fail
-HANDLE_LIBCALL(STACKPROTECTOR_CHECK_FAIL, "__stack_chk_fail")
-
-// Deoptimization
-HANDLE_LIBCALL(DEOPTIMIZE, "__llvm_deoptimize")
-
-HANDLE_LIBCALL(UNKNOWN_LIBCALL, nullptr)
-
-#undef HANDLE_LIBCALL
diff --git a/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.h b/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.h
index 016bef1..28567a1 100644
--- a/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.h
+++ b/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.h
@@ -29,7 +29,7 @@
///
enum Libcall {
#define HANDLE_LIBCALL(code, name) code,
- #include "RuntimeLibcalls.def"
+ #include "llvm/IR/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL
};
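
RuntimeLibcalls.def (now under llvm/IR, per the deletion above) is an X-macro file: each consumer defines HANDLE_LIBCALL before including it and gets one expansion per entry. A sketch of a second expansion that builds a name table parallel to the Libcall enum (some entries are nullptr, matching the .def):

    static const char *const LibcallNames[] = {
    #define HANDLE_LIBCALL(code, name) name,
    #include "llvm/IR/RuntimeLibcalls.def"
    #undef HANDLE_LIBCALL
    };
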
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScheduleDAG.h b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAG.h
index f3f2f05..56adc2e 100644
--- a/linux-x64/clang/include/llvm/CodeGen/ScheduleDAG.h
+++ b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAG.h
@@ -76,7 +76,7 @@
};
private:
- /// \brief A pointer to the depending/depended-on SUnit, and an enum
+ /// A pointer to the depending/depended-on SUnit, and an enum
/// indicating the kind of the dependency.
PointerIntPair<SUnit *, 2, Kind> Dep;
@@ -137,7 +137,7 @@
return !operator==(Other);
}
- /// \brief Returns the latency value for this edge, which roughly means the
+ /// Returns the latency value for this edge, which roughly means the
/// minimum number of cycles that must elapse between the predecessor and
/// the successor, given that they have this edge between them.
unsigned getLatency() const {
@@ -163,7 +163,7 @@
return getKind() != Data;
}
- /// \brief Tests if this is an Order dependence between two memory accesses
+ /// Tests if this is an Order dependence between two memory accesses
/// where both sides of the dependence access memory in non-volatile and
/// fully modeled ways.
bool isNormalMemory() const {
@@ -181,7 +181,7 @@
return (isNormalMemory() || isBarrier());
}
- /// \brief Tests if this is an Order dependence that is marked as
+ /// Tests if this is an Order dependence that is marked as
/// "must alias", meaning that the SUnits at either end of the edge have a
/// memory dependence on a known memory location.
bool isMustAlias() const {
@@ -196,13 +196,13 @@
return getKind() == Order && Contents.OrdKind >= Weak;
}
- /// \brief Tests if this is an Order dependence that is marked as
+ /// Tests if this is an Order dependence that is marked as
/// "artificial", meaning it isn't necessary for correctness.
bool isArtificial() const {
return getKind() == Order && Contents.OrdKind == Artificial;
}
- /// \brief Tests if this is an Order dependence that is marked as "cluster",
+ /// Tests if this is an Order dependence that is marked as "cluster",
/// meaning it is artificial and wants to be adjacent.
bool isCluster() const {
return getKind() == Order && Contents.OrdKind == Cluster;
@@ -252,7 +252,7 @@
MachineInstr *Instr = nullptr; ///< Alternatively, a MachineInstr.
public:
- SUnit *OrigNode = nullptr; ///< If not this, the node from which this node
+ SUnit *OrigNode = nullptr; ///< If not this, the node from which this node
/// was cloned. (SD scheduling only)
const MCSchedClassDesc *SchedClass =
@@ -308,7 +308,7 @@
nullptr; ///< Is a special copy node if != nullptr.
const TargetRegisterClass *CopySrcRC = nullptr;
- /// \brief Constructs an SUnit for pre-regalloc scheduling to represent an
+ /// Constructs an SUnit for pre-regalloc scheduling to represent an
/// SDNode and any nodes flagged to it.
SUnit(SDNode *node, unsigned nodenum)
: Node(node), NodeNum(nodenum), isVRegCycle(false), isCall(false),
@@ -319,7 +319,7 @@
isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
isHeightCurrent(false) {}
- /// \brief Constructs an SUnit for post-regalloc scheduling to represent a
+ /// Constructs an SUnit for post-regalloc scheduling to represent a
/// MachineInstr.
SUnit(MachineInstr *instr, unsigned nodenum)
: Instr(instr), NodeNum(nodenum), isVRegCycle(false), isCall(false),
@@ -330,7 +330,7 @@
isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
isHeightCurrent(false) {}
- /// \brief Constructs a placeholder SUnit.
+ /// Constructs a placeholder SUnit.
SUnit()
: isVRegCycle(false), isCall(false), isCallOp(false), isTwoAddress(false),
isCommutable(false), hasPhysRegUses(false), hasPhysRegDefs(false),
@@ -339,7 +339,7 @@
isCloned(false), isUnbuffered(false), hasReservedResource(false),
isDepthCurrent(false), isHeightCurrent(false) {}
- /// \brief Boundary nodes are placeholders for the boundary of the
+ /// Boundary nodes are placeholders for the boundary of the
/// scheduling region.
///
/// BoundaryNodes can have DAG edges, including Data edges, but they do not
@@ -362,7 +362,7 @@
return Node;
}
- /// \brief Returns true if this SUnit refers to a machine instruction as
+ /// Returns true if this SUnit refers to a machine instruction as
/// opposed to an SDNode.
bool isInstr() const { return Instr; }
@@ -384,7 +384,7 @@
/// It also adds the current node as a successor of the specified node.
bool addPred(const SDep &D, bool Required = true);
- /// \brief Adds a barrier edge to SU by calling addPred(), with latency 0
+ /// Adds a barrier edge to SU by calling addPred(), with latency 0
/// generally or latency 1 for a store followed by a load.
bool addPredBarrier(SUnit *SU) {
SDep Dep(SU, SDep::Barrier);
@@ -406,7 +406,7 @@
return Depth;
}
- /// \brief Returns the height of this node, which is the length of the
+ /// Returns the height of this node, which is the length of the
/// maximum path down to any node which has no successors.
unsigned getHeight() const {
if (!isHeightCurrent)
@@ -414,21 +414,21 @@
return Height;
}
- /// \brief If NewDepth is greater than this node's depth value, sets it to
+ /// If NewDepth is greater than this node's depth value, sets it to
/// be the new depth value. This also recursively marks successor nodes
/// dirty.
void setDepthToAtLeast(unsigned NewDepth);
- /// \brief If NewDepth is greater than this node's depth value, set it to be
+ /// If NewDepth is greater than this node's depth value, set it to be
/// the new height value. This also recursively marks predecessor nodes
/// dirty.
void setHeightToAtLeast(unsigned NewHeight);
- /// \brief Sets a flag in this node to indicate that its stored Depth value
+ /// Sets a flag in this node to indicate that its stored Depth value
/// will require recomputation the next time getDepth() is called.
void setDepthDirty();
- /// \brief Sets a flag in this node to indicate that its stored Height value
+ /// Sets a flag in this node to indicate that its stored Height value
/// will require recomputation the next time getHeight() is called.
void setHeightDirty();
@@ -455,15 +455,15 @@
return NumSuccsLeft == 0;
}
- /// \brief Orders this node's predecessor edges such that the critical path
+ /// Orders this node's predecessor edges such that the critical path
/// edge occurs first.
void biasCriticalPath();
void dump(const ScheduleDAG *G) const;
void dumpAll(const ScheduleDAG *G) const;
raw_ostream &print(raw_ostream &O,
- const SUnit *N = nullptr,
- const SUnit *X = nullptr) const;
+ const SUnit *Entry = nullptr,
+ const SUnit *Exit = nullptr) const;
raw_ostream &print(raw_ostream &O, const ScheduleDAG *G) const;
private:
@@ -497,7 +497,7 @@
//===--------------------------------------------------------------------===//
- /// \brief This interface is used to plug different priorities computation
+ /// This interface is used to plug different priorities computation
/// algorithms into the list scheduler. It implements the interface of a
/// standard priority queue, where nodes are inserted in arbitrary order and
/// returned in priority order. The computation of the priority and the
@@ -609,7 +609,7 @@
virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}
#ifndef NDEBUG
- /// \brief Verifies that all SUnits were scheduled and that their state is
+ /// Verifies that all SUnits were scheduled and that their state is
/// consistent. Returns the number of scheduled SUnits.
unsigned VerifyScheduledDAG(bool isBottomUp);
#endif
@@ -708,7 +708,7 @@
/// method.
void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
- /// \brief Reassigns topological indexes for the nodes in the DAG to
+ /// Reassigns topological indexes for the nodes in the DAG to
/// preserve the topological ordering.
void Shift(BitVector& Visited, int LowerBound, int UpperBound);
@@ -735,11 +735,11 @@
/// Returns true if addPred(TargetSU, SU) creates a cycle.
bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);
- /// \brief Updates the topological ordering to accommodate an edge to be
+ /// Updates the topological ordering to accommodate an edge to be
/// added from SUnit \p X to SUnit \p Y.
void AddPred(SUnit *Y, SUnit *X);
- /// \brief Updates the topological ordering to accommodate an an edge to be
+ /// Updates the topological ordering to accommodate an edge to be
/// removed from the specified node \p N from the predecessors of the
/// current node \p M.
void RemovePred(SUnit *M, SUnit *N);
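
A sketch of wiring two nodes together with the edge API above; SUa and SUb are assumed SUnits in the same DAG:

    llvm::SDep Edge(SUa, llvm::SDep::Artificial); // order edge, not needed
                                                  // for correctness
    if (SUb->addPred(Edge)) {
      // SUb now depends on SUa, and SUa lists SUb as a successor.
      unsigned Latency = Edge.getLatency(); // 0 for artificial order edges
      (void)Latency;
    }
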
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGInstrs.h b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGInstrs.h
index 1488220..520a238 100644
--- a/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGInstrs.h
+++ b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGInstrs.h
@@ -190,7 +190,7 @@
using SUList = std::list<SUnit *>;
protected:
- /// \brief A map from ValueType to SUList, used during DAG construction, as
+ /// A map from ValueType to SUList, used during DAG construction, as
/// a means of remembering which SUs depend on which memory locations.
class Value2SUsMap;
@@ -201,7 +201,7 @@
void reduceHugeMemNodeMaps(Value2SUsMap &stores,
Value2SUsMap &loads, unsigned N);
- /// \brief Adds a chain edge between SUa and SUb, but only if both
+ /// Adds a chain edge between SUa and SUb, but only if both
/// AliasAnalysis and Target fail to deny the dependency.
void addChainDependency(SUnit *SUa, SUnit *SUb,
unsigned Latency = 0);
@@ -286,7 +286,7 @@
/// Cleans up after scheduling in the given block.
virtual void finishBlock();
- /// \brief Initialize the DAG and common scheduler state for a new
+ /// Initialize the DAG and common scheduler state for a new
/// scheduling region. This does not actually create the DAG, only clears
/// it. The scheduling driver may call BuildSchedGraph multiple times per
/// scheduling region.
@@ -308,7 +308,7 @@
LiveIntervals *LIS = nullptr,
bool TrackLaneMasks = false);
- /// \brief Adds dependencies from instructions in the current list of
+ /// Adds dependencies from instructions in the current list of
/// instructions being scheduled to scheduling barrier. We want to make sure
/// instructions which define registers that are either used by the
/// terminator or are live-out are properly scheduled. This is especially
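
For context, the hooks above are driven in a fixed sequence by the list schedulers; roughly (a sketch, with DAG an assumed ScheduleDAGInstrs subclass and the remaining names in scope):

    DAG->startBlock(MBB);
    DAG->enterRegion(MBB, RegionBegin, RegionEnd, NumRegionInstrs);
    DAG->buildSchedGraph(AA);  // may run more than once per region
    // ... schedule the region, then:
    DAG->exitRegion();
    DAG->finishBlock();
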
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScheduleDFS.h b/linux-x64/clang/include/llvm/CodeGen/ScheduleDFS.h
index d6a8c79..3ecc033 100644
--- a/linux-x64/clang/include/llvm/CodeGen/ScheduleDFS.h
+++ b/linux-x64/clang/include/llvm/CodeGen/ScheduleDFS.h
@@ -25,7 +25,7 @@
class raw_ostream;
-/// \brief Represent the ILP of the subDAG rooted at a DAG node.
+/// Represent the ILP of the subDAG rooted at a DAG node.
///
/// ILPValues summarize the DAG subtree rooted at each node. ILPValues are
/// valid for all nodes regardless of their subtree membership.
@@ -62,13 +62,13 @@
void dump() const;
};
-/// \brief Compute the values of each DAG node for various metrics during DFS.
+/// Compute the values of each DAG node for various metrics during DFS.
class SchedDFSResult {
friend class SchedDFSImpl;
static const unsigned InvalidSubtreeID = ~0u;
- /// \brief Per-SUnit data computed during DFS for various metrics.
+ /// Per-SUnit data computed during DFS for various metrics.
///
/// A node's SubtreeID is set to itself when it is visited to indicate that it
/// is the root of a subtree. Later it is set to its parent to indicate an
@@ -81,7 +81,7 @@
NodeData() = default;
};
- /// \brief Per-Subtree data computed during DFS.
+ /// Per-Subtree data computed during DFS.
struct TreeData {
unsigned ParentTreeID = InvalidSubtreeID;
unsigned SubInstrCount = 0;
@@ -89,7 +89,7 @@
TreeData() = default;
};
- /// \brief Record a connection between subtrees and the connection level.
+ /// Record a connection between subtrees and the connection level.
struct Connection {
unsigned TreeID;
unsigned Level;
@@ -117,15 +117,15 @@
SchedDFSResult(bool IsBU, unsigned lim)
: IsBottomUp(IsBU), SubtreeLimit(lim) {}
- /// \brief Get the node cutoff before subtrees are considered significant.
+ /// Get the node cutoff before subtrees are considered significant.
unsigned getSubtreeLimit() const { return SubtreeLimit; }
- /// \brief Return true if this DFSResult is uninitialized.
+ /// Return true if this DFSResult is uninitialized.
///
/// resize() initializes DFSResult, while compute() populates it.
bool empty() const { return DFSNodeData.empty(); }
- /// \brief Clear the results.
+ /// Clear the results.
void clear() {
DFSNodeData.clear();
DFSTreeData.clear();
@@ -133,37 +133,37 @@
SubtreeConnectLevels.clear();
}
- /// \brief Initialize the result data with the size of the DAG.
+ /// Initialize the result data with the size of the DAG.
void resize(unsigned NumSUnits) {
DFSNodeData.resize(NumSUnits);
}
- /// \brief Compute various metrics for the DAG with given roots.
+ /// Compute various metrics for the DAG with given roots.
void compute(ArrayRef<SUnit> SUnits);
- /// \brief Get the number of instructions in the given subtree and its
+ /// Get the number of instructions in the given subtree and its
/// children.
unsigned getNumInstrs(const SUnit *SU) const {
return DFSNodeData[SU->NodeNum].InstrCount;
}
- /// \brief Get the number of instructions in the given subtree not including
+ /// Get the number of instructions in the given subtree not including
/// children.
unsigned getNumSubInstrs(unsigned SubtreeID) const {
return DFSTreeData[SubtreeID].SubInstrCount;
}
- /// \brief Get the ILP value for a DAG node.
+ /// Get the ILP value for a DAG node.
///
/// A leaf node has an ILP of 1/1.
ILPValue getILP(const SUnit *SU) const {
return ILPValue(DFSNodeData[SU->NodeNum].InstrCount, 1 + SU->getDepth());
}
- /// \brief The number of subtrees detected in this DAG.
+ /// The number of subtrees detected in this DAG.
unsigned getNumSubtrees() const { return SubtreeConnectLevels.size(); }
- /// \brief Get the ID of the subtree the given DAG node belongs to.
+ /// Get the ID of the subtree the given DAG node belongs to.
///
/// For convenience, if DFSResults have not been computed yet, give everything
/// tree ID 0.
@@ -174,7 +174,7 @@
return DFSNodeData[SU->NodeNum].SubtreeID;
}
- /// \brief Get the connection level of a subtree.
+ /// Get the connection level of a subtree.
///
/// For bottom-up trees, the connection level is the latency depth (in cycles)
/// of the deepest connection to another subtree.
@@ -182,7 +182,7 @@
return SubtreeConnectLevels[SubtreeID];
}
- /// \brief Scheduler callback to update SubtreeConnectLevels when a tree is
+ /// Scheduler callback to update SubtreeConnectLevels when a tree is
/// initially scheduled.
void scheduleTree(unsigned SubtreeID);
};
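
The intended call order is resize(), then compute(), then queries; a sketch assuming the DAG's nodes live in a SUnits vector:

    llvm::SchedDFSResult DFS(/*IsBU=*/true, /*lim=*/8); // bottom-up, cutoff 8
    DFS.resize(SUnits.size());
    DFS.compute(SUnits);
    llvm::ILPValue ILP = DFS.getILP(&SUnits[0]);   // a leaf reports ILP 1/1
    unsigned Tree = DFS.getSubtreeID(&SUnits[0]);  // 0 before compute()
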
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScoreboardHazardRecognizer.h b/linux-x64/clang/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
index 466ab53..3f75d10 100644
--- a/linux-x64/clang/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
+++ b/linux-x64/clang/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
@@ -106,7 +106,7 @@
Scoreboard RequiredScoreboard;
public:
- ScoreboardHazardRecognizer(const InstrItineraryData *ItinData,
+ ScoreboardHazardRecognizer(const InstrItineraryData *II,
const ScheduleDAG *DAG,
const char *ParentDebugType = "");
diff --git a/linux-x64/clang/include/llvm/CodeGen/SelectionDAG.h b/linux-x64/clang/include/llvm/CodeGen/SelectionDAG.h
index af43c9b..c691d44 100644
--- a/linux-x64/clang/include/llvm/CodeGen/SelectionDAG.h
+++ b/linux-x64/clang/include/llvm/CodeGen/SelectionDAG.h
@@ -73,6 +73,7 @@
class MCSymbol;
class OptimizationRemarkEmitter;
class SDDbgValue;
+class SDDbgLabel;
class SelectionDAG;
class SelectionDAGTargetInfo;
class TargetLibraryInfo;
@@ -148,6 +149,7 @@
BumpPtrAllocator Alloc;
SmallVector<SDDbgValue*, 32> DbgValues;
SmallVector<SDDbgValue*, 32> ByvalParmDbgValues;
+ SmallVector<SDDbgLabel*, 4> DbgLabels;
using DbgValMapType = DenseMap<const SDNode *, SmallVector<SDDbgValue *, 2>>;
DbgValMapType DbgValMap;
@@ -164,7 +166,11 @@
DbgValMap[Node].push_back(V);
}
- /// \brief Invalidate all DbgValues attached to the node and remove
+ void add(SDDbgLabel *L) {
+ DbgLabels.push_back(L);
+ }
+
+ /// Invalidate all DbgValues attached to the node and remove
/// it from the Node-to-DbgValues map.
void erase(const SDNode *Node);
@@ -172,13 +178,14 @@
DbgValMap.clear();
DbgValues.clear();
ByvalParmDbgValues.clear();
+ DbgLabels.clear();
Alloc.Reset();
}
BumpPtrAllocator &getAlloc() { return Alloc; }
bool empty() const {
- return DbgValues.empty() && ByvalParmDbgValues.empty();
+ return DbgValues.empty() && ByvalParmDbgValues.empty() && DbgLabels.empty();
}
ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) {
@@ -189,11 +196,14 @@
}
using DbgIterator = SmallVectorImpl<SDDbgValue*>::iterator;
+ using DbgLabelIterator = SmallVectorImpl<SDDbgLabel*>::iterator;
DbgIterator DbgBegin() { return DbgValues.begin(); }
DbgIterator DbgEnd() { return DbgValues.end(); }
DbgIterator ByvalParmDbgBegin() { return ByvalParmDbgValues.begin(); }
DbgIterator ByvalParmDbgEnd() { return ByvalParmDbgValues.end(); }
+ DbgLabelIterator DbgLabelBegin() { return DbgLabels.begin(); }
+ DbgLabelIterator DbgLabelEnd() { return DbgLabels.end(); }
};
void checkForCycles(const SelectionDAG *DAG, bool force = false);
@@ -255,7 +265,7 @@
/// Pool allocation for misc. objects that are created once per SelectionDAG.
BumpPtrAllocator Allocator;
- /// Tracks dbg_value information through SDISel.
+ /// Tracks dbg_value and dbg_label information through SDISel.
SDDbgInfo *DbgInfo;
uint16_t NextPersistentId = 0;
@@ -372,7 +382,7 @@
/// Prepare this SelectionDAG to process code in the given MachineFunction.
void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE,
Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
- DivergenceAnalysis * DA);
+ DivergenceAnalysis * Divergence);
void setFunctionLoweringInfo(FunctionLoweringInfo * FuncInfo) {
FLI = FuncInfo;
@@ -486,7 +496,7 @@
/// the graph.
void Legalize();
- /// \brief Transforms a SelectionDAG node and any operands to it into a node
+ /// Transforms a SelectionDAG node and any operands to it into a node
/// that is compatible with the target instruction selector, as indicated by
/// the TargetLowering object.
///
@@ -537,7 +547,7 @@
//===--------------------------------------------------------------------===//
// Node creation methods.
- /// \brief Create a ConstantSDNode wrapping a constant value.
+ /// Create a ConstantSDNode wrapping a constant value.
/// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
///
/// If only legal types can be produced, this does the necessary
@@ -571,12 +581,12 @@
return getConstant(Val, DL, VT, true, isOpaque);
}
- /// \brief Create a true or false constant of type \p VT using the target's
+ /// Create a true or false constant of type \p VT using the target's
/// BooleanContent for type \p OpVT.
SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT);
/// @}
- /// \brief Create a ConstantFPSDNode wrapping a constant value.
+ /// Create a ConstantFPSDNode wrapping a constant value.
/// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
///
/// If only legal types can be produced, this does the necessary
@@ -588,7 +598,7 @@
bool isTarget = false);
SDValue getConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT,
bool isTarget = false);
- SDValue getConstantFP(const ConstantFP &CF, const SDLoc &DL, EVT VT,
+ SDValue getConstantFP(const ConstantFP &V, const SDLoc &DL, EVT VT,
bool isTarget = false);
SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT) {
return getConstantFP(Val, DL, VT, true);
@@ -748,7 +758,7 @@
return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
}
- /// \brief Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to
+ /// Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to
/// the shuffle node in input but with swapped operands.
///
/// Example: shuffle A, B, <0,5,2,7> -> shuffle B, A, <4,1,6,3>
@@ -772,7 +782,7 @@
/// Return the expression required to zero extend the Op
/// value assuming it was the smaller SrcTy value.
- SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT SrcTy);
+ SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);
/// Return an operation which will any-extend the low lanes of the operand
/// into the specified vector type. For example,
@@ -800,10 +810,10 @@
/// Create a bitwise NOT operation as (XOR Val, -1).
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT);
- /// \brief Create a logical NOT operation as (XOR Val, BooleanOne).
+ /// Create a logical NOT operation as (XOR Val, BooleanOne).
SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT);
- /// \brief Create an add instruction with appropriate flags when used for
+ /// Create an add instruction with appropriate flags when used for
/// addressing some offset of an object. i.e. if a load is split into multiple
/// components, create an add nuw from the base pointer to the offset.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Op, int64_t Offset) {
@@ -869,17 +879,18 @@
ArrayRef<SDValue> Ops, const SDNodeFlags Flags = SDNodeFlags());
SDValue getNode(unsigned Opcode, const SDLoc &DL, ArrayRef<EVT> ResultTys,
ArrayRef<SDValue> Ops);
- SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs,
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
ArrayRef<SDValue> Ops);
// Specialize based on number of operands.
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT);
- SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N,
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand,
const SDNodeFlags Flags = SDNodeFlags());
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
SDValue N2, const SDNodeFlags Flags = SDNodeFlags());
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
- SDValue N2, SDValue N3);
+ SDValue N2, SDValue N3,
+ const SDNodeFlags Flags = SDNodeFlags());
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
SDValue N2, SDValue N3, SDValue N4);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
@@ -887,15 +898,15 @@
// Specialize again based on number of operands for nodes with a VTList
// rather than a single VT.
- SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs);
- SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N);
- SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N1,
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList);
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N);
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
SDValue N2);
- SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N1,
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
SDValue N2, SDValue N3);
- SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N1,
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
SDValue N2, SDValue N3, SDValue N4);
- SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N1,
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
SDValue N2, SDValue N3, SDValue N4, SDValue N5);
/// Compute a TokenFactor to force all the incoming stack arguments to be
@@ -917,6 +928,23 @@
SDValue Size, unsigned Align, bool isVol, bool isTailCall,
MachinePointerInfo DstPtrInfo);
+ SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
+ unsigned DstAlign, SDValue Src, unsigned SrcAlign,
+ SDValue Size, Type *SizeTy, unsigned ElemSz,
+ bool isTailCall, MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo);
+
+ SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
+ unsigned DstAlign, SDValue Src, unsigned SrcAlign,
+ SDValue Size, Type *SizeTy, unsigned ElemSz,
+ bool isTailCall, MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo);
+
+ SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
+ unsigned DstAlign, SDValue Value, SDValue Size,
+ Type *SizeTy, unsigned ElemSz, bool isTailCall,
+ MachinePointerInfo DstPtrInfo);
+
/// Helper function to make it easier to build SetCC's if you just
/// have an ISD::CondCode instead of an SDValue.
///
@@ -1057,12 +1085,12 @@
MachineMemOperand *MMO);
SDValue
getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
- MachinePointerInfo PtrInfo, EVT TVT, unsigned Alignment = 0,
+ MachinePointerInfo PtrInfo, EVT SVT, unsigned Alignment = 0,
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes());
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
- SDValue Ptr, EVT TVT, MachineMemOperand *MMO);
- SDValue getIndexedStore(SDValue OrigStoe, const SDLoc &dl, SDValue Base,
+ SDValue Ptr, EVT SVT, MachineMemOperand *MMO);
+ SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base,
SDValue Offset, ISD::MemIndexedMode AM);
/// Returns sum of the base pointer and offset.
@@ -1135,24 +1163,24 @@
/// specified node to have the specified return type, Target opcode, and
/// operands. Note that target opcodes are stored as
/// ~TargetOpcode in the node opcode field. The resultant node is returned.
- SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT);
- SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT, SDValue Op1);
- SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
+ SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT);
+ SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT, SDValue Op1);
+ SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
SDValue Op1, SDValue Op2);
- SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
+ SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
SDValue Op1, SDValue Op2, SDValue Op3);
- SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
+ SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
ArrayRef<SDValue> Ops);
- SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1, EVT VT2);
- SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+ SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1, EVT VT2);
+ SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
EVT VT2, ArrayRef<SDValue> Ops);
- SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+ SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
EVT VT2, SDValue Op1);
- SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+ SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
EVT VT2, SDValue Op1, SDValue Op2);
- SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, SDVTList VTs,
+ SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, SDVTList VTs,
ArrayRef<SDValue> Ops);
/// This *mutates* the specified node to have the specified
@@ -1207,7 +1235,7 @@
SDValue Operand, SDValue Subreg);
/// Get the specified node if it's already available, or else return NULL.
- SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTs, ArrayRef<SDValue> Ops,
+ SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList, ArrayRef<SDValue> Ops,
const SDNodeFlags Flags = SDNodeFlags());
/// Creates a SDDbgValue node.
@@ -1222,8 +1250,16 @@
/// Creates a FrameIndex SDDbgValue node.
SDDbgValue *getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr,
- unsigned FI, const DebugLoc &DL,
- unsigned O);
+ unsigned FI, bool IsIndirect,
+ const DebugLoc &DL, unsigned O);
+
+ /// Creates a VReg SDDbgValue node.
+ SDDbgValue *getVRegDbgValue(DIVariable *Var, DIExpression *Expr,
+ unsigned VReg, bool IsIndirect,
+ const DebugLoc &DL, unsigned O);
+
+ /// Creates a SDDbgLabel node.
+ SDDbgLabel *getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O);
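A minimal sketch (helper name and context assumed) combining the new debug-info entry points: a dbg.value carried in a virtual register plus a dbg.label:

```cpp
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

void attachDebugInfo(SelectionDAG &DAG, SDNode *Node, DIVariable *Var,
                     DIExpression *Expr, DILabel *Label, unsigned VReg,
                     const DebugLoc &DL, unsigned Order) {
  // IsIndirect=false: the vreg holds the value itself, not its address.
  SDDbgValue *DV =
      DAG.getVRegDbgValue(Var, Expr, VReg, /*IsIndirect=*/false, DL, Order);
  DAG.AddDbgValue(DV, Node, /*isParameter=*/false);
  DAG.AddDbgLabel(DAG.getDbgLabel(Label, DL, Order));
}
```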
/// Transfer debug values from one node to another, while optionally
/// generating fragment expressions for split-up values. If \p InvalidateDbg
@@ -1255,7 +1291,7 @@
/// to be given new uses. These new uses of From are left in place, and
/// not automatically transferred to To.
///
- void ReplaceAllUsesWith(SDValue From, SDValue Op);
+ void ReplaceAllUsesWith(SDValue From, SDValue To);
void ReplaceAllUsesWith(SDNode *From, SDNode *To);
void ReplaceAllUsesWith(SDNode *From, const SDValue *To);
@@ -1306,6 +1342,9 @@
/// value is produced by SD.
void AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter);
+ /// Add a dbg_label SDNode.
+ void AddDbgLabel(SDDbgLabel *DB);
+
/// Get the debug values which reference the given SDNode.
ArrayRef<SDDbgValue*> GetDbgValues(const SDNode* SD) {
return DbgInfo->getSDDbgValues(SD);
@@ -1327,6 +1366,13 @@
return DbgInfo->ByvalParmDbgEnd();
}
+ SDDbgInfo::DbgLabelIterator DbgLabelBegin() {
+ return DbgInfo->DbgLabelBegin();
+ }
+ SDDbgInfo::DbgLabelIterator DbgLabelEnd() {
+ return DbgInfo->DbgLabelEnd();
+ }
+
/// To be invoked on an SDNode that is slated to be erased. This
/// function mirrors \c llvm::salvageDebugInfo.
void salvageDebugInfo(SDNode &N);
@@ -1438,11 +1484,21 @@
/// X|Cst == X+Cst iff X&Cst = 0.
bool isBaseWithConstantOffset(SDValue Op) const;
- /// Test whether the given SDValue is known to never be NaN.
- bool isKnownNeverNaN(SDValue Op) const;
+ /// Test whether the given SDValue is known to never be NaN. If \p SNaN is
+ /// true, returns whether \p Op is known to never be a signaling NaN (it may
+ /// still be a qNaN).
+ bool isKnownNeverNaN(SDValue Op, bool SNaN = false, unsigned Depth = 0) const;
- /// Test whether the given SDValue is known to never be positive or negative
- /// zero.
+ /// \returns true if \p Op is known to never be a signaling NaN.
+ bool isKnownNeverSNaN(SDValue Op, unsigned Depth = 0) const {
+ return isKnownNeverNaN(Op, true, Depth);
+ }
+
+ /// Test whether the given floating point SDValue is known to never be
+ /// positive or negative zero.
+ bool isKnownNeverZeroFloat(SDValue Op) const;
+
+ /// Test whether the given SDValue is known to contain non-zero value(s).
bool isKnownNeverZero(SDValue Op) const;
/// Test whether two SDValues are known to compare equal. This
@@ -1454,6 +1510,15 @@
/// allow an 'add' to be transformed into an 'or'.
bool haveNoCommonBitsSet(SDValue A, SDValue B) const;
+ /// Match a binop + shuffle pyramid that represents a horizontal reduction
+ /// over the elements of a vector starting from the EXTRACT_VECTOR_ELT node \p
+ /// Extract. The reduction must use one of the opcodes listed in \p
+ /// CandidateBinOps and on success \p BinOp will contain the matching opcode.
+ /// Returns the vector that is being reduced on, or SDValue() if a reduction
+ /// was not matched.
+ SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
+ ArrayRef<ISD::NodeType> CandidateBinOps);
+
/// Utility function used by legalize and lowering to
/// "unroll" a vector operation by splitting out the scalars and operating
/// on each element individually. If the ResNE is 0, fully unroll the vector
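A minimal sketch (assumed combine context) of using the matchBinOpReduction helper declared above to recognize a horizontal integer-add pyramid:

```cpp
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

SDValue tryHorizontalAdd(SelectionDAG &DAG, SDNode *Extract) {
  ISD::NodeType BinOp;
  SDValue Reduced = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
  if (!Reduced)
    return SDValue(); // not a binop+shuffle reduction pyramid
  // Here BinOp == ISD::ADD and Reduced is the vector being reduced; a real
  // target would emit its horizontal-add node at this point.
  return Reduced;
}
```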
diff --git a/linux-x64/clang/include/llvm/CodeGen/SelectionDAGISel.h b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGISel.h
index e56eafc..86df0af 100644
--- a/linux-x64/clang/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGISel.h
@@ -280,7 +280,7 @@
void SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
unsigned TableSize);
- /// \brief Return true if complex patterns for this target can mutate the
+ /// Return true if complex patterns for this target can mutate the
/// DAG.
virtual bool ComplexPatternFuncMutatesDAG() const {
return false;
@@ -292,14 +292,14 @@
// Calls to these functions are generated by tblgen.
void Select_INLINEASM(SDNode *N);
- void Select_READ_REGISTER(SDNode *N);
- void Select_WRITE_REGISTER(SDNode *N);
+ void Select_READ_REGISTER(SDNode *Op);
+ void Select_WRITE_REGISTER(SDNode *Op);
void Select_UNDEF(SDNode *N);
void CannotYetSelect(SDNode *N);
private:
void DoInstructionSelection();
- SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTs,
+ SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
ArrayRef<SDValue> Ops, unsigned EmitNodeInfo);
SDNode *MutateStrictFPToFP(SDNode *Node, unsigned NewOpc);
@@ -309,10 +309,10 @@
/// instruction selected, false if no code should be emitted for it.
bool PrepareEHLandingPad();
- /// \brief Perform instruction selection on all basic blocks in the function.
+ /// Perform instruction selection on all basic blocks in the function.
void SelectAllBasicBlocks(const Function &Fn);
- /// \brief Perform instruction selection on a single basic block, for
+ /// Perform instruction selection on a single basic block, for
/// instructions between \p Begin and \p End. \p HadTailCall will be set
/// to true if a call in the block was translated as a tail call.
void SelectBasicBlock(BasicBlock::const_iterator Begin,
@@ -322,7 +322,7 @@
void CodeGenAndEmitDAG();
- /// \brief Generate instructions for lowering the incoming arguments of the
+ /// Generate instructions for lowering the incoming arguments of the
/// given function.
void LowerArguments(const Function &F);
diff --git a/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h
index ffb5c00..a6d7e76 100644
--- a/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -37,6 +37,7 @@
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Operator.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
@@ -359,21 +360,34 @@
bool NoUnsignedWrap : 1;
bool NoSignedWrap : 1;
bool Exact : 1;
- bool UnsafeAlgebra : 1;
bool NoNaNs : 1;
bool NoInfs : 1;
bool NoSignedZeros : 1;
bool AllowReciprocal : 1;
bool VectorReduction : 1;
bool AllowContract : 1;
+ bool ApproximateFuncs : 1;
+ bool AllowReassociation : 1;
public:
/// Default constructor turns off all optimization flags.
SDNodeFlags()
: AnyDefined(false), NoUnsignedWrap(false), NoSignedWrap(false),
- Exact(false), UnsafeAlgebra(false), NoNaNs(false), NoInfs(false),
+ Exact(false), NoNaNs(false), NoInfs(false),
NoSignedZeros(false), AllowReciprocal(false), VectorReduction(false),
- AllowContract(false) {}
+ AllowContract(false), ApproximateFuncs(false),
+ AllowReassociation(false) {}
+
+ /// Propagate the fast-math-flags from an IR FPMathOperator.
+ void copyFMF(const FPMathOperator &FPMO) {
+ setNoNaNs(FPMO.hasNoNaNs());
+ setNoInfs(FPMO.hasNoInfs());
+ setNoSignedZeros(FPMO.hasNoSignedZeros());
+ setAllowReciprocal(FPMO.hasAllowReciprocal());
+ setAllowContract(FPMO.hasAllowContract());
+ setApproximateFuncs(FPMO.hasApproxFunc());
+ setAllowReassociation(FPMO.hasAllowReassoc());
+ }
/// Sets the state of the flags to the defined state.
void setDefined() { AnyDefined = true; }
@@ -393,10 +407,6 @@
setDefined();
Exact = b;
}
- void setUnsafeAlgebra(bool b) {
- setDefined();
- UnsafeAlgebra = b;
- }
void setNoNaNs(bool b) {
setDefined();
NoNaNs = b;
@@ -421,18 +431,32 @@
setDefined();
AllowContract = b;
}
+ void setApproximateFuncs(bool b) {
+ setDefined();
+ ApproximateFuncs = b;
+ }
+ void setAllowReassociation(bool b) {
+ setDefined();
+ AllowReassociation = b;
+ }
// These are accessors for each flag.
bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
bool hasNoSignedWrap() const { return NoSignedWrap; }
bool hasExact() const { return Exact; }
- bool hasUnsafeAlgebra() const { return UnsafeAlgebra; }
bool hasNoNaNs() const { return NoNaNs; }
bool hasNoInfs() const { return NoInfs; }
bool hasNoSignedZeros() const { return NoSignedZeros; }
bool hasAllowReciprocal() const { return AllowReciprocal; }
bool hasVectorReduction() const { return VectorReduction; }
bool hasAllowContract() const { return AllowContract; }
+ bool hasApproximateFuncs() const { return ApproximateFuncs; }
+ bool hasAllowReassociation() const { return AllowReassociation; }
+
+ bool isFast() const {
+ return NoSignedZeros && AllowReciprocal && NoNaNs && NoInfs &&
+ AllowContract && ApproximateFuncs && AllowReassociation;
+ }
/// Clear any flags in this flag set that aren't also set in Flags.
/// If the given Flags are undefined then don't do anything.
@@ -442,13 +466,14 @@
NoUnsignedWrap &= Flags.NoUnsignedWrap;
NoSignedWrap &= Flags.NoSignedWrap;
Exact &= Flags.Exact;
- UnsafeAlgebra &= Flags.UnsafeAlgebra;
NoNaNs &= Flags.NoNaNs;
NoInfs &= Flags.NoInfs;
NoSignedZeros &= Flags.NoSignedZeros;
AllowReciprocal &= Flags.AllowReciprocal;
VectorReduction &= Flags.VectorReduction;
AllowContract &= Flags.AllowContract;
+ ApproximateFuncs &= Flags.ApproximateFuncs;
+ AllowReassociation &= Flags.AllowReassociation;
}
};
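A minimal sketch of the replacement for the removed UnsafeAlgebra bit: per-flag propagation from IR via copyFMF, with isFast() as the aggregate query. The wrapper and its arguments are assumptions:

```cpp
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Operator.h"
using namespace llvm;

SDValue buildFAddWithFlags(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
                           SDValue LHS, SDValue RHS, const Instruction &I) {
  SDNodeFlags Flags;
  if (const auto *FPOp = dyn_cast<FPMathOperator>(&I))
    Flags.copyFMF(*FPOp); // nnan/ninf/nsz/arcp/contract/afn/reassoc
  SDValue Res = DAG.getNode(ISD::FADD, dl, VT, LHS, RHS, Flags);
  // isFast() is true only when every FP flag above is set.
  bool Fast = Res->getFlags().isFast();
  (void)Fast;
  return Res;
}
```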
@@ -544,7 +569,7 @@
static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
- static_assert(sizeof(LoadSDNodeBitfields) <= 4, "field too wide");
+ static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
private:
@@ -923,6 +948,7 @@
const SDNodeFlags getFlags() const { return Flags; }
void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
+ bool isFast() { return Flags.isFast(); }
/// Clear any flags in this node that aren't also set in Flags.
/// If Flags is not in a defined state then this has no effect.
@@ -1220,7 +1246,7 @@
public:
MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
- EVT MemoryVT, MachineMemOperand *MMO);
+ EVT memvt, MachineMemOperand *MMO);
bool readMem() const { return MMO->isLoad(); }
bool writeMem() const { return MMO->isStore(); }
@@ -1472,9 +1498,8 @@
const ConstantInt *Value;
- ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val,
- const DebugLoc &DL, EVT VT)
- : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DL,
+ ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
+ : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
getSDVTList(VT)),
Value(val) {
ConstantSDNodeBits.IsOpaque = isOpaque;
@@ -1510,10 +1535,9 @@
const ConstantFP *Value;
- ConstantFPSDNode(bool isTarget, const ConstantFP *val, const DebugLoc &DL,
- EVT VT)
- : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0, DL,
- getSDVTList(VT)),
+ ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
+ : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0,
+ DebugLoc(), getSDVTList(VT)),
Value(val) {}
public:
@@ -1570,10 +1594,10 @@
bool isBitwiseNot(SDValue V);
/// Returns the SDNode if it is a constant splat BuildVector or constant int.
-ConstantSDNode *isConstOrConstSplat(SDValue V);
+ConstantSDNode *isConstOrConstSplat(SDValue N);
/// Returns the SDNode if it is a constant splat BuildVector or constant float.
-ConstantFPSDNode *isConstOrConstSplatFP(SDValue V);
+ConstantFPSDNode *isConstOrConstSplatFP(SDValue N);
class GlobalAddressSDNode : public SDNode {
friend class SelectionDAG;
@@ -1584,7 +1608,7 @@
GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
const GlobalValue *GA, EVT VT, int64_t o,
- unsigned char TargetFlags);
+ unsigned char TF);
public:
const GlobalValue *getGlobal() const { return TheGlobal; }
@@ -1765,13 +1789,13 @@
unsigned MinSplatBits = 0,
bool isBigEndian = false) const;
- /// \brief Returns the splatted value or a null value if this is not a splat.
+ /// Returns the splatted value or a null value if this is not a splat.
///
/// If passed a non-null UndefElements bitvector, it will resize it to match
/// the vector width and set the bits where elements are undef.
SDValue getSplatValue(BitVector *UndefElements = nullptr) const;
- /// \brief Returns the splatted constant or null if this is not a constant
+ /// Returns the splatted constant or null if this is not a constant
/// splat.
///
/// If passed a non-null UndefElements bitvector, it will resize it to match
@@ -1779,7 +1803,7 @@
ConstantSDNode *
getConstantSplatNode(BitVector *UndefElements = nullptr) const;
- /// \brief Returns the splatted constant FP or null if this is not a constant
+ /// Returns the splatted constant FP or null if this is not a constant
/// FP splat.
///
/// If passed a non-null UndefElements bitvector, it will resize it to match
@@ -1787,7 +1811,7 @@
ConstantFPSDNode *
getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;
- /// \brief If this is a constant FP splat and the splatted constant FP is an
+ /// If this is a constant FP splat and the splatted constant FP is an
/// exact power or 2, return the log base 2 integer value. Otherwise,
/// return -1.
///
@@ -2119,7 +2143,7 @@
return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
}
- const SDValue &getSrc0() const { return getOperand(3); }
+ const SDValue &getPassThru() const { return getOperand(3); }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MLOAD;
}
@@ -2177,7 +2201,6 @@
const SDValue &getBasePtr() const { return getOperand(3); }
const SDValue &getIndex() const { return getOperand(4); }
const SDValue &getMask() const { return getOperand(2); }
- const SDValue &getValue() const { return getOperand(1); }
const SDValue &getScale() const { return getOperand(5); }
static bool classof(const SDNode *N) {
@@ -2196,6 +2219,8 @@
EVT MemVT, MachineMemOperand *MMO)
: MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO) {}
+ const SDValue &getPassThru() const { return getOperand(1); }
+
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MGATHER;
}
@@ -2211,6 +2236,8 @@
EVT MemVT, MachineMemOperand *MMO)
: MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO) {}
+ const SDValue &getValue() const { return getOperand(1); }
+
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MSCATTER;
}
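A minimal sketch (hypothetical fold) illustrating the accessor split above: getPassThru() now lives on MaskedGatherSDNode, while getValue() moved to MaskedScatterSDNode:

```cpp
#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

SDValue foldAllZeroMaskGather(MaskedGatherSDNode *MGT) {
  // A gather whose mask is known all-zero just produces its pass-through.
  if (ISD::isBuildVectorAllZeros(MGT->getMask().getNode()))
    return MGT->getPassThru();
  return SDValue();
}
```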
diff --git a/linux-x64/clang/include/llvm/CodeGen/SlotIndexes.h b/linux-x64/clang/include/llvm/CodeGen/SlotIndexes.h
index 3a91e36..334267d 100644
--- a/linux-x64/clang/include/llvm/CodeGen/SlotIndexes.h
+++ b/linux-x64/clang/include/llvm/CodeGen/SlotIndexes.h
@@ -578,9 +578,9 @@
assert(!MI.isInsideBundle() &&
"Instructions inside bundles should use bundle start's slot.");
assert(mi2iMap.find(&MI) == mi2iMap.end() && "Instr already indexed.");
- // Numbering DBG_VALUE instructions could cause code generation to be
+ // Numbering debug instructions could cause code generation to be
// affected by debug information.
- assert(!MI.isDebugValue() && "Cannot number DBG_VALUE instructions.");
+ assert(!MI.isDebugInstr() && "Cannot number debug instructions.");
assert(MI.getParent() != nullptr && "Instr must be added to function.");
@@ -674,10 +674,10 @@
idx2MBBMap.push_back(IdxMBBPair(startIdx, mbb));
renumberIndexes(newItr);
- std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
+ llvm::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
}
- /// \brief Free the resources that were required to maintain a SlotIndex.
+ /// Free the resources that were required to maintain a SlotIndex.
///
/// Once an index is no longer needed (for instance because the instruction
/// at that index has been moved), the resources required to maintain the
diff --git a/linux-x64/clang/include/llvm/CodeGen/StackMaps.h b/linux-x64/clang/include/llvm/CodeGen/StackMaps.h
index 4407114..e584a41 100644
--- a/linux-x64/clang/include/llvm/CodeGen/StackMaps.h
+++ b/linux-x64/clang/include/llvm/CodeGen/StackMaps.h
@@ -29,7 +29,7 @@
class raw_ostream;
class TargetRegisterInfo;
-/// \brief MI-level stackmap operands.
+/// MI-level stackmap operands.
///
/// MI stackmap operations take the form:
/// <id>, <numBytes>, live args...
@@ -60,7 +60,7 @@
}
};
-/// \brief MI-level patchpoint operands.
+/// MI-level patchpoint operands.
///
/// MI patchpoint operations take the form:
/// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
@@ -137,7 +137,7 @@
return getVarIdx();
}
- /// \brief Get the next scratch register operand index.
+ /// Get the next scratch register operand index.
unsigned getNextScratchIdx(unsigned StartIdx = 0) const;
};
@@ -156,7 +156,7 @@
// TODO: we should change the STATEPOINT representation so that CC and
// Flags should be part of meta operands, with args and deopt operands, and
// gc operands all prefixed by their length and a type code. This would be
- // much more consistent.
+ // much more consistent.
public:
// These values are absolute offsets into the operands of the statepoint
// instruction.
@@ -236,15 +236,15 @@
FnInfos.clear();
}
- /// \brief Generate a stackmap record for a stackmap instruction.
+ /// Generate a stackmap record for a stackmap instruction.
///
/// MI must be a raw STACKMAP, not a PATCHPOINT.
void recordStackMap(const MachineInstr &MI);
- /// \brief Generate a stackmap record for a patchpoint instruction.
+ /// Generate a stackmap record for a patchpoint instruction.
void recordPatchPoint(const MachineInstr &MI);
- /// \brief Generate a stackmap record for a statepoint instruction.
+ /// Generate a stackmap record for a statepoint instruction.
void recordStatepoint(const MachineInstr &MI);
/// If there is any stack map data, create a stack map section and serialize
@@ -293,11 +293,11 @@
MachineInstr::const_mop_iterator MOE, LocationVec &Locs,
LiveOutVec &LiveOuts) const;
- /// \brief Create a live-out register record for the given register @p Reg.
+ /// Create a live-out register record for the given register @p Reg.
LiveOutReg createLiveOutReg(unsigned Reg,
const TargetRegisterInfo *TRI) const;
- /// \brief Parse the register live-out mask and return a vector of live-out
+ /// Parse the register live-out mask and return a vector of live-out
/// registers that need to be recorded in the stackmap.
LiveOutVec parseRegisterLiveOutMask(const uint32_t *Mask) const;
@@ -311,16 +311,16 @@
MachineInstr::const_mop_iterator MOE,
bool recordResult = false);
- /// \brief Emit the stackmap header.
+ /// Emit the stackmap header.
void emitStackmapHeader(MCStreamer &OS);
- /// \brief Emit the function frame record for each function.
+ /// Emit the function frame record for each function.
void emitFunctionFrameRecords(MCStreamer &OS);
- /// \brief Emit the constant pool.
+ /// Emit the constant pool.
void emitConstantPoolEntries(MCStreamer &OS);
- /// \brief Emit the callsite info for each stackmap/patchpoint intrinsic call.
+ /// Emit the callsite info for each stackmap/patchpoint intrinsic call.
void emitCallsiteEntries(MCStreamer &OS);
void print(raw_ostream &OS);
diff --git a/linux-x64/clang/include/llvm/CodeGen/StackProtector.h b/linux-x64/clang/include/llvm/CodeGen/StackProtector.h
index 72de212..a506ac6 100644
--- a/linux-x64/clang/include/llvm/CodeGen/StackProtector.h
+++ b/linux-x64/clang/include/llvm/CodeGen/StackProtector.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
@@ -35,24 +36,11 @@
class Type;
class StackProtector : public FunctionPass {
-public:
- /// SSPLayoutKind. Stack Smashing Protection (SSP) rules require that
- /// vulnerable stack allocations are located close the stack protector.
- enum SSPLayoutKind {
- SSPLK_None, ///< Did not trigger a stack protector. No effect on data
- ///< layout.
- SSPLK_LargeArray, ///< Array or nested array >= SSP-buffer-size. Closest
- ///< to the stack protector.
- SSPLK_SmallArray, ///< Array or nested array < SSP-buffer-size. 2nd closest
- ///< to the stack protector.
- SSPLK_AddrOf ///< The address of this allocation is exposed and
- ///< triggered protection. 3rd closest to the protector.
- };
-
- /// A mapping of AllocaInsts to their required SSP layout.
- using SSPLayoutMap = ValueMap<const AllocaInst *, SSPLayoutKind>;
-
private:
+ /// A mapping of AllocaInsts to their required SSP layout.
+ using SSPLayoutMap = DenseMap<const AllocaInst *,
+ MachineFrameInfo::SSPLayoutKind>;
+
const TargetMachine *TM = nullptr;
/// TLI - Keep a pointer of a TargetLowering to consult for determining
@@ -70,7 +58,7 @@
/// AllocaInst triggers a stack protector.
SSPLayoutMap Layout;
- /// \brief The minimum size of buffers that will receive stack smashing
+ /// The minimum size of buffers that will receive stack smashing
/// protection when -fstack-protector is used.
unsigned SSPBufferSize = 0;
@@ -107,7 +95,7 @@
bool ContainsProtectableArray(Type *Ty, bool &IsLarge, bool Strong = false,
bool InStruct = false) const;
- /// \brief Check whether a stack allocation has its address taken.
+ /// Check whether a stack allocation has its address taken.
bool HasAddressTaken(const Instruction *AI);
/// RequiresStackProtector - Check whether or not this function needs a
@@ -123,14 +111,12 @@
void getAnalysisUsage(AnalysisUsage &AU) const override;
- SSPLayoutKind getSSPLayout(const AllocaInst *AI) const;
-
// Return true if StackProtector is supposed to be handled by SelectionDAG.
bool shouldEmitSDCheck(const BasicBlock &BB) const;
- void adjustForColoring(const AllocaInst *From, const AllocaInst *To);
-
bool runOnFunction(Function &Fn) override;
+
+ void copyToMachineFrameInfo(MachineFrameInfo &MFI) const;
};
} // end namespace llvm
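A minimal sketch of the new hand-off: instead of querying getSSPLayout() on the pass, ISel-side code copies the layout into MachineFrameInfo (how the pass reference is obtained is assumed):

```cpp
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/StackProtector.h"
using namespace llvm;

void publishSSPLayout(const StackProtector &SP, MachineFunction &MF) {
  SP.copyToMachineFrameInfo(MF.getFrameInfo());
}
```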
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetFrameLowering.h b/linux-x64/clang/include/llvm/CodeGen/TargetFrameLowering.h
index 61f1cf0..f8effee 100644
--- a/linux-x64/clang/include/llvm/CodeGen/TargetFrameLowering.h
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetFrameLowering.h
@@ -158,6 +158,10 @@
return false;
}
+ /// Returns true if the target can safely skip saving callee-saved registers
+ /// for noreturn nounwind functions.
+ virtual bool enableCalleeSaveSkip(const MachineFunction &MF) const;
+
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
virtual void emitPrologue(MachineFunction &MF,
@@ -341,6 +345,14 @@
return false;
return true;
}
+
+ /// Return the initial CFA offset value, i.e. the one valid at the beginning
+ /// of the function (before any stack operations).
+ virtual int getInitialCFAOffset(const MachineFunction &MF) const;
+
+ /// Return the initial CFA register value, i.e. the one valid at the
+ /// beginning of the function (before any stack operations).
+ virtual unsigned getInitialCFARegister(const MachineFunction &MF) const;
};
} // End llvm namespace
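A minimal sketch of a target override for the new CFA hooks; MyFrameLowering and MYTGT::SP are assumed names. At function entry the CFA is typically the incoming stack pointer plus a fixed offset:

```cpp
int MyFrameLowering::getInitialCFAOffset(const MachineFunction &MF) const {
  return 0; // CFA == entry SP on this hypothetical target
}

unsigned
MyFrameLowering::getInitialCFARegister(const MachineFunction &MF) const {
  return MYTGT::SP; // the stack-pointer register (assumed enum)
}
```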
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetInstrInfo.h b/linux-x64/clang/include/llvm/CodeGen/TargetInstrInfo.h
index 5c2a530..b5bc561 100644
--- a/linux-x64/clang/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetInstrInfo.h
@@ -18,12 +18,14 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/None.h"
+#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineOutliner.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Support/BranchProbability.h"
@@ -79,7 +81,7 @@
/// Given a machine instruction descriptor, returns the register
/// class constraint for OpNum, or NULL.
- const TargetRegisterClass *getRegClass(const MCInstrDesc &TID, unsigned OpNum,
+ const TargetRegisterClass *getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
const TargetRegisterInfo *TRI,
const MachineFunction &MF) const;
@@ -225,6 +227,17 @@
return 0;
}
+ /// Optional extension of isLoadFromStackSlot that returns the number of
+ /// bytes loaded from the stack. This must be implemented if a backend
+ /// supports partial stack slot spills/loads to further disambiguate
+ /// what the load does.
+ virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
+ int &FrameIndex,
+ unsigned &MemBytes) const {
+ MemBytes = 0;
+ return isLoadFromStackSlot(MI, FrameIndex);
+ }
+
/// Check for post-frame ptr elimination stack locations as well.
/// This uses a heuristic so it isn't reliable for correctness.
virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
@@ -252,6 +265,17 @@
return 0;
}
+ /// Optional extension of isStoreToStackSlot that returns the number of
+ /// bytes stored to the stack. This must be implemented if a backend
+ /// supports partial stack slot spills/loads to further disambiguate
+ /// what the store does.
+ virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
+ int &FrameIndex,
+ unsigned &MemBytes) const {
+ MemBytes = 0;
+ return isStoreToStackSlot(MI, FrameIndex);
+ }
+
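A minimal sketch (MYTGT::STRWfi is an assumed opcode) of wiring up the new MemBytes-reporting overload for a 4-byte store-to-frame-index pseudo:

```cpp
unsigned MyInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                         int &FrameIndex,
                                         unsigned &MemBytes) const {
  if (MI.getOpcode() == MYTGT::STRWfi && MI.getOperand(1).isFI()) {
    FrameIndex = MI.getOperand(1).getIndex();
    MemBytes = 4; // this pseudo always stores exactly four bytes
    return MI.getOperand(0).getReg();
  }
  return 0; // not a simple stack-slot store
}
```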
/// Check for post-frame ptr elimination stack locations as well.
/// This uses a heuristic, so it isn't reliable for correctness.
virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
@@ -325,7 +349,7 @@
unsigned SubIdx, const MachineInstr &Orig,
const TargetRegisterInfo &TRI) const;
- /// \brief Clones instruction or the whole instruction bundle \p Orig and
+ /// Clones instruction or the whole instruction bundle \p Orig and
/// insert into \p MBB before \p InsertBefore. The target may update operands
/// that are required to be unique.
///
@@ -822,6 +846,15 @@
llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
}
+ /// If the specific machine instruction is an instruction that moves/copies
+ /// a value from one register to another, return true along with the
+ /// @Source machine operand and the @Destination machine operand.
+ virtual bool isCopyInstr(const MachineInstr &MI,
+ const MachineOperand *&SourceOpNum,
+ const MachineOperand *&Destination) const {
+ return false;
+ }
+
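A minimal sketch of the new copy-recognition hook for a hypothetical register-to-register move (MYTGT::MOVrr, operand 0 = destination, operand 1 = source):

```cpp
bool MyInstrInfo::isCopyInstr(const MachineInstr &MI,
                              const MachineOperand *&Source,
                              const MachineOperand *&Destination) const {
  if (MI.getOpcode() != MYTGT::MOVrr)
    return false;
  Destination = &MI.getOperand(0);
  Source = &MI.getOperand(1);
  return true;
}
```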
/// Store the specified register of the given register class to the specified
/// stack frame index. The store instruction is to be added to the given
/// machine basic block before the specified machine instruction. If isKill
@@ -876,7 +909,7 @@
/// The new instruction is inserted before MI, and the client is responsible
/// for removing the old instruction.
MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
- int FrameIndex,
+ int FI,
LiveIntervals *LIS = nullptr) const;
/// Same as the previous version except it allows folding of any load and
@@ -928,13 +961,13 @@
/// \param InsInstrs - Vector of new instructions that implement P
/// \param DelInstrs - Old instructions, including Root, that could be
/// replaced by InsInstr
- /// \param InstrIdxForVirtReg - map of virtual register to instruction in
+ /// \param InstIdxForVirtReg - map of virtual register to instruction in
/// InsInstr that defines it
virtual void genAlternativeCodeSequence(
MachineInstr &Root, MachineCombinerPattern Pattern,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
- DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
+ DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const;
/// Attempt to reassociate \P Root and \P Prev according to \P Pattern to
/// reduce critical path length.
@@ -957,11 +990,6 @@
/// even if it has glue.
virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; }
- /// Remember what registers the specified instruction uses and modifies.
- virtual void trackRegDefsUses(const MachineInstr &MI, BitVector &ModifiedRegs,
- BitVector &UsedRegs,
- const TargetRegisterInfo *TRI) const;
-
protected:
/// Target-dependent implementation for foldMemoryOperand.
/// Target-independent code in foldMemoryOperand will
@@ -988,7 +1016,7 @@
return nullptr;
}
- /// \brief Target-dependent implementation of getRegSequenceInputs.
+ /// Target-dependent implementation of getRegSequenceInputs.
///
/// \returns true if it is possible to build the equivalent
/// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
@@ -1002,7 +1030,7 @@
return false;
}
- /// \brief Target-dependent implementation of getExtractSubregInputs.
+ /// Target-dependent implementation of getExtractSubregInputs.
///
/// \returns true if it is possible to build the equivalent
/// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
@@ -1016,7 +1044,7 @@
return false;
}
- /// \brief Target-dependent implementation of getInsertSubregInputs.
+ /// Target-dependent implementation of getInsertSubregInputs.
///
/// \returns true if it is possible to build the equivalent
/// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
@@ -1438,7 +1466,7 @@
return 0;
}
- /// \brief Return the minimum clearance before an instruction that reads an
+ /// Return the minimum clearance before an instruction that reads an
/// unused register.
///
/// For example, AVX instructions may copy part of a register operand into
@@ -1505,7 +1533,7 @@
return false;
}
- /// \brief Return the value to use for the MachineCSE's LookAheadLimit,
+ /// Return the value to use for the MachineCSE's LookAheadLimit,
/// which is a heuristic used for CSE'ing phys reg defs.
virtual unsigned getMachineCSELookAheadLimit() const {
// The default lookahead is small to prevent unprofitable quadratic
@@ -1574,71 +1602,32 @@
return false;
}
- /// \brief Describes the number of instructions that it will take to call and
- /// construct a frame for a given outlining candidate.
- struct MachineOutlinerInfo {
- /// Number of instructions to call an outlined function for this candidate.
- unsigned CallOverhead;
-
- /// \brief Number of instructions to construct an outlined function frame
- /// for this candidate.
- unsigned FrameOverhead;
-
- /// \brief Represents the specific instructions that must be emitted to
- /// construct a call to this candidate.
- unsigned CallConstructionID;
-
- /// \brief Represents the specific instructions that must be emitted to
- /// construct a frame for this candidate's outlined function.
- unsigned FrameConstructionID;
-
- MachineOutlinerInfo() {}
- MachineOutlinerInfo(unsigned CallOverhead, unsigned FrameOverhead,
- unsigned CallConstructionID,
- unsigned FrameConstructionID)
- : CallOverhead(CallOverhead), FrameOverhead(FrameOverhead),
- CallConstructionID(CallConstructionID),
- FrameConstructionID(FrameConstructionID) {}
- };
-
- /// \brief Returns a \p MachineOutlinerInfo struct containing target-specific
+ /// Returns a \p outliner::OutlinedFunction struct containing target-specific
/// information for a set of outlining candidates.
- virtual MachineOutlinerInfo getOutlininingCandidateInfo(
- std::vector<
- std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
- &RepeatedSequenceLocs) const {
+ virtual outliner::OutlinedFunction getOutliningCandidateInfo(
+ std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
llvm_unreachable(
- "Target didn't implement TargetInstrInfo::getOutliningOverhead!");
+ "Target didn't implement TargetInstrInfo::getOutliningCandidateInfo!");
}
- /// Represents how an instruction should be mapped by the outliner.
- /// \p Legal instructions are those which are safe to outline.
- /// \p Illegal instructions are those which cannot be outlined.
- /// \p Invisible instructions are instructions which can be outlined, but
- /// shouldn't actually impact the outlining result.
- enum MachineOutlinerInstrType { Legal, Illegal, Invisible };
-
/// Returns how or if \p MI should be outlined.
- virtual MachineOutlinerInstrType
+ virtual outliner::InstrType
getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const {
llvm_unreachable(
"Target didn't implement TargetInstrInfo::getOutliningType!");
}
- /// \brief Returns target-defined flags defining properties of the MBB for
+ /// Returns target-defined flags defining properties of the MBB for
/// the outliner.
virtual unsigned getMachineOutlinerMBBFlags(MachineBasicBlock &MBB) const {
return 0x0;
}
- /// Insert a custom epilogue for outlined functions.
- /// This may be empty, in which case no epilogue or return statement will be
- /// emitted.
- virtual void insertOutlinerEpilogue(MachineBasicBlock &MBB,
- MachineFunction &MF,
- const MachineOutlinerInfo &MInfo) const {
+ /// Insert a custom frame for outlined functions.
+ virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
+ const outliner::OutlinedFunction &OF) const {
llvm_unreachable(
- "Target didn't implement TargetInstrInfo::insertOutlinerEpilogue!");
+ "Target didn't implement TargetInstrInfo::buildOutlinedFrame!");
}
/// Insert a call to an outlined function into the program.
@@ -1647,20 +1636,11 @@
virtual MachineBasicBlock::iterator
insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
MachineBasicBlock::iterator &It, MachineFunction &MF,
- const MachineOutlinerInfo &MInfo) const {
+ const outliner::Candidate &C) const {
llvm_unreachable(
"Target didn't implement TargetInstrInfo::insertOutlinedCall!");
}
- /// Insert a custom prologue for outlined functions.
- /// This may be empty, in which case no prologue will be emitted.
- virtual void insertOutlinerPrologue(MachineBasicBlock &MBB,
- MachineFunction &MF,
- const MachineOutlinerInfo &MInfo) const {
- llvm_unreachable(
- "Target didn't implement TargetInstrInfo::insertOutlinerPrologue!");
- }
-
/// Return true if the function can safely be outlined from.
/// A function \p MF is considered safe for outlining if an outlined function
/// produced from instructions in F will produce a program which produces the
@@ -1671,13 +1651,18 @@
"TargetInstrInfo::isFunctionSafeToOutlineFrom!");
}
+ /// Return true if the function should be outlined from by default.
+ virtual bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const {
+ return false;
+ }
+
private:
unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
unsigned CatchRetOpcode;
unsigned ReturnOpcode;
};
-/// \brief Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
+/// Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
template <> struct DenseMapInfo<TargetInstrInfo::RegSubRegPair> {
using RegInfo = DenseMapInfo<unsigned>;
@@ -1691,7 +1676,7 @@
RegInfo::getTombstoneKey());
}
- /// \brief Reuse getHashValue implementation from
+ /// Reuse getHashValue implementation from
/// std::pair<unsigned, unsigned>.
static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
std::pair<unsigned, unsigned> PairVal = std::make_pair(Val.Reg, Val.SubReg);
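A minimal sketch of the renamed per-instruction outliner hook using the new outliner::InstrType (MyInstrInfo and the classification policy are assumptions):

```cpp
#include "llvm/CodeGen/MachineOutliner.h"
using namespace llvm;

outliner::InstrType
MyInstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
                              unsigned Flags) const {
  const MachineInstr &MI = *MIT;
  if (MI.isDebugInstr())
    return outliner::InstrType::Invisible; // outline around debug info
  if (MI.isTerminator() || MI.isPosition())
    return outliner::InstrType::Illegal; // never pull these out
  return outliner::InstrType::Legal;
}
```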
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h b/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h
index 483223a..dce4168 100644
--- a/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h
@@ -223,7 +223,7 @@
virtual ~TargetLoweringBase() = default;
protected:
- /// \brief Initialize all of the actions to default values.
+ /// Initialize all of the actions to default values.
void initActions();
public:
@@ -423,17 +423,17 @@
return true;
}
- /// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
+ /// Return true if it is cheap to speculate a call to intrinsic cttz.
virtual bool isCheapToSpeculateCttz() const {
return false;
}
- /// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
+ /// Return true if it is cheap to speculate a call to intrinsic ctlz.
virtual bool isCheapToSpeculateCtlz() const {
return false;
}
- /// \brief Return true if ctlz instruction is fast.
+ /// Return true if ctlz instruction is fast.
virtual bool isCtlzFast() const {
return false;
}
@@ -446,13 +446,13 @@
return false;
}
- /// \brief Return true if it is cheaper to split the store of a merged int val
+ /// Return true if it is cheaper to split the store of a merged int val
/// from a pair of smaller values into multiple stores.
virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
return false;
}
- /// \brief Return if the target supports combining a
+ /// Return if the target supports combining a
/// chain like:
/// \code
/// %andResult = and %val1, #mask
@@ -509,7 +509,30 @@
return hasAndNotCompare(X);
}
- /// \brief Return true if the target wants to use the optimization that
+ /// There are two ways to clear extreme bits (either low or high):
+ /// Mask: x & (-1 << y) (the instcombine canonical form)
+ /// Shifts: x >> y << y
+ /// Return true if the variant with 2 shifts is preferred.
+ /// Return false if there is no preference.
+ virtual bool preferShiftsToClearExtremeBits(SDValue X) const {
+ // By default, let's assume that no one prefers shifts.
+ return false;
+ }
+
+ /// Should we transform the IR-optimal check for whether the given truncation
+ /// down into KeptBits would be truncating or not:
+ /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
+ /// Into its more traditional form:
+ /// ((%x << C) a>> C) dstcond %x
+ /// Return true if we should transform.
+ /// Return false if there is no preference.
+ virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
+ unsigned KeptBits) const {
+ // By default, let's assume that no one prefers the transform.
+ return false;
+ }
+
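A minimal sketch of a target opting into the two-shift form above; the 32-bit cost threshold is an assumption:

```cpp
bool MyTargetLowering::preferShiftsToClearExtremeBits(SDValue X) const {
  EVT VT = X.getValueType();
  // Pretend masks wider than 32 bits need a costly constant on this target.
  return VT.isScalarInteger() && VT.getSizeInBits() > 32;
}
```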
+ /// Return true if the target wants to use the optimization that
/// turns ext(promotableInst1(...(promotableInstN(load)))) into
/// promotedInst1(...(promotedInstN(ext(load)))).
bool enableExtLdPromotion() const { return EnableExtLdPromotion; }
@@ -695,7 +718,7 @@
/// always broken down into scalars in some contexts. This occurs even if the
/// vector type is legal.
virtual unsigned getVectorTypeBreakdownForCallingConv(
- LLVMContext &Context, EVT VT, EVT &IntermediateVT,
+ LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
unsigned &NumIntermediates, MVT &RegisterVT) const {
return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
RegisterVT);
@@ -748,10 +771,10 @@
/// operations don't trap except for integer divide and remainder.
virtual bool canOpTrap(unsigned Op, EVT VT) const;
- /// Similar to isShuffleMaskLegal. This is used by Targets can use this to
- /// indicate if there is a suitable VECTOR_SHUFFLE that can be used to replace
- /// a VAND with a constant pool entry.
- virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
+ /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
+ /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
+ /// constant pool entry.
+ virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
EVT /*VT*/) const {
return false;
}
@@ -767,6 +790,39 @@
return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
}
+ LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
+ unsigned EqOpc;
+ switch (Op) {
+ default: llvm_unreachable("Unexpected FP pseudo-opcode");
+ case ISD::STRICT_FADD: EqOpc = ISD::FADD; break;
+ case ISD::STRICT_FSUB: EqOpc = ISD::FSUB; break;
+ case ISD::STRICT_FMUL: EqOpc = ISD::FMUL; break;
+ case ISD::STRICT_FDIV: EqOpc = ISD::FDIV; break;
+ case ISD::STRICT_FSQRT: EqOpc = ISD::FSQRT; break;
+ case ISD::STRICT_FPOW: EqOpc = ISD::FPOW; break;
+ case ISD::STRICT_FPOWI: EqOpc = ISD::FPOWI; break;
+ case ISD::STRICT_FMA: EqOpc = ISD::FMA; break;
+ case ISD::STRICT_FSIN: EqOpc = ISD::FSIN; break;
+ case ISD::STRICT_FCOS: EqOpc = ISD::FCOS; break;
+ case ISD::STRICT_FEXP: EqOpc = ISD::FEXP; break;
+ case ISD::STRICT_FEXP2: EqOpc = ISD::FEXP2; break;
+ case ISD::STRICT_FLOG: EqOpc = ISD::FLOG; break;
+ case ISD::STRICT_FLOG10: EqOpc = ISD::FLOG10; break;
+ case ISD::STRICT_FLOG2: EqOpc = ISD::FLOG2; break;
+ case ISD::STRICT_FRINT: EqOpc = ISD::FRINT; break;
+ case ISD::STRICT_FNEARBYINT: EqOpc = ISD::FNEARBYINT; break;
+ }
+
+ auto Action = getOperationAction(EqOpc, VT);
+
+ // We don't currently handle Custom or Promote for strict FP pseudo-ops.
+ // For now, we just expand for those cases.
+ if (Action != Legal)
+ Action = Expand;
+
+ return Action;
+ }
+
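A minimal sketch of how legalization-side code can use the helper above: the strict node inherits the action of its non-strict twin, and anything not Legal currently comes back as Expand:

```cpp
#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

bool strictSqrtIsLegal(const TargetLoweringBase &TLI, EVT VT) {
  return TLI.getStrictFPOperationAction(ISD::STRICT_FSQRT, VT) ==
         TargetLoweringBase::Legal;
}
```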
/// Return true if the specified operation is legal on this target or can be
/// made legal with custom lowering. This is used to help guide high-level
/// lowering decisions.
@@ -1117,12 +1173,8 @@
/// Certain combinations of ABIs, Targets and features require that types
/// are legal for some operations and not for other operations.
/// For MIPS all vector types must be passed through the integer register set.
- virtual MVT getRegisterTypeForCallingConv(MVT VT) const {
- return getRegisterType(VT);
- }
-
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
- EVT VT) const {
+ CallingConv::ID CC, EVT VT) const {
return getRegisterType(Context, VT);
}
@@ -1130,6 +1182,7 @@
/// this occurs when a vector type is used, as vector are passed through the
/// integer register set.
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
+ CallingConv::ID CC,
EVT VT) const {
return getNumRegisters(Context, VT);
}
@@ -1179,7 +1232,7 @@
return getPointerTy(DL).getSizeInBits();
}
- /// \brief Get maximum # of store operations permitted for llvm.memset
+ /// Get maximum # of store operations permitted for llvm.memset
///
/// This function returns the maximum number of store operations permitted
/// to replace a call to llvm.memset. The value is set by the target at the
@@ -1189,7 +1242,7 @@
return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
}
- /// \brief Get maximum # of store operations permitted for llvm.memcpy
+ /// Get maximum # of store operations permitted for llvm.memcpy
///
/// This function returns the maximum number of store operations permitted
/// to replace a call to llvm.memcpy. The value is set by the target at the
@@ -1199,6 +1252,15 @@
return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
}
+ /// Get maximum # of store operations to be glued together
+ ///
+ /// This function returns the maximum number of store operations permitted
+ /// to glue together during lowering of llvm.memcpy. The value is set by
+ /// the target at the performance threshold for such a replacement.
+ virtual unsigned getMaxGluedStoresPerMemcpy() const {
+ return MaxGluedStoresPerMemcpy;
+ }
+
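A minimal sketch of tuning the new knob next to the existing limits in a target's TargetLowering constructor (MyTargetLowering and the values are assumptions):

```cpp
MyTargetLowering::MyTargetLowering(const TargetMachine &TM)
    : TargetLowering(TM) {
  MaxStoresPerMemcpy = 8;      // existing inline-expansion limit
  MaxGluedStoresPerMemcpy = 4; // keep at most four expanded stores glued
}
```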
/// Get maximum # of load operations permitted for memcmp
///
/// This function returns the maximum number of load operations permitted
@@ -1221,7 +1283,7 @@
return 1;
}
- /// \brief Get maximum # of store operations permitted for llvm.memmove
+ /// Get maximum # of store operations permitted for llvm.memmove
///
/// This function returns the maximum number of store operations permitted
/// to replace a call to llvm.memmove. The value is set by the target at the
@@ -1231,7 +1293,7 @@
return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
}
- /// \brief Determine if the target supports unaligned memory accesses.
+ /// Determine if the target supports unaligned memory accesses.
///
/// This function returns true if the target allows unaligned memory accesses
/// of the specified type in the given address space. If true, it also returns
@@ -1369,7 +1431,7 @@
/// If the target has a standard location for the stack protector guard,
/// returns the address of that location. Otherwise, returns nullptr.
/// DEPRECATED: please override useLoadStackGuardNode and customize
- /// LOAD_STACK_GUARD, or customize @llvm.stackguard().
+ /// LOAD_STACK_GUARD, or customize \@llvm.stackguard().
virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;
/// Inserts necessary declarations for SSP (stack protection) purpose.
@@ -1924,7 +1986,7 @@
Type *Ty, unsigned AddrSpace,
Instruction *I = nullptr) const;
- /// \brief Return the cost of the scaling factor used in the addressing mode
+ /// Return the cost of the scaling factor used in the addressing mode
/// represented by AM for this target, for a load/store of the specified type.
///
/// If the AM is supported, the return value must be >= 0.
@@ -2120,11 +2182,11 @@
/// Return true if the target has a vector blend instruction.
virtual bool hasVectorBlend() const { return false; }
- /// \brief Get the maximum supported factor for interleaved memory accesses.
+ /// Get the maximum supported factor for interleaved memory accesses.
/// Default to be the minimum interleave factor: 2.
virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
- /// \brief Lower an interleaved load to target specific intrinsics. Return
+ /// Lower an interleaved load to target specific intrinsics. Return
/// true on success.
///
/// \p LI is the vector load instruction.
@@ -2138,7 +2200,7 @@
return false;
}
- /// \brief Lower an interleaved store to target specific intrinsics. Return
+ /// Lower an interleaved store to target specific intrinsics. Return
/// true on success.
///
/// \p SI is the vector store instruction.
@@ -2211,7 +2273,7 @@
return false;
}
- /// \brief Return true if it is beneficial to convert a load of a constant to
+ /// Return true if it is beneficial to convert a load of a constant to
/// just the constant itself.
/// On some targets it might be more efficient to use a combination of
/// arithmetic instructions to materialize the constant instead of loading it
@@ -2236,6 +2298,11 @@
return false;
}
+ // Return true if CodeGenPrepare should consider splitting the large offset
+ // of a GEP so that the GEP fits into the addressing mode and can be sunk
+ // into the same blocks as its users.
+ virtual bool shouldConsiderGEPOffsetSplit() const { return false; }
+
//===--------------------------------------------------------------------===//
// Runtime Library hooks
//
@@ -2475,7 +2542,7 @@
/// expected to be merged.
unsigned GatherAllAliasesMaxDepth;
- /// \brief Specify maximum number of store instructions per memset call.
+ /// Specify maximum number of store instructions per memset call.
///
/// When lowering \@llvm.memset this field specifies the maximum number of
/// store operations that may be substituted for the call to memset. Targets
@@ -2491,7 +2558,7 @@
/// to memset, used for functions with OptSize attribute.
unsigned MaxStoresPerMemsetOptSize;
- /// \brief Specify maximum bytes of store instructions per memcpy call.
+ /// Specify maximum bytes of store instructions per memcpy call.
///
/// When lowering \@llvm.memcpy this field specifies the maximum number of
/// store operations that may be substituted for a call to memcpy. Targets
@@ -2504,13 +2571,21 @@
/// constant size.
unsigned MaxStoresPerMemcpy;
+
+ /// Specify max number of store instructions to glue in inlined memcpy.
+ ///
+ /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number
+ /// of store instructions to keep together. This helps in pairing and
+ /// vectorization later on.
+ unsigned MaxGluedStoresPerMemcpy = 0;
+
/// Maximum number of store operations that may be substituted for a call to
/// memcpy, used for functions with OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize;
unsigned MaxLoadsPerMemcmp;
unsigned MaxLoadsPerMemcmpOptSize;
- /// \brief Specify maximum bytes of store instructions per memmove call.
+ /// Specify maximum bytes of store instructions per memmove call.
///
/// When lowering \@llvm.memmove this field specifies the maximum number of
/// store instructions that may be substituted for a call to memmove. Targets
@@ -2547,6 +2622,11 @@
/// details.
MachineBasicBlock *emitXRayCustomEvent(MachineInstr &MI,
MachineBasicBlock *MBB) const;
+
+ /// Replace/modify the XRay typed event operands with target-dependent
+ /// details.
+ MachineBasicBlock *emitXRayTypedEvent(MachineInstr &MI,
+ MachineBasicBlock *MBB) const;
};
/// This class defines information used to lower LLVM code to legal SelectionDAG
@@ -2741,7 +2821,7 @@
/// results of this function, because simply replacing TLO.Old
/// with TLO.New will be incorrect when this parameter is true and TLO.Old
/// has multiple uses.
- bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
+ bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
APInt &KnownUndef, APInt &KnownZero,
TargetLoweringOpt &TLO, unsigned Depth = 0,
bool AssumeSingleUse = false) const;
@@ -2788,6 +2868,13 @@
SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
+ /// If \p SNaN is false, \returns true if \p Op is known to never be any
+ /// NaN. If \p SNaN is true, returns whether \p Op is known to never be a
+ /// signaling NaN.
+ virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
+ const SelectionDAG &DAG,
+ bool SNaN = false,
+ unsigned Depth = 0) const;
struct DAGCombinerInfo {
void *DC; // The DAG Combiner object.
CombineLevel Level;
@@ -2824,7 +2911,7 @@
bool isConstFalseVal(const SDNode *N) const;
/// Return if \p N is a True value when extended to \p VT.
- bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool Signed) const;
+ bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;
/// Try to simplify a setcc built with the specified operands and cc. If it is
/// unable to simplify it, return a null SDValue.
@@ -3408,12 +3495,10 @@
//===--------------------------------------------------------------------===//
// Div utility functions
//
- SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
- bool IsAfterLegalization,
- std::vector<SDNode *> *Created) const;
- SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
- bool IsAfterLegalization,
- std::vector<SDNode *> *Created) const;
+ SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
+ SmallVectorImpl<SDNode *> &Created) const;
+ SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
+ SmallVectorImpl<SDNode *> &Created) const;
/// Targets may override this function to provide custom SDIV lowering for
/// power-of-2 denominators. If the target returns an empty SDValue, LLVM
@@ -3421,7 +3506,7 @@
/// operations.
virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
SelectionDAG &DAG,
- std::vector<SDNode *> *Created) const;
+ SmallVectorImpl<SDNode *> &Created) const;
/// Indicate whether this target prefers to combine FDIVs with the same
/// divisor. If the transform should never be done, return zero. If the
@@ -3545,7 +3630,7 @@
/// bounds the returned pointer is unspecified, but will be within the vector
/// bounds.
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
- SDValue Idx) const;
+ SDValue Index) const;
//===--------------------------------------------------------------------===//
// Instruction Emitting Hooks
@@ -3601,12 +3686,17 @@
SDValue simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond, DAGCombinerInfo &DCI,
const SDLoc &DL) const;
+
+ SDValue optimizeSetCCOfSignedTruncationCheck(EVT SCCVT, SDValue N0,
+ SDValue N1, ISD::CondCode Cond,
+ DAGCombinerInfo &DCI,
+ const SDLoc &DL) const;
};
/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
-void GetReturnInfo(Type *ReturnType, AttributeList attr,
+void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
const TargetLowering &TLI, const DataLayout &DL);
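A minimal sketch of a call site updated for the new leading CallingConv parameter (the wrapper is an assumption; the argument order follows the declaration above):

```cpp
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Function.h"
using namespace llvm;

void computeReturnInfo(const Function &F, const TargetLowering &TLI,
                       SmallVectorImpl<ISD::OutputArg> &Outs) {
  GetReturnInfo(F.getCallingConv(), F.getReturnType(), F.getAttributes(),
                Outs, TLI, F.getParent()->getDataLayout());
}
```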
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h b/linux-x64/clang/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
index 78da77f..f5c7fc8 100644
--- a/linux-x64/clang/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
@@ -36,16 +36,18 @@
protected:
MCSymbolRefExpr::VariantKind PLTRelativeVariantKind =
MCSymbolRefExpr::VK_None;
+ const TargetMachine *TM;
public:
TargetLoweringObjectFileELF() = default;
~TargetLoweringObjectFileELF() override = default;
- /// Emit Obj-C garbage collection and linker options.
- void emitModuleMetadata(MCStreamer &Streamer, Module &M,
- const TargetMachine &TM) const override;
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
- void emitPersonalityValue(MCStreamer &Streamer, const DataLayout &TM,
+ /// Emit Obj-C garbage collection and linker options.
+ void emitModuleMetadata(MCStreamer &Streamer, Module &M) const override;
+
+ void emitPersonalityValue(MCStreamer &Streamer, const DataLayout &DL,
const MCSymbol *Sym) const override;
/// Given a constant with the SectionKind, return a section that it should be
@@ -98,8 +100,7 @@
void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
/// Emit the module flags that specify the garbage collection information.
- void emitModuleMetadata(MCStreamer &Streamer, Module &M,
- const TargetMachine &TM) const override;
+ void emitModuleMetadata(MCStreamer &Streamer, Module &M) const override;
MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
const TargetMachine &TM) const override;
@@ -153,8 +154,7 @@
const TargetMachine &TM) const override;
/// Emit Obj-C garbage collection and linker options.
- void emitModuleMetadata(MCStreamer &Streamer, Module &M,
- const TargetMachine &TM) const override;
+ void emitModuleMetadata(MCStreamer &Streamer, Module &M) const override;
MCSection *getStaticCtorSection(unsigned Priority,
const MCSymbol *KeySym) const override;
@@ -166,6 +166,16 @@
void emitLinkerFlagsForUsed(raw_ostream &OS,
const GlobalValue *GV) const override;
+
+ const MCExpr *lowerRelativeReference(const GlobalValue *LHS,
+ const GlobalValue *RHS,
+ const TargetMachine &TM) const override;
+
+ /// Given a mergeable constant with the specified size and relocation
+ /// information, return a section that it should be placed in.
+ MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
+ const Constant *C,
+ unsigned &Align) const override;
};
class TargetLoweringObjectFileWasm : public TargetLoweringObjectFile {
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetPassConfig.h b/linux-x64/clang/include/llvm/CodeGen/TargetPassConfig.h
index 5918c52..8f5c9cb 100644
--- a/linux-x64/clang/include/llvm/CodeGen/TargetPassConfig.h
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetPassConfig.h
@@ -16,7 +16,7 @@
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
-#include <cassert>
+#include <cassert>
#include <string>
namespace llvm {
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetRegisterInfo.h b/linux-x64/clang/include/llvm/CodeGen/TargetRegisterInfo.h
index ea47a24..55a8ba6 100644
--- a/linux-x64/clang/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -238,12 +238,12 @@
protected:
TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
- regclass_iterator RegClassBegin,
- regclass_iterator RegClassEnd,
+ regclass_iterator RCB,
+ regclass_iterator RCE,
const char *const *SRINames,
const LaneBitmask *SRILaneMasks,
LaneBitmask CoveringLanes,
- const RegClassInfo *const RSI,
+ const RegClassInfo *const RCIs,
unsigned Mode = 0);
virtual ~TargetRegisterInfo();
@@ -456,7 +456,7 @@
/// stack frame offset. The first register is closest to the incoming stack
/// pointer if stack grows down, and vice versa.
/// Notice: This function does not take into account disabled CSRs.
- /// In most cases you will want to use instead the function
+ /// In most cases you will want to use instead the function
/// getCalleeSavedRegs that is implemented in MachineRegisterInfo.
virtual const MCPhysReg*
getCalleeSavedRegs(const MachineFunction *MF) const = 0;
@@ -518,7 +518,7 @@
/// guaranteed to be restored before any uses. This is useful for targets that
/// have call sequences where a GOT register may be updated by the caller
/// prior to a call and is guaranteed to be restored (also by the caller)
- /// after the call.
+ /// after the call.
virtual bool isCallerPreservedPhysReg(unsigned PhysReg,
const MachineFunction &MF) const {
return false;
@@ -971,7 +971,7 @@
//===--------------------------------------------------------------------===//
/// Subtarget Hooks
- /// \brief SrcRC and DstRC will be morphed into NewRC if this returns true.
+ /// SrcRC and DstRC will be morphed into NewRC if this returns true.
virtual bool shouldCoalesce(MachineInstr *MI,
const TargetRegisterClass *SrcRC,
unsigned SubReg,
@@ -995,6 +995,12 @@
/// of the set as well.
bool checkAllSuperRegsMarked(const BitVector &RegisterSet,
ArrayRef<MCPhysReg> Exceptions = ArrayRef<MCPhysReg>()) const;
+
+ virtual const TargetRegisterClass *
+ getConstrainedRegClassForOperand(const MachineOperand &MO,
+ const MachineRegisterInfo &MRI) const {
+ return nullptr;
+ }
};
//===----------------------------------------------------------------------===//
@@ -1161,7 +1167,7 @@
///
/// Usage: OS << printReg(Reg, TRI, SubRegIdx) << '\n';
Printable printReg(unsigned Reg, const TargetRegisterInfo *TRI = nullptr,
- unsigned SubRegIdx = 0,
+ unsigned SubIdx = 0,
const MachineRegisterInfo *MRI = nullptr);
/// Create Printable object to print register units on a \ref raw_ostream.
@@ -1174,11 +1180,11 @@
/// Usage: OS << printRegUnit(Unit, TRI) << '\n';
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);
-/// \brief Create Printable object to print virtual registers and physical
+/// Create Printable object to print virtual registers and physical
/// registers on a \ref raw_ostream.
Printable printVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *TRI);
-/// \brief Create Printable object to print register classes or register banks
+/// Create Printable object to print register classes or register banks
/// on a \ref raw_ostream.
Printable printRegClassOrBank(unsigned Reg, const MachineRegisterInfo &RegInfo,
const TargetRegisterInfo *TRI);
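The new getConstrainedRegClassForOperand hook above returns nullptr by
default, i.e. no extra constraint. A hedged sketch of a GlobalISel-style
override, where MyTargetRegisterInfo and the GPR register class are
assumptions for illustration:

  const TargetRegisterClass *
  MyTargetRegisterInfo::getConstrainedRegClassForOperand(
      const MachineOperand &MO, const MachineRegisterInfo &MRI) const {
    // Constrain pointer-typed generic vregs to a general-purpose class;
    // otherwise keep the default of imposing no constraint.
    if (MRI.getType(MO.getReg()).isPointer())
      return &MyTarget::GPRRegClass; // hypothetical register class
    return nullptr;
  }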
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetSchedule.h b/linux-x64/clang/include/llvm/CodeGen/TargetSchedule.h
index 1044f0b..6173925 100644
--- a/linux-x64/clang/include/llvm/CodeGen/TargetSchedule.h
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetSchedule.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
@@ -45,24 +46,23 @@
public:
TargetSchedModel() : SchedModel(MCSchedModel::GetDefaultSchedModel()) {}
- /// \brief Initialize the machine model for instruction scheduling.
+ /// Initialize the machine model for instruction scheduling.
///
/// The machine model API keeps a copy of the top-level MCSchedModel table
/// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve
/// dynamic properties.
- void init(const MCSchedModel &sm, const TargetSubtargetInfo *sti,
- const TargetInstrInfo *tii);
+ void init(const TargetSubtargetInfo *TSInfo);
/// Return the MCSchedClassDesc for this instruction.
const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;
- /// \brief TargetSubtargetInfo getter.
+ /// TargetSubtargetInfo getter.
const TargetSubtargetInfo *getSubtargetInfo() const { return STI; }
- /// \brief TargetInstrInfo getter.
+ /// TargetInstrInfo getter.
const TargetInstrInfo *getInstrInfo() const { return TII; }
- /// \brief Return true if this machine model includes an instruction-level
+ /// Return true if this machine model includes an instruction-level
/// scheduling model.
///
/// This is more detailed than the coarse grain IssueWidth and default
@@ -71,7 +71,7 @@
const MCSchedModel *getMCSchedModel() const { return &SchedModel; }
- /// \brief Return true if this machine model includes cycle-to-cycle itinerary
+ /// Return true if this machine model includes cycle-to-cycle itinerary
/// data.
///
/// This models scheduling at each stage in the processor pipeline.
@@ -83,35 +83,35 @@
return nullptr;
}
- /// \brief Return true if this machine model includes an instruction-level
+ /// Return true if this machine model includes an instruction-level
/// scheduling model or cycle-to-cycle itinerary data.
bool hasInstrSchedModelOrItineraries() const {
return hasInstrSchedModel() || hasInstrItineraries();
}
- /// \brief Identify the processor corresponding to the current subtarget.
+ /// Identify the processor corresponding to the current subtarget.
unsigned getProcessorID() const { return SchedModel.getProcessorID(); }
- /// \brief Maximum number of micro-ops that may be scheduled per cycle.
+ /// Maximum number of micro-ops that may be scheduled per cycle.
unsigned getIssueWidth() const { return SchedModel.IssueWidth; }
- /// \brief Return true if new group must begin.
+ /// Return true if new group must begin.
bool mustBeginGroup(const MachineInstr *MI,
const MCSchedClassDesc *SC = nullptr) const;
- /// \brief Return true if current group must end.
+ /// Return true if current group must end.
bool mustEndGroup(const MachineInstr *MI,
const MCSchedClassDesc *SC = nullptr) const;
- /// \brief Return the number of issue slots required for this MI.
+ /// Return the number of issue slots required for this MI.
unsigned getNumMicroOps(const MachineInstr *MI,
const MCSchedClassDesc *SC = nullptr) const;
- /// \brief Get the number of kinds of resources for this target.
+ /// Get the number of kinds of resources for this target.
unsigned getNumProcResourceKinds() const {
return SchedModel.getNumProcResourceKinds();
}
- /// \brief Get a processor resource by ID for convenience.
+ /// Get a processor resource by ID for convenience.
const MCProcResourceDesc *getProcResource(unsigned PIdx) const {
return SchedModel.getProcResource(PIdx);
}
@@ -126,7 +126,7 @@
using ProcResIter = const MCWriteProcResEntry *;
- // \brief Get an iterator into the processor resources consumed by this
+ // Get an iterator into the processor resources consumed by this
// scheduling class.
ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const {
// The subtarget holds a single resource table for all processors.
@@ -136,34 +136,34 @@
return STI->getWriteProcResEnd(SC);
}
- /// \brief Multiply the number of units consumed for a resource by this factor
+ /// Multiply the number of units consumed for a resource by this factor
/// to normalize it relative to other resources.
unsigned getResourceFactor(unsigned ResIdx) const {
return ResourceFactors[ResIdx];
}
- /// \brief Multiply number of micro-ops by this factor to normalize it
+ /// Multiply number of micro-ops by this factor to normalize it
/// relative to other resources.
unsigned getMicroOpFactor() const {
return MicroOpFactor;
}
- /// \brief Multiply cycle count by this factor to normalize it relative to
+ /// Multiply cycle count by this factor to normalize it relative to
/// other resources. This is the number of resource units per cycle.
unsigned getLatencyFactor() const {
return ResourceLCM;
}
- /// \brief Number of micro-ops that may be buffered for OOO execution.
+ /// Number of micro-ops that may be buffered for OOO execution.
unsigned getMicroOpBufferSize() const { return SchedModel.MicroOpBufferSize; }
- /// \brief Number of resource units that may be buffered for OOO execution.
+ /// Number of resource units that may be buffered for OOO execution.
/// \return The buffer size in resource units or -1 for unlimited.
int getResourceBufferSize(unsigned PIdx) const {
return SchedModel.getProcResource(PIdx)->BufferSize;
}
- /// \brief Compute operand latency based on the available machine model.
+ /// Compute operand latency based on the available machine model.
///
/// Compute and return the latency of the given data dependent def and use
/// when the operand indices are already known. UseMI may be NULL for an
@@ -172,7 +172,7 @@
const MachineInstr *UseMI, unsigned UseOperIdx)
const;
- /// \brief Compute the instruction latency based on the available machine
+ /// Compute the instruction latency based on the available machine
/// model.
///
/// Compute and return the expected latency of this instruction independent of
@@ -185,18 +185,20 @@
/// if converter after moving it to TargetSchedModel).
unsigned computeInstrLatency(const MachineInstr *MI,
bool UseDefaultDefLatency = true) const;
+ unsigned computeInstrLatency(const MCInst &Inst) const;
unsigned computeInstrLatency(unsigned Opcode) const;
- /// \brief Output dependency latency of a pair of defs of the same register.
+ /// Output dependency latency of a pair of defs of the same register.
///
/// This is typically one cycle.
- unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefIdx,
+ unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
const MachineInstr *DepMI) const;
- /// \brief Compute the reciprocal throughput of the given instruction.
- Optional<double> computeInstrRThroughput(const MachineInstr *MI) const;
- Optional<double> computeInstrRThroughput(unsigned Opcode) const;
+ /// Compute the reciprocal throughput of the given instruction.
+ double computeReciprocalThroughput(const MachineInstr *MI) const;
+ double computeReciprocalThroughput(const MCInst &MI) const;
+ double computeReciprocalThroughput(unsigned Opcode) const;
};
} // end namespace llvm
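The init() change above drops two parameters because the model now queries
the subtarget itself for its MCSchedModel and TargetInstrInfo, and the
throughput helpers are renamed and return a plain double instead of an
Optional. A before/after sketch, assuming an existing subtarget reference ST
and a machine instruction MI:

  TargetSchedModel SchedModel;
  // Before: SchedModel.init(ST.getSchedModel(), &ST, ST.getInstrInfo());
  SchedModel.init(&ST); // single-argument form introduced here
  double RThroughput = SchedModel.computeReciprocalThroughput(&MI);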
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetSubtargetInfo.h b/linux-x64/clang/include/llvm/CodeGen/TargetSubtargetInfo.h
index 5e5faac..227e591 100644
--- a/linux-x64/clang/include/llvm/CodeGen/TargetSubtargetInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -144,7 +144,7 @@
return 0;
}
- /// \brief True if the subtarget should run MachineScheduler after aggressive
+ /// True if the subtarget should run MachineScheduler after aggressive
/// coalescing.
///
/// This currently replaces the SelectionDAG scheduler with the "source" order
@@ -152,14 +152,14 @@
/// TargetLowering preference). It does not yet disable the postRA scheduler.
virtual bool enableMachineScheduler() const;
- /// \brief Support printing of [latency:throughput] comment in output .S file.
+ /// Support printing of [latency:throughput] comment in output .S file.
virtual bool supportPrintSchedInfo() const { return false; }
- /// \brief True if the machine scheduler should disable the TLI preference
+ /// True if the machine scheduler should disable the TLI preference
/// for preRA scheduling with the source level scheduler.
virtual bool enableMachineSchedDefaultSched() const { return true; }
- /// \brief True if the subtarget should enable joining global copies.
+ /// True if the subtarget should enable joining global copies.
///
/// By default this is enabled if the machine scheduler is enabled, but
/// can be overridden.
@@ -171,13 +171,13 @@
/// which is the preferred way to influence this.
virtual bool enablePostRAScheduler() const;
- /// \brief True if the subtarget should run the atomic expansion pass.
+ /// True if the subtarget should run the atomic expansion pass.
virtual bool enableAtomicExpand() const;
/// True if the subtarget should run the indirectbr expansion pass.
virtual bool enableIndirectBrExpand() const;
- /// \brief Override generic scheduling policy within a region.
+ /// Override generic scheduling policy within a region.
///
/// This is a convenient way for targets that don't provide any custom
/// scheduling heuristics (no custom MachineSchedStrategy) to make
@@ -185,7 +185,7 @@
virtual void overrideSchedPolicy(MachineSchedPolicy &Policy,
unsigned NumRegionInstrs) const {}
- // \brief Perform target specific adjustments to the latency of a schedule
+ // Perform target specific adjustments to the latency of a schedule
// dependency.
virtual void adjustSchedDependency(SUnit *def, SUnit *use, SDep &dep) const {}
@@ -200,13 +200,13 @@
return CriticalPathRCs.clear();
}
- // \brief Provide an ordered list of schedule DAG mutations for the post-RA
+ // Provide an ordered list of schedule DAG mutations for the post-RA
// scheduler.
virtual void getPostRAMutations(
std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
}
- // \brief Provide an ordered list of schedule DAG mutations for the machine
+ // Provide an ordered list of schedule DAG mutations for the machine
// pipeliner.
virtual void getSMSMutations(
std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
@@ -218,25 +218,25 @@
return CodeGenOpt::Default;
}
- /// \brief True if the subtarget should run the local reassignment
+ /// True if the subtarget should run the local reassignment
/// heuristic of the register allocator.
/// This heuristic may be compile time intensive, \p OptLevel provides
/// a finer grain to tune the register allocator.
virtual bool enableRALocalReassignment(CodeGenOpt::Level OptLevel) const;
- /// \brief True if the subtarget should consider the cost of local intervals
+ /// True if the subtarget should consider the cost of local intervals
/// created by a split candidate when choosing the best split candidate. This
/// heuristic may be compile time intensive.
virtual bool enableAdvancedRASplitCost() const;
- /// \brief Enable use of alias analysis during code generation (during MI
+ /// Enable use of alias analysis during code generation (during MI
/// scheduling, DAGCombine, etc.).
virtual bool useAA() const;
- /// \brief Enable the use of the early if conversion pass.
+ /// Enable the use of the early if conversion pass.
virtual bool enableEarlyIfConversion() const { return false; }
- /// \brief Return PBQPConstraint(s) for the target.
+ /// Return PBQPConstraint(s) for the target.
///
/// Override to provide custom PBQP constraints.
virtual std::unique_ptr<PBQPRAConstraint> getCustomPBQPConstraints() const {
@@ -249,7 +249,7 @@
virtual bool enableSubRegLiveness() const { return false; }
/// Returns string representation of scheduler comment
- std::string getSchedInfoStr(const MachineInstr &MI) const override;
+ std::string getSchedInfoStr(const MachineInstr &MI) const;
std::string getSchedInfoStr(MCInst const &MCI) const override;
/// This is called after a .mir file was loaded.
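Most of these hooks are simple opt-in switches for a backend. A hedged
sketch of a subtarget enabling a few of them; MyTargetSubtarget and its
generated superclass are hypothetical, and constructor plumbing is omitted:

  class MyTargetSubtarget : public MyTargetGenSubtargetInfo {
  public:
    bool enableMachineScheduler() const override { return true; }
    bool useAA() const override { return true; }
    void overrideSchedPolicy(MachineSchedPolicy &Policy,
                             unsigned NumRegionInstrs) const override {
      // Only track register pressure in larger scheduling regions.
      Policy.ShouldTrackPressure = NumRegionInstrs > 32;
    }
  };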
diff --git a/linux-x64/clang/include/llvm/CodeGen/ValueTypes.td b/linux-x64/clang/include/llvm/CodeGen/ValueTypes.td
index 673eec9..0abb4ec 100644
--- a/linux-x64/clang/include/llvm/CodeGen/ValueTypes.td
+++ b/linux-x64/clang/include/llvm/CodeGen/ValueTypes.td
@@ -8,8 +8,8 @@
//===----------------------------------------------------------------------===//
//
// Value types - These values correspond to the register types defined in the
-// ValueTypes.h file. If you update anything here, you must update it there as
-// well!
+// MachineValueTypes.h file. If you update anything here, you must update it
+// there as well!
//
//===----------------------------------------------------------------------===//
diff --git a/linux-x64/clang/include/llvm/CodeGen/VirtRegMap.h b/linux-x64/clang/include/llvm/CodeGen/VirtRegMap.h
index 3b06f03..6a8e50a 100644
--- a/linux-x64/clang/include/llvm/CodeGen/VirtRegMap.h
+++ b/linux-x64/clang/include/llvm/CodeGen/VirtRegMap.h
@@ -90,24 +90,24 @@
void grow();
- /// @brief returns true if the specified virtual register is
+ /// returns true if the specified virtual register is
/// mapped to a physical register
bool hasPhys(unsigned virtReg) const {
return getPhys(virtReg) != NO_PHYS_REG;
}
- /// @brief returns the physical register mapped to the specified
+ /// returns the physical register mapped to the specified
/// virtual register
unsigned getPhys(unsigned virtReg) const {
assert(TargetRegisterInfo::isVirtualRegister(virtReg));
return Virt2PhysMap[virtReg];
}
- /// @brief creates a mapping for the specified virtual register to
+ /// creates a mapping for the specified virtual register to
/// the specified physical register
void assignVirt2Phys(unsigned virtReg, MCPhysReg physReg);
- /// @brief clears the specified virtual register's, physical
+ /// clears the specified virtual register's physical
/// register mapping
void clearVirt(unsigned virtReg) {
assert(TargetRegisterInfo::isVirtualRegister(virtReg));
@@ -116,26 +116,26 @@
Virt2PhysMap[virtReg] = NO_PHYS_REG;
}
- /// @brief clears all virtual to physical register mappings
+ /// clears all virtual to physical register mappings
void clearAllVirt() {
Virt2PhysMap.clear();
grow();
}
- /// @brief returns true if VirtReg is assigned to its preferred physreg.
+ /// returns true if VirtReg is assigned to its preferred physreg.
bool hasPreferredPhys(unsigned VirtReg);
- /// @brief returns true if VirtReg has a known preferred register.
+ /// returns true if VirtReg has a known preferred register.
/// This returns false if VirtReg has a preference that is a virtual
/// register that hasn't been assigned yet.
bool hasKnownPreference(unsigned VirtReg);
- /// @brief records virtReg is a split live interval from SReg.
+ /// records virtReg is a split live interval from SReg.
void setIsSplitFromReg(unsigned virtReg, unsigned SReg) {
Virt2SplitMap[virtReg] = SReg;
}
- /// @brief returns the live interval virtReg is split from.
+ /// returns the live interval virtReg is split from.
unsigned getPreSplitReg(unsigned virtReg) const {
return Virt2SplitMap[virtReg];
}
@@ -149,7 +149,7 @@
return Orig ? Orig : VirtReg;
}
- /// @brief returns true if the specified virtual register is not
+ /// returns true if the specified virtual register is not
/// mapped to a stack slot or rematerialized.
bool isAssignedReg(unsigned virtReg) const {
if (getStackSlot(virtReg) == NO_STACK_SLOT)
@@ -159,20 +159,20 @@
return (Virt2SplitMap[virtReg] && Virt2PhysMap[virtReg] != NO_PHYS_REG);
}
- /// @brief returns the stack slot mapped to the specified virtual
+ /// returns the stack slot mapped to the specified virtual
/// register
int getStackSlot(unsigned virtReg) const {
assert(TargetRegisterInfo::isVirtualRegister(virtReg));
return Virt2StackSlotMap[virtReg];
}
- /// @brief create a mapping for the specifed virtual register to
+ /// create a mapping for the specified virtual register to
/// the next available stack slot
int assignVirt2StackSlot(unsigned virtReg);
- /// @brief create a mapping for the specified virtual register to
+ /// create a mapping for the specified virtual register to
/// the specified stack slot
- void assignVirt2StackSlot(unsigned virtReg, int frameIndex);
+ void assignVirt2StackSlot(unsigned virtReg, int SS);
void print(raw_ostream &OS, const Module* M = nullptr) const override;
void dump() const;
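A short sketch of how a register allocator drives these mappings; VRM,
VirtReg and PhysReg are illustrative values, not part of this change:

  if (!VRM.hasPhys(VirtReg))
    VRM.assignVirt2Phys(VirtReg, PhysReg); // pin VirtReg to PhysReg
  else if (!VRM.hasPreferredPhys(VirtReg)) {
    VRM.clearVirt(VirtReg); // drop the mapping and retry with a better hint
    VRM.assignVirt2Phys(VirtReg, PhysReg);
  }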
diff --git a/linux-x64/clang/include/llvm/CodeGen/WasmEHFuncInfo.h b/linux-x64/clang/include/llvm/CodeGen/WasmEHFuncInfo.h
new file mode 100644
index 0000000..3ad6760
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/WasmEHFuncInfo.h
@@ -0,0 +1,80 @@
+//===--- llvm/CodeGen/WasmEHFuncInfo.h --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Data structures for Wasm exception handling schemes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_WASMEHFUNCINFO_H
+#define LLVM_CODEGEN_WASMEHFUNCINFO_H
+
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/IR/BasicBlock.h"
+
+namespace llvm {
+
+using BBOrMBB = PointerUnion<const BasicBlock *, MachineBasicBlock *>;
+
+struct WasmEHFuncInfo {
+ // When there is an entry <A, B>, if an exception is not caught by A, it
+ // should next unwind to the EH pad B.
+ DenseMap<BBOrMBB, BBOrMBB> EHPadUnwindMap;
+ // For entry <A, B>, A is a BB with an instruction that may throw
+ // (invoke/cleanupret in LLVM IR, call/rethrow in the backend) and B is an EH
+ // pad that A unwinds to.
+ DenseMap<BBOrMBB, BBOrMBB> ThrowUnwindMap;
+
+ // Helper functions
+ const BasicBlock *getEHPadUnwindDest(const BasicBlock *BB) const {
+ return EHPadUnwindMap.lookup(BB).get<const BasicBlock *>();
+ }
+ void setEHPadUnwindDest(const BasicBlock *BB, const BasicBlock *Dest) {
+ EHPadUnwindMap[BB] = Dest;
+ }
+ const BasicBlock *getThrowUnwindDest(const BasicBlock *BB) const {
+ return ThrowUnwindMap.lookup(BB).get<const BasicBlock *>();
+ }
+ void setThrowUnwindDest(const BasicBlock *BB, const BasicBlock *Dest) {
+ ThrowUnwindMap[BB] = Dest;
+ }
+ bool hasEHPadUnwindDest(const BasicBlock *BB) const {
+ return EHPadUnwindMap.count(BB);
+ }
+ bool hasThrowUnwindDest(const BasicBlock *BB) const {
+ return ThrowUnwindMap.count(BB);
+ }
+
+ MachineBasicBlock *getEHPadUnwindDest(MachineBasicBlock *MBB) const {
+ return EHPadUnwindMap.lookup(MBB).get<MachineBasicBlock *>();
+ }
+ void setEHPadUnwindDest(MachineBasicBlock *MBB, MachineBasicBlock *Dest) {
+ EHPadUnwindMap[MBB] = Dest;
+ }
+ MachineBasicBlock *getThrowUnwindDest(MachineBasicBlock *MBB) const {
+ return ThrowUnwindMap.lookup(MBB).get<MachineBasicBlock *>();
+ }
+ void setThrowUnwindDest(MachineBasicBlock *MBB, MachineBasicBlock *Dest) {
+ ThrowUnwindMap[MBB] = Dest;
+ }
+ bool hasEHPadUnwindDest(MachineBasicBlock *MBB) const {
+ return EHPadUnwindMap.count(MBB);
+ }
+ bool hasThrowUnwindDest(MachineBasicBlock *MBB) const {
+ return ThrowUnwindMap.count(MBB);
+ }
+};
+
+// Analyze the IR in the given function to build WasmEHFuncInfo.
+void calculateWasmEHInfo(const Function *F, WasmEHFuncInfo &EHInfo);
+
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_WASMEHFUNCINFO_H
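A short usage sketch for the new header, assuming a const Function *F and a
const BasicBlock *Pad obtained elsewhere:

  WasmEHFuncInfo EHInfo;
  calculateWasmEHInfo(F, EHInfo); // populate both unwind maps from the IR
  if (EHInfo.hasEHPadUnwindDest(Pad)) {
    // Exceptions not caught by Pad unwind next to this destination.
    const BasicBlock *Next = EHInfo.getEHPadUnwindDest(Pad);
    (void)Next;
  }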