Update clang to r339409.
Change-Id: I800772d2d838223be1f6b40d490c4591b937fca2
diff --git a/linux-x64/clang/include/llvm/Analysis/AliasAnalysis.h b/linux-x64/clang/include/llvm/Analysis/AliasAnalysis.h
index ec4a90c..be3496b 100644
--- a/linux-x64/clang/include/llvm/Analysis/AliasAnalysis.h
+++ b/linux-x64/clang/include/llvm/Analysis/AliasAnalysis.h
@@ -91,6 +91,9 @@
MustAlias,
};
+/// << operator for AliasResult.
+raw_ostream &operator<<(raw_ostream &OS, AliasResult AR);
+
/// Flags indicating whether a memory access modifies or references memory.
///
/// This is no access at all, a modification, a reference, or both
@@ -325,8 +328,8 @@
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
/// A convenience wrapper around the primary \c alias interface.
- AliasResult alias(const Value *V1, uint64_t V1Size, const Value *V2,
- uint64_t V2Size) {
+ AliasResult alias(const Value *V1, LocationSize V1Size, const Value *V2,
+ LocationSize V2Size) {
return alias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
}
@@ -343,8 +346,8 @@
}
/// A convenience wrapper around the \c isNoAlias helper interface.
- bool isNoAlias(const Value *V1, uint64_t V1Size, const Value *V2,
- uint64_t V2Size) {
+ bool isNoAlias(const Value *V1, LocationSize V1Size, const Value *V2,
+ LocationSize V2Size) {
return isNoAlias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
}
@@ -501,7 +504,7 @@
/// getModRefInfo (for call sites) - A convenience wrapper.
ModRefInfo getModRefInfo(ImmutableCallSite CS, const Value *P,
- uint64_t Size) {
+ LocationSize Size) {
return getModRefInfo(CS, MemoryLocation(P, Size));
}
@@ -512,7 +515,8 @@
}
/// getModRefInfo (for calls) - A convenience wrapper.
- ModRefInfo getModRefInfo(const CallInst *C, const Value *P, uint64_t Size) {
+ ModRefInfo getModRefInfo(const CallInst *C, const Value *P,
+ LocationSize Size) {
return getModRefInfo(C, MemoryLocation(P, Size));
}
@@ -523,7 +527,8 @@
}
/// getModRefInfo (for invokes) - A convenience wrapper.
- ModRefInfo getModRefInfo(const InvokeInst *I, const Value *P, uint64_t Size) {
+ ModRefInfo getModRefInfo(const InvokeInst *I, const Value *P,
+ LocationSize Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
@@ -532,7 +537,8 @@
ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc);
/// getModRefInfo (for loads) - A convenience wrapper.
- ModRefInfo getModRefInfo(const LoadInst *L, const Value *P, uint64_t Size) {
+ ModRefInfo getModRefInfo(const LoadInst *L, const Value *P,
+ LocationSize Size) {
return getModRefInfo(L, MemoryLocation(P, Size));
}
@@ -541,7 +547,8 @@
ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc);
/// getModRefInfo (for stores) - A convenience wrapper.
- ModRefInfo getModRefInfo(const StoreInst *S, const Value *P, uint64_t Size) {
+ ModRefInfo getModRefInfo(const StoreInst *S, const Value *P,
+ LocationSize Size) {
return getModRefInfo(S, MemoryLocation(P, Size));
}
@@ -550,7 +557,8 @@
ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc);
/// getModRefInfo (for fences) - A convenience wrapper.
- ModRefInfo getModRefInfo(const FenceInst *S, const Value *P, uint64_t Size) {
+ ModRefInfo getModRefInfo(const FenceInst *S, const Value *P,
+ LocationSize Size) {
return getModRefInfo(S, MemoryLocation(P, Size));
}
@@ -580,7 +588,8 @@
ModRefInfo getModRefInfo(const VAArgInst *I, const MemoryLocation &Loc);
/// getModRefInfo (for va_args) - A convenience wrapper.
- ModRefInfo getModRefInfo(const VAArgInst *I, const Value *P, uint64_t Size) {
+ ModRefInfo getModRefInfo(const VAArgInst *I, const Value *P,
+ LocationSize Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
@@ -590,7 +599,7 @@
/// getModRefInfo (for catchpads) - A convenience wrapper.
ModRefInfo getModRefInfo(const CatchPadInst *I, const Value *P,
- uint64_t Size) {
+ LocationSize Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
@@ -600,7 +609,7 @@
/// getModRefInfo (for catchrets) - A convenience wrapper.
ModRefInfo getModRefInfo(const CatchReturnInst *I, const Value *P,
- uint64_t Size) {
+ LocationSize Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
@@ -646,7 +655,7 @@
/// A convenience wrapper for constructing the memory location.
ModRefInfo getModRefInfo(const Instruction *I, const Value *P,
- uint64_t Size) {
+ LocationSize Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
@@ -659,7 +668,7 @@
/// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
- /// \brief Return information about whether a particular call site modifies
+ /// Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
/// instruction ordering queries inside the BasicBlock containing \p I.
@@ -669,9 +678,9 @@
const MemoryLocation &MemLoc, DominatorTree *DT,
OrderedBasicBlock *OBB = nullptr);
- /// \brief A convenience wrapper to synthesize a memory location.
+ /// A convenience wrapper to synthesize a memory location.
ModRefInfo callCapturesBefore(const Instruction *I, const Value *P,
- uint64_t Size, DominatorTree *DT,
+ LocationSize Size, DominatorTree *DT,
OrderedBasicBlock *OBB = nullptr) {
return callCapturesBefore(I, MemoryLocation(P, Size), DT, OBB);
}
@@ -687,7 +696,7 @@
/// A convenience wrapper synthesizing a memory location.
bool canBasicBlockModify(const BasicBlock &BB, const Value *P,
- uint64_t Size) {
+ LocationSize Size) {
return canBasicBlockModify(BB, MemoryLocation(P, Size));
}
@@ -702,7 +711,7 @@
/// A convenience wrapper synthesizing a memory location.
bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
- const Value *Ptr, uint64_t Size,
+ const Value *Ptr, LocationSize Size,
const ModRefInfo Mode) {
return canInstructionRangeModRef(I1, I2, MemoryLocation(Ptr, Size), Mode);
}
diff --git a/linux-x64/clang/include/llvm/Analysis/AliasAnalysisEvaluator.h b/linux-x64/clang/include/llvm/Analysis/AliasAnalysisEvaluator.h
index cd2f631..0941814 100644
--- a/linux-x64/clang/include/llvm/Analysis/AliasAnalysisEvaluator.h
+++ b/linux-x64/clang/include/llvm/Analysis/AliasAnalysisEvaluator.h
@@ -56,7 +56,7 @@
}
~AAEvaluator();
- /// \brief Run the pass over the function.
+ /// Run the pass over the function.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
private:
diff --git a/linux-x64/clang/include/llvm/Analysis/AliasSetTracker.h b/linux-x64/clang/include/llvm/Analysis/AliasSetTracker.h
index 7da3eba..0e6d229 100644
--- a/linux-x64/clang/include/llvm/Analysis/AliasSetTracker.h
+++ b/linux-x64/clang/include/llvm/Analysis/AliasSetTracker.h
@@ -37,8 +37,8 @@
class AliasSetTracker;
class BasicBlock;
class LoadInst;
-class MemSetInst;
-class MemTransferInst;
+class AnyMemSetInst;
+class AnyMemTransferInst;
class raw_ostream;
class StoreInst;
class VAArgInst;
@@ -52,7 +52,7 @@
PointerRec **PrevInList = nullptr;
PointerRec *NextInList = nullptr;
AliasSet *AS = nullptr;
- uint64_t Size = 0;
+ LocationSize Size = 0;
AAMDNodes AAInfo;
public:
@@ -69,7 +69,7 @@
return &NextInList;
}
- bool updateSizeAndAAInfo(uint64_t NewSize, const AAMDNodes &NewAAInfo) {
+ bool updateSizeAndAAInfo(LocationSize NewSize, const AAMDNodes &NewAAInfo) {
bool SizeChanged = false;
if (NewSize > Size) {
Size = NewSize;
@@ -91,7 +91,7 @@
return SizeChanged;
}
- uint64_t getSize() const { return Size; }
+ LocationSize getSize() const { return Size; }
/// Return the AAInfo, or null if there is no information or conflicting
/// information.
@@ -224,6 +224,20 @@
// track of the list's exact size.
unsigned size() { return SetSize; }
+ /// If this alias set is known to contain a single instruction and *only* a
+ /// single unique instruction, return it. Otherwise, return nullptr.
+ Instruction* getUniqueInstruction() {
+ if (size() != 0)
+ // Can't track source of pointer, might be many instructions
+ return nullptr;
+ if (AliasAny)
+ // May have collapsed alias set
+ return nullptr;
+ if (1 != UnknownInsts.size())
+ return nullptr;
+ return cast<Instruction>(UnknownInsts[0]);
+ }
+
void print(raw_ostream &OS) const;
void dump() const;
@@ -247,7 +261,7 @@
value_type *operator->() const { return &operator*(); }
Value *getPointer() const { return CurNode->getValue(); }
- uint64_t getSize() const { return CurNode->getSize(); }
+ LocationSize getSize() const { return CurNode->getSize(); }
AAMDNodes getAAInfo() const { return CurNode->getAAInfo(); }
iterator& operator++() { // Preincrement
@@ -287,9 +301,8 @@
void removeFromTracker(AliasSetTracker &AST);
- void addPointer(AliasSetTracker &AST, PointerRec &Entry, uint64_t Size,
- const AAMDNodes &AAInfo,
- bool KnownMustAlias = false);
+ void addPointer(AliasSetTracker &AST, PointerRec &Entry, LocationSize Size,
+ const AAMDNodes &AAInfo, bool KnownMustAlias = false);
void addUnknownInst(Instruction *I, AliasAnalysis &AA);
void removeUnknownInst(AliasSetTracker &AST, Instruction *I) {
@@ -309,8 +322,8 @@
public:
/// Return true if the specified pointer "may" (or must) alias one of the
/// members in the set.
- bool aliasesPointer(const Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo,
- AliasAnalysis &AA) const;
+ bool aliasesPointer(const Value *Ptr, LocationSize Size,
+ const AAMDNodes &AAInfo, AliasAnalysis &AA) const;
bool aliasesUnknownInst(const Instruction *Inst, AliasAnalysis &AA) const;
};
@@ -364,12 +377,12 @@
/// These methods return true if inserting the instruction resulted in the
/// addition of a new alias set (i.e., the pointer did not alias anything).
///
- void add(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo); // Add a loc.
+ void add(Value *Ptr, LocationSize Size, const AAMDNodes &AAInfo); // Add a loc
void add(LoadInst *LI);
void add(StoreInst *SI);
void add(VAArgInst *VAAI);
- void add(MemSetInst *MSI);
- void add(MemTransferInst *MTI);
+ void add(AnyMemSetInst *MSI);
+ void add(AnyMemTransferInst *MTI);
void add(Instruction *I); // Dispatch to one of the other add methods...
void add(BasicBlock &BB); // Add all instructions in basic block
void add(const AliasSetTracker &AST); // Add alias relations from another AST
@@ -384,12 +397,12 @@
/// argument is non-null, this method sets the value to true if a new alias
/// set is created to contain the pointer (because the pointer didn't alias
/// anything).
- AliasSet &getAliasSetForPointer(Value *P, uint64_t Size,
+ AliasSet &getAliasSetForPointer(Value *P, LocationSize Size,
const AAMDNodes &AAInfo);
/// Return the alias set containing the location specified if one exists,
/// otherwise return null.
- AliasSet *getAliasSetForPointerIfExists(const Value *P, uint64_t Size,
+ AliasSet *getAliasSetForPointerIfExists(const Value *P, LocationSize Size,
const AAMDNodes &AAInfo) {
return mergeAliasSetsForPointer(P, Size, AAInfo);
}
@@ -446,9 +459,9 @@
return *Entry;
}
- AliasSet &addPointer(Value *P, uint64_t Size, const AAMDNodes &AAInfo,
+ AliasSet &addPointer(Value *P, LocationSize Size, const AAMDNodes &AAInfo,
AliasSet::AccessLattice E);
- AliasSet *mergeAliasSetsForPointer(const Value *Ptr, uint64_t Size,
+ AliasSet *mergeAliasSetsForPointer(const Value *Ptr, LocationSize Size,
const AAMDNodes &AAInfo);
/// Merge all alias sets into a single set that is considered to alias any
diff --git a/linux-x64/clang/include/llvm/Analysis/AssumptionCache.h b/linux-x64/clang/include/llvm/Analysis/AssumptionCache.h
index c965e62..46538b1 100644
--- a/linux-x64/clang/include/llvm/Analysis/AssumptionCache.h
+++ b/linux-x64/clang/include/llvm/Analysis/AssumptionCache.h
@@ -32,20 +32,20 @@
class raw_ostream;
class Value;
-/// \brief A cache of @llvm.assume calls within a function.
+/// A cache of \@llvm.assume calls within a function.
///
/// This cache provides fast lookup of assumptions within a function by caching
/// them and amortizing the cost of scanning for them across all queries. Passes
/// that create new assumptions are required to call registerAssumption() to
-/// register any new @llvm.assume calls that they create. Deletions of
-/// @llvm.assume calls do not require special handling.
+/// register any new \@llvm.assume calls that they create. Deletions of
+/// \@llvm.assume calls do not require special handling.
class AssumptionCache {
- /// \brief The function for which this cache is handling assumptions.
+ /// The function for which this cache is handling assumptions.
///
/// We track this to lazily populate our assumptions.
Function &F;
- /// \brief Vector of weak value handles to calls of the @llvm.assume
+ /// Vector of weak value handles to calls of the \@llvm.assume
/// intrinsic.
SmallVector<WeakTrackingVH, 4> AssumeHandles;
@@ -64,7 +64,7 @@
friend AffectedValueCallbackVH;
- /// \brief A map of values about which an assumption might be providing
+ /// A map of values about which an assumption might be providing
/// information to the relevant set of assumptions.
using AffectedValuesMap =
DenseMap<AffectedValueCallbackVH, SmallVector<WeakTrackingVH, 1>,
@@ -77,17 +77,17 @@
/// Copy affected values in the cache for OV to be affected values for NV.
void copyAffectedValuesInCache(Value *OV, Value *NV);
- /// \brief Flag tracking whether we have scanned the function yet.
+ /// Flag tracking whether we have scanned the function yet.
///
/// We want to be as lazy about this as possible, and so we scan the function
/// at the last moment.
bool Scanned = false;
- /// \brief Scan the function for assumptions and add them to the cache.
+ /// Scan the function for assumptions and add them to the cache.
void scanFunction();
public:
- /// \brief Construct an AssumptionCache from a function by scanning all of
+ /// Construct an AssumptionCache from a function by scanning all of
/// its instructions.
AssumptionCache(Function &F) : F(F) {}
@@ -98,17 +98,17 @@
return false;
}
- /// \brief Add an @llvm.assume intrinsic to this function's cache.
+ /// Add an \@llvm.assume intrinsic to this function's cache.
///
/// The call passed in must be an instruction within this function and must
/// not already be in the cache.
void registerAssumption(CallInst *CI);
- /// \brief Update the cache of values being affected by this assumption (i.e.
+ /// Update the cache of values being affected by this assumption (i.e.
/// the values about which this assumption provides information).
void updateAffectedValues(CallInst *CI);
- /// \brief Clear the cache of @llvm.assume intrinsics for a function.
+ /// Clear the cache of \@llvm.assume intrinsics for a function.
///
/// It will be re-scanned the next time it is requested.
void clear() {
@@ -117,7 +117,7 @@
Scanned = false;
}
- /// \brief Access the list of assumption handles currently tracked for this
+ /// Access the list of assumption handles currently tracked for this
/// function.
///
/// Note that these produce weak handles that may be null. The caller must
@@ -131,7 +131,7 @@
return AssumeHandles;
}
- /// \brief Access the list of assumptions which affect this value.
+ /// Access the list of assumptions which affect this value.
MutableArrayRef<WeakTrackingVH> assumptionsFor(const Value *V) {
if (!Scanned)
scanFunction();
@@ -144,7 +144,7 @@
}
};
-/// \brief A function analysis which provides an \c AssumptionCache.
+/// A function analysis which provides an \c AssumptionCache.
///
/// This analysis is intended for use with the new pass manager and will vend
/// assumption caches for a given function.
@@ -161,7 +161,7 @@
}
};
-/// \brief Printer pass for the \c AssumptionAnalysis results.
+/// Printer pass for the \c AssumptionAnalysis results.
class AssumptionPrinterPass : public PassInfoMixin<AssumptionPrinterPass> {
raw_ostream &OS;
@@ -171,7 +171,7 @@
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
-/// \brief An immutable pass that tracks lazily created \c AssumptionCache
+/// An immutable pass that tracks lazily created \c AssumptionCache
/// objects.
///
/// This is essentially a workaround for the legacy pass manager's weaknesses
@@ -203,7 +203,7 @@
FunctionCallsMap AssumptionCaches;
public:
- /// \brief Get the cached assumptions for a function.
+ /// Get the cached assumptions for a function.
///
/// If no assumptions are cached, this will scan the function. Otherwise, the
/// existing cache will be returned.
diff --git a/linux-x64/clang/include/llvm/Analysis/BasicAliasAnalysis.h b/linux-x64/clang/include/llvm/Analysis/BasicAliasAnalysis.h
index 42e5e97..6344e84 100644
--- a/linux-x64/clang/include/llvm/Analysis/BasicAliasAnalysis.h
+++ b/linux-x64/clang/include/llvm/Analysis/BasicAliasAnalysis.h
@@ -43,6 +43,7 @@
class PHINode;
class SelectInst;
class TargetLibraryInfo;
+class PhiValues;
class Value;
/// This is the AA result object for the basic, local, and stateless alias
@@ -55,26 +56,30 @@
friend AAResultBase<BasicAAResult>;
const DataLayout &DL;
+ const Function &F;
const TargetLibraryInfo &TLI;
AssumptionCache &AC;
DominatorTree *DT;
LoopInfo *LI;
+ PhiValues *PV;
public:
- BasicAAResult(const DataLayout &DL, const TargetLibraryInfo &TLI,
- AssumptionCache &AC, DominatorTree *DT = nullptr,
- LoopInfo *LI = nullptr)
- : AAResultBase(), DL(DL), TLI(TLI), AC(AC), DT(DT), LI(LI) {}
+ BasicAAResult(const DataLayout &DL, const Function &F,
+ const TargetLibraryInfo &TLI, AssumptionCache &AC,
+ DominatorTree *DT = nullptr, LoopInfo *LI = nullptr,
+ PhiValues *PV = nullptr)
+ : AAResultBase(), DL(DL), F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), PV(PV)
+ {}
BasicAAResult(const BasicAAResult &Arg)
- : AAResultBase(Arg), DL(Arg.DL), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
- LI(Arg.LI) {}
+ : AAResultBase(Arg), DL(Arg.DL), F(Arg.F), TLI(Arg.TLI), AC(Arg.AC),
+ DT(Arg.DT), LI(Arg.LI), PV(Arg.PV) {}
BasicAAResult(BasicAAResult &&Arg)
- : AAResultBase(std::move(Arg)), DL(Arg.DL), TLI(Arg.TLI), AC(Arg.AC),
- DT(Arg.DT), LI(Arg.LI) {}
+ : AAResultBase(std::move(Arg)), DL(Arg.DL), F(Arg.F), TLI(Arg.TLI),
+ AC(Arg.AC), DT(Arg.DT), LI(Arg.LI), PV(Arg.PV) {}
/// Handle invalidation events in the new pass manager.
- bool invalidate(Function &F, const PreservedAnalyses &PA,
+ bool invalidate(Function &Fn, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &Inv);
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
@@ -94,7 +99,7 @@
/// Returns the behavior when calling the given function. For use when the
/// call site is not known.
- FunctionModRefBehavior getModRefBehavior(const Function *F);
+ FunctionModRefBehavior getModRefBehavior(const Function *Fn);
private:
// A linear transformation of a Value; this class represents ZExt(SExt(V,
@@ -171,9 +176,9 @@
static bool isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
- uint64_t ObjectAccessSize);
+ LocationSize ObjectAccessSize);
- /// \brief A Heuristic for aliasGEP that searches for a constant offset
+ /// A Heuristic for aliasGEP that searches for a constant offset
/// between the variables.
///
/// GetLinearExpression has some limitations, as generally zext(%x + 1)
@@ -183,31 +188,33 @@
/// the addition overflows.
bool
constantOffsetHeuristic(const SmallVectorImpl<VariableGEPIndex> &VarIndices,
- uint64_t V1Size, uint64_t V2Size, int64_t BaseOffset,
- AssumptionCache *AC, DominatorTree *DT);
+ LocationSize V1Size, LocationSize V2Size,
+ int64_t BaseOffset, AssumptionCache *AC,
+ DominatorTree *DT);
bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2);
void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
const SmallVectorImpl<VariableGEPIndex> &Src);
- AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
+ AliasResult aliasGEP(const GEPOperator *V1, LocationSize V1Size,
const AAMDNodes &V1AAInfo, const Value *V2,
- uint64_t V2Size, const AAMDNodes &V2AAInfo,
+ LocationSize V2Size, const AAMDNodes &V2AAInfo,
const Value *UnderlyingV1, const Value *UnderlyingV2);
- AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
+ AliasResult aliasPHI(const PHINode *PN, LocationSize PNSize,
const AAMDNodes &PNAAInfo, const Value *V2,
- uint64_t V2Size, const AAMDNodes &V2AAInfo,
+ LocationSize V2Size, const AAMDNodes &V2AAInfo,
const Value *UnderV2);
- AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
+ AliasResult aliasSelect(const SelectInst *SI, LocationSize SISize,
const AAMDNodes &SIAAInfo, const Value *V2,
- uint64_t V2Size, const AAMDNodes &V2AAInfo,
+ LocationSize V2Size, const AAMDNodes &V2AAInfo,
const Value *UnderV2);
- AliasResult aliasCheck(const Value *V1, uint64_t V1Size, AAMDNodes V1AATag,
- const Value *V2, uint64_t V2Size, AAMDNodes V2AATag,
+ AliasResult aliasCheck(const Value *V1, LocationSize V1Size,
+ AAMDNodes V1AATag, const Value *V2,
+ LocationSize V2Size, AAMDNodes V2AATag,
const Value *O1 = nullptr, const Value *O2 = nullptr);
};
diff --git a/linux-x64/clang/include/llvm/Analysis/BlockFrequencyInfo.h b/linux-x64/clang/include/llvm/Analysis/BlockFrequencyInfo.h
index 89370cb..ca12db6 100644
--- a/linux-x64/clang/include/llvm/Analysis/BlockFrequencyInfo.h
+++ b/linux-x64/clang/include/llvm/Analysis/BlockFrequencyInfo.h
@@ -65,17 +65,17 @@
/// floating points.
BlockFrequency getBlockFreq(const BasicBlock *BB) const;
- /// \brief Returns the estimated profile count of \p BB.
+ /// Returns the estimated profile count of \p BB.
/// This computes the relative block frequency of \p BB and multiplies it by
/// the enclosing function's count (if available) and returns the value.
Optional<uint64_t> getBlockProfileCount(const BasicBlock *BB) const;
- /// \brief Returns the estimated profile count of \p Freq.
+ /// Returns the estimated profile count of \p Freq.
/// This uses the frequency \p Freq and multiplies it by
/// the enclosing function's count (if available) and returns the value.
Optional<uint64_t> getProfileCountFromFreq(uint64_t Freq) const;
- /// \brief Returns true if \p BB is an irreducible loop header
+ /// Returns true if \p BB is an irreducible loop header
/// block. Otherwise false.
bool isIrrLoopHeader(const BasicBlock *BB);
@@ -105,7 +105,7 @@
void print(raw_ostream &OS) const;
};
-/// \brief Analysis pass which computes \c BlockFrequencyInfo.
+/// Analysis pass which computes \c BlockFrequencyInfo.
class BlockFrequencyAnalysis
: public AnalysisInfoMixin<BlockFrequencyAnalysis> {
friend AnalysisInfoMixin<BlockFrequencyAnalysis>;
@@ -113,14 +113,14 @@
static AnalysisKey Key;
public:
- /// \brief Provide the result type for this analysis pass.
+ /// Provide the result type for this analysis pass.
using Result = BlockFrequencyInfo;
- /// \brief Run the analysis pass over a function and produce BFI.
+ /// Run the analysis pass over a function and produce BFI.
Result run(Function &F, FunctionAnalysisManager &AM);
};
-/// \brief Printer pass for the \c BlockFrequencyInfo results.
+/// Printer pass for the \c BlockFrequencyInfo results.
class BlockFrequencyPrinterPass
: public PassInfoMixin<BlockFrequencyPrinterPass> {
raw_ostream &OS;
@@ -131,7 +131,7 @@
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
-/// \brief Legacy analysis pass which computes \c BlockFrequencyInfo.
+/// Legacy analysis pass which computes \c BlockFrequencyInfo.
class BlockFrequencyInfoWrapperPass : public FunctionPass {
BlockFrequencyInfo BFI;
diff --git a/linux-x64/clang/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/linux-x64/clang/include/llvm/Analysis/BlockFrequencyInfoImpl.h
index 40c40b8..25b2efd 100644
--- a/linux-x64/clang/include/llvm/Analysis/BlockFrequencyInfoImpl.h
+++ b/linux-x64/clang/include/llvm/Analysis/BlockFrequencyInfoImpl.h
@@ -66,7 +66,7 @@
// This is part of a workaround for a GCC 4.7 crash on lambdas.
template <class BT> struct BlockEdgesAdder;
-/// \brief Mass of a block.
+/// Mass of a block.
///
/// This class implements a sort of fixed-point fraction always between 0.0 and
/// 1.0. getMass() == std::numeric_limits<uint64_t>::max() indicates a value of
@@ -100,7 +100,7 @@
bool operator!() const { return isEmpty(); }
- /// \brief Add another mass.
+ /// Add another mass.
///
/// Adds another mass, saturating at \a isFull() rather than overflowing.
BlockMass &operator+=(BlockMass X) {
@@ -109,7 +109,7 @@
return *this;
}
- /// \brief Subtract another mass.
+ /// Subtract another mass.
///
/// Subtracts another mass, saturating at \a isEmpty() rather than
/// undeflowing.
@@ -131,7 +131,7 @@
bool operator<(BlockMass X) const { return Mass < X.Mass; }
bool operator>(BlockMass X) const { return Mass > X.Mass; }
- /// \brief Convert to scaled number.
+ /// Convert to scaled number.
///
/// Convert to \a ScaledNumber. \a isFull() gives 1.0, while \a isEmpty()
/// gives slightly above 0.0.
@@ -164,7 +164,7 @@
static const bool value = true;
};
-/// \brief Base class for BlockFrequencyInfoImpl
+/// Base class for BlockFrequencyInfoImpl
///
/// BlockFrequencyInfoImplBase has supporting data structures and some
/// algorithms for BlockFrequencyInfoImplBase. Only algorithms that depend on
@@ -177,7 +177,7 @@
using Scaled64 = ScaledNumber<uint64_t>;
using BlockMass = bfi_detail::BlockMass;
- /// \brief Representative of a block.
+ /// Representative of a block.
///
/// This is a simple wrapper around an index into the reverse-post-order
/// traversal of the blocks.
@@ -206,13 +206,13 @@
}
};
- /// \brief Stats about a block itself.
+ /// Stats about a block itself.
struct FrequencyData {
Scaled64 Scaled;
uint64_t Integer;
};
- /// \brief Data about a loop.
+ /// Data about a loop.
///
/// Contains the data necessary to represent a loop as a pseudo-node once it's
/// packaged.
@@ -270,7 +270,7 @@
}
};
- /// \brief Index of loop information.
+ /// Index of loop information.
struct WorkingData {
BlockNode Node; ///< This node.
LoopData *Loop = nullptr; ///< The loop this block is inside.
@@ -293,7 +293,7 @@
return Loop->Parent->Parent;
}
- /// \brief Resolve a node to its representative.
+ /// Resolve a node to its representative.
///
/// Get the node currently representing Node, which could be a containing
/// loop.
@@ -320,7 +320,7 @@
return L;
}
- /// \brief Get the appropriate mass for a node.
+ /// Get the appropriate mass for a node.
///
/// Get appropriate mass for Node. If Node is a loop-header (whose loop
/// has been packaged), returns the mass of its pseudo-node. If it's a
@@ -333,19 +333,19 @@
return Loop->Parent->Mass;
}
- /// \brief Has ContainingLoop been packaged up?
+ /// Has ContainingLoop been packaged up?
bool isPackaged() const { return getResolvedNode() != Node; }
- /// \brief Has Loop been packaged up?
+ /// Has Loop been packaged up?
bool isAPackage() const { return isLoopHeader() && Loop->IsPackaged; }
- /// \brief Has Loop been packaged up twice?
+ /// Has Loop been packaged up twice?
bool isADoublePackage() const {
return isDoubleLoopHeader() && Loop->Parent->IsPackaged;
}
};
- /// \brief Unscaled probability weight.
+ /// Unscaled probability weight.
///
/// Probability weight for an edge in the graph (including the
/// successor/target node).
@@ -369,7 +369,7 @@
: Type(Type), TargetNode(TargetNode), Amount(Amount) {}
};
- /// \brief Distribution of unscaled probability weight.
+ /// Distribution of unscaled probability weight.
///
/// Distribution of unscaled probability weight to a set of successors.
///
@@ -398,7 +398,7 @@
add(Node, Amount, Weight::Backedge);
}
- /// \brief Normalize the distribution.
+ /// Normalize the distribution.
///
/// Combines multiple edges to the same \a Weight::TargetNode and scales
/// down so that \a Total fits into 32-bits.
@@ -413,26 +413,26 @@
void add(const BlockNode &Node, uint64_t Amount, Weight::DistType Type);
};
- /// \brief Data about each block. This is used downstream.
+ /// Data about each block. This is used downstream.
std::vector<FrequencyData> Freqs;
- /// \brief Whether each block is an irreducible loop header.
+ /// Whether each block is an irreducible loop header.
/// This is used downstream.
SparseBitVector<> IsIrrLoopHeader;
- /// \brief Loop data: see initializeLoops().
+ /// Loop data: see initializeLoops().
std::vector<WorkingData> Working;
- /// \brief Indexed information about loops.
+ /// Indexed information about loops.
std::list<LoopData> Loops;
- /// \brief Virtual destructor.
+ /// Virtual destructor.
///
/// Need a virtual destructor to mask the compiler warning about
/// getBlockName().
virtual ~BlockFrequencyInfoImplBase() = default;
- /// \brief Add all edges out of a packaged loop to the distribution.
+ /// Add all edges out of a packaged loop to the distribution.
///
/// Adds all edges from LocalLoopHead to Dist. Calls addToDist() to add each
/// successor edge.
@@ -441,7 +441,7 @@
bool addLoopSuccessorsToDist(const LoopData *OuterLoop, LoopData &Loop,
Distribution &Dist);
- /// \brief Add an edge to the distribution.
+ /// Add an edge to the distribution.
///
/// Adds an edge to Succ to Dist. If \c LoopHead.isValid(), then whether the
/// edge is local/exit/backedge is in the context of LoopHead. Otherwise,
@@ -457,7 +457,7 @@
return *Working[Head.Index].Loop;
}
- /// \brief Analyze irreducible SCCs.
+ /// Analyze irreducible SCCs.
///
/// Separate irreducible SCCs from \c G, which is an explict graph of \c
/// OuterLoop (or the top-level function, if \c OuterLoop is \c nullptr).
@@ -468,7 +468,7 @@
analyzeIrreducible(const bfi_detail::IrreducibleGraph &G, LoopData *OuterLoop,
std::list<LoopData>::iterator Insert);
- /// \brief Update a loop after packaging irreducible SCCs inside of it.
+ /// Update a loop after packaging irreducible SCCs inside of it.
///
/// Update \c OuterLoop. Before finding irreducible control flow, it was
/// partway through \a computeMassInLoop(), so \a LoopData::Exits and \a
@@ -476,7 +476,7 @@
/// up need to be removed from \a OuterLoop::Nodes.
void updateLoopWithIrreducible(LoopData &OuterLoop);
- /// \brief Distribute mass according to a distribution.
+ /// Distribute mass according to a distribution.
///
/// Distributes the mass in Source according to Dist. If LoopHead.isValid(),
/// backedges and exits are stored in its entry in Loops.
@@ -485,7 +485,7 @@
void distributeMass(const BlockNode &Source, LoopData *OuterLoop,
Distribution &Dist);
- /// \brief Compute the loop scale for a loop.
+ /// Compute the loop scale for a loop.
void computeLoopScale(LoopData &Loop);
/// Adjust the mass of all headers in an irreducible loop.
@@ -500,19 +500,19 @@
void distributeIrrLoopHeaderMass(Distribution &Dist);
- /// \brief Package up a loop.
+ /// Package up a loop.
void packageLoop(LoopData &Loop);
- /// \brief Unwrap loops.
+ /// Unwrap loops.
void unwrapLoops();
- /// \brief Finalize frequency metrics.
+ /// Finalize frequency metrics.
///
/// Calculates final frequencies and cleans up no-longer-needed data
/// structures.
void finalizeMetrics();
- /// \brief Clear all memory.
+ /// Clear all memory.
void clear();
virtual std::string getBlockName(const BlockNode &Node) const;
@@ -560,7 +560,7 @@
using LoopInfoT = MachineLoopInfo;
};
-/// \brief Get the name of a MachineBasicBlock.
+/// Get the name of a MachineBasicBlock.
///
/// Get the name of a MachineBasicBlock. It's templated so that including from
/// CodeGen is unnecessary (that would be a layering issue).
@@ -574,13 +574,13 @@
return (MachineName + "[" + BB->getName() + "]").str();
return MachineName.str();
}
-/// \brief Get the name of a BasicBlock.
+/// Get the name of a BasicBlock.
template <> inline std::string getBlockName(const BasicBlock *BB) {
assert(BB && "Unexpected nullptr");
return BB->getName().str();
}
-/// \brief Graph of irreducible control flow.
+/// Graph of irreducible control flow.
///
/// This graph is used for determining the SCCs in a loop (or top-level
/// function) that has irreducible control flow.
@@ -619,7 +619,7 @@
std::vector<IrrNode> Nodes;
SmallDenseMap<uint32_t, IrrNode *, 4> Lookup;
- /// \brief Construct an explicit graph containing irreducible control flow.
+ /// Construct an explicit graph containing irreducible control flow.
///
/// Construct an explicit graph of the control flow in \c OuterLoop (or the
/// top-level function, if \c OuterLoop is \c nullptr). Uses \c
@@ -687,7 +687,7 @@
} // end namespace bfi_detail
-/// \brief Shared implementation for block frequency analysis.
+/// Shared implementation for block frequency analysis.
///
/// This is a shared implementation of BlockFrequencyInfo and
/// MachineBlockFrequencyInfo, and calculates the relative frequencies of
@@ -878,12 +878,12 @@
return RPOT[Node.Index];
}
- /// \brief Run (and save) a post-order traversal.
+ /// Run (and save) a post-order traversal.
///
/// Saves a reverse post-order traversal of all the nodes in \a F.
void initializeRPOT();
- /// \brief Initialize loop data.
+ /// Initialize loop data.
///
/// Build up \a Loops using \a LoopInfo. \a LoopInfo gives us a mapping from
/// each block to the deepest loop it's in, but we need the inverse. For each
@@ -892,7 +892,7 @@
/// the loop that are not in sub-loops.
void initializeLoops();
- /// \brief Propagate to a block's successors.
+ /// Propagate to a block's successors.
///
/// In the context of distributing mass through \c OuterLoop, divide the mass
/// currently assigned to \c Node between its successors.
@@ -900,7 +900,7 @@
/// \return \c true unless there's an irreducible backedge.
bool propagateMassToSuccessors(LoopData *OuterLoop, const BlockNode &Node);
- /// \brief Compute mass in a particular loop.
+ /// Compute mass in a particular loop.
///
/// Assign mass to \c Loop's header, and then for each block in \c Loop in
/// reverse post-order, distribute mass to its successors. Only visits nodes
@@ -910,7 +910,7 @@
/// \return \c true unless there's an irreducible backedge.
bool computeMassInLoop(LoopData &Loop);
- /// \brief Try to compute mass in the top-level function.
+ /// Try to compute mass in the top-level function.
///
/// Assign mass to the entry block, and then for each block in reverse
/// post-order, distribute mass to its successors. Skips nodes that have
@@ -920,7 +920,7 @@
/// \return \c true unless there's an irreducible backedge.
bool tryToComputeMassInFunction();
- /// \brief Compute mass in (and package up) irreducible SCCs.
+ /// Compute mass in (and package up) irreducible SCCs.
///
/// Find the irreducible SCCs in \c OuterLoop, add them to \a Loops (in front
/// of \c Insert), and call \a computeMassInLoop() on each of them.
@@ -935,7 +935,7 @@
void computeIrreducibleMass(LoopData *OuterLoop,
std::list<LoopData>::iterator Insert);
- /// \brief Compute mass in all loops.
+ /// Compute mass in all loops.
///
/// For each loop bottom-up, call \a computeMassInLoop().
///
@@ -946,7 +946,7 @@
/// \post \a computeMassInLoop() has returned \c true for every loop.
void computeMassInLoops();
- /// \brief Compute mass in the top-level function.
+ /// Compute mass in the top-level function.
///
/// Uses \a tryToComputeMassInFunction() and \a computeIrreducibleMass() to
/// compute mass in the top-level function.
@@ -994,7 +994,7 @@
const BranchProbabilityInfoT &getBPI() const { return *BPI; }
- /// \brief Print the frequencies for the current function.
+ /// Print the frequencies for the current function.
///
/// Prints the frequencies for the blocks in the current function.
///
@@ -1030,8 +1030,9 @@
Nodes.clear();
// Initialize.
- DEBUG(dbgs() << "\nblock-frequency: " << F.getName() << "\n================="
- << std::string(F.getName().size(), '=') << "\n");
+ LLVM_DEBUG(dbgs() << "\nblock-frequency: " << F.getName()
+ << "\n================="
+ << std::string(F.getName().size(), '=') << "\n");
initializeRPOT();
initializeLoops();
@@ -1067,10 +1068,11 @@
assert(RPOT.size() - 1 <= BlockNode::getMaxIndex() &&
"More nodes in function than Block Frequency Info supports");
- DEBUG(dbgs() << "reverse-post-order-traversal\n");
+ LLVM_DEBUG(dbgs() << "reverse-post-order-traversal\n");
for (rpot_iterator I = rpot_begin(), E = rpot_end(); I != E; ++I) {
BlockNode Node = getNode(I);
- DEBUG(dbgs() << " - " << getIndex(I) << ": " << getBlockName(Node) << "\n");
+ LLVM_DEBUG(dbgs() << " - " << getIndex(I) << ": " << getBlockName(Node)
+ << "\n");
Nodes[*I] = Node;
}
@@ -1081,7 +1083,7 @@
}
template <class BT> void BlockFrequencyInfoImpl<BT>::initializeLoops() {
- DEBUG(dbgs() << "loop-detection\n");
+ LLVM_DEBUG(dbgs() << "loop-detection\n");
if (LI->empty())
return;
@@ -1099,7 +1101,7 @@
Loops.emplace_back(Parent, Header);
Working[Header.Index].Loop = &Loops.back();
- DEBUG(dbgs() << " - loop = " << getBlockName(Header) << "\n");
+ LLVM_DEBUG(dbgs() << " - loop = " << getBlockName(Header) << "\n");
for (const LoopT *L : *Loop)
Q.emplace_back(L, &Loops.back());
@@ -1128,8 +1130,8 @@
Working[Index].Loop = HeaderData.Loop;
HeaderData.Loop->Nodes.push_back(Index);
- DEBUG(dbgs() << " - loop = " << getBlockName(Header)
- << ": member = " << getBlockName(Index) << "\n");
+ LLVM_DEBUG(dbgs() << " - loop = " << getBlockName(Header)
+ << ": member = " << getBlockName(Index) << "\n");
}
}
@@ -1150,10 +1152,10 @@
template <class BT>
bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
// Compute mass in loop.
- DEBUG(dbgs() << "compute-mass-in-loop: " << getLoopName(Loop) << "\n");
+ LLVM_DEBUG(dbgs() << "compute-mass-in-loop: " << getLoopName(Loop) << "\n");
if (Loop.isIrreducible()) {
- DEBUG(dbgs() << "isIrreducible = true\n");
+ LLVM_DEBUG(dbgs() << "isIrreducible = true\n");
Distribution Dist;
unsigned NumHeadersWithWeight = 0;
Optional<uint64_t> MinHeaderWeight;
@@ -1165,14 +1167,14 @@
IsIrrLoopHeader.set(Loop.Nodes[H].Index);
Optional<uint64_t> HeaderWeight = Block->getIrrLoopHeaderWeight();
if (!HeaderWeight) {
- DEBUG(dbgs() << "Missing irr loop header metadata on "
- << getBlockName(HeaderNode) << "\n");
+ LLVM_DEBUG(dbgs() << "Missing irr loop header metadata on "
+ << getBlockName(HeaderNode) << "\n");
HeadersWithoutWeight.insert(H);
continue;
}
- DEBUG(dbgs() << getBlockName(HeaderNode)
- << " has irr loop header weight " << HeaderWeight.getValue()
- << "\n");
+ LLVM_DEBUG(dbgs() << getBlockName(HeaderNode)
+ << " has irr loop header weight "
+ << HeaderWeight.getValue() << "\n");
NumHeadersWithWeight++;
uint64_t HeaderWeightValue = HeaderWeight.getValue();
if (!MinHeaderWeight || HeaderWeightValue < MinHeaderWeight)
@@ -1194,8 +1196,8 @@
assert(!getBlock(HeaderNode)->getIrrLoopHeaderWeight() &&
"Shouldn't have a weight metadata");
uint64_t MinWeight = MinHeaderWeight.getValue();
- DEBUG(dbgs() << "Giving weight " << MinWeight
- << " to " << getBlockName(HeaderNode) << "\n");
+ LLVM_DEBUG(dbgs() << "Giving weight " << MinWeight << " to "
+ << getBlockName(HeaderNode) << "\n");
if (MinWeight)
Dist.addLocal(HeaderNode, MinWeight);
}
@@ -1224,7 +1226,7 @@
template <class BT>
bool BlockFrequencyInfoImpl<BT>::tryToComputeMassInFunction() {
// Compute mass in function.
- DEBUG(dbgs() << "compute-mass-in-function\n");
+ LLVM_DEBUG(dbgs() << "compute-mass-in-function\n");
assert(!Working.empty() && "no blocks in function");
assert(!Working[0].isLoopHeader() && "entry block is a loop header");
@@ -1276,9 +1278,10 @@
template <class BT>
void BlockFrequencyInfoImpl<BT>::computeIrreducibleMass(
LoopData *OuterLoop, std::list<LoopData>::iterator Insert) {
- DEBUG(dbgs() << "analyze-irreducible-in-";
- if (OuterLoop) dbgs() << "loop: " << getLoopName(*OuterLoop) << "\n";
- else dbgs() << "function\n");
+ LLVM_DEBUG(dbgs() << "analyze-irreducible-in-";
+ if (OuterLoop) dbgs()
+ << "loop: " << getLoopName(*OuterLoop) << "\n";
+ else dbgs() << "function\n");
using namespace bfi_detail;
@@ -1304,7 +1307,7 @@
bool
BlockFrequencyInfoImpl<BT>::propagateMassToSuccessors(LoopData *OuterLoop,
const BlockNode &Node) {
- DEBUG(dbgs() << " - node: " << getBlockName(Node) << "\n");
+ LLVM_DEBUG(dbgs() << " - node: " << getBlockName(Node) << "\n");
// Calculate probability for successors.
Distribution Dist;
if (auto *Loop = Working[Node.Index].getPackagedLoop()) {
diff --git a/linux-x64/clang/include/llvm/Analysis/BranchProbabilityInfo.h b/linux-x64/clang/include/llvm/Analysis/BranchProbabilityInfo.h
index 417b649..45277db 100644
--- a/linux-x64/clang/include/llvm/Analysis/BranchProbabilityInfo.h
+++ b/linux-x64/clang/include/llvm/Analysis/BranchProbabilityInfo.h
@@ -38,7 +38,7 @@
class TargetLibraryInfo;
class Value;
-/// \brief Analysis providing branch probability information.
+/// Analysis providing branch probability information.
///
/// This is a function analysis which provides information on the relative
/// probabilities of each "edge" in the function's CFG where such an edge is
@@ -79,7 +79,7 @@
void print(raw_ostream &OS) const;
- /// \brief Get an edge's probability, relative to other out-edges of the Src.
+ /// Get an edge's probability, relative to other out-edges of the Src.
///
/// This routine provides access to the fractional probability between zero
/// (0%) and one (100%) of this edge executing, relative to other edges
@@ -88,7 +88,7 @@
BranchProbability getEdgeProbability(const BasicBlock *Src,
unsigned IndexInSuccessors) const;
- /// \brief Get the probability of going from Src to Dst.
+ /// Get the probability of going from Src to Dst.
///
/// It returns the sum of all probabilities for edges from Src to Dst.
BranchProbability getEdgeProbability(const BasicBlock *Src,
@@ -97,19 +97,19 @@
BranchProbability getEdgeProbability(const BasicBlock *Src,
succ_const_iterator Dst) const;
- /// \brief Test if an edge is hot relative to other out-edges of the Src.
+ /// Test if an edge is hot relative to other out-edges of the Src.
///
/// Check whether this edge out of the source block is 'hot'. We define hot
/// as having a relative probability >= 80%.
bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const;
- /// \brief Retrieve the hot successor of a block if one exists.
+ /// Retrieve the hot successor of a block if one exists.
///
/// Given a basic block, look through its successors and if one exists for
/// which \see isEdgeHot would return true, return that successor block.
const BasicBlock *getHotSucc(const BasicBlock *BB) const;
- /// \brief Print an edge's probability.
+ /// Print an edge's probability.
///
/// Retrieves an edge's probability similarly to \see getEdgeProbability, but
/// then prints that probability to the provided stream. That stream is then
@@ -117,7 +117,7 @@
raw_ostream &printEdgeProbability(raw_ostream &OS, const BasicBlock *Src,
const BasicBlock *Dst) const;
- /// \brief Set the raw edge probability for the given edge.
+ /// Set the raw edge probability for the given edge.
///
/// This allows a pass to explicitly set the edge probability for an edge. It
/// can be used when updating the CFG to update and preserve the branch
@@ -179,13 +179,13 @@
DenseMap<Edge, BranchProbability> Probs;
- /// \brief Track the last function we run over for printing.
+ /// Track the last function we run over for printing.
const Function *LastF;
- /// \brief Track the set of blocks directly succeeded by a returning block.
+ /// Track the set of blocks directly succeeded by a returning block.
SmallPtrSet<const BasicBlock *, 16> PostDominatedByUnreachable;
- /// \brief Track the set of blocks that always lead to a cold call.
+ /// Track the set of blocks that always lead to a cold call.
SmallPtrSet<const BasicBlock *, 16> PostDominatedByColdCall;
void updatePostDominatedByUnreachable(const BasicBlock *BB);
@@ -201,7 +201,7 @@
bool calcInvokeHeuristics(const BasicBlock *BB);
};
-/// \brief Analysis pass which computes \c BranchProbabilityInfo.
+/// Analysis pass which computes \c BranchProbabilityInfo.
class BranchProbabilityAnalysis
: public AnalysisInfoMixin<BranchProbabilityAnalysis> {
friend AnalysisInfoMixin<BranchProbabilityAnalysis>;
@@ -209,14 +209,14 @@
static AnalysisKey Key;
public:
- /// \brief Provide the result type for this analysis pass.
+ /// Provide the result type for this analysis pass.
using Result = BranchProbabilityInfo;
- /// \brief Run the analysis pass over a function and produce BPI.
+ /// Run the analysis pass over a function and produce BPI.
BranchProbabilityInfo run(Function &F, FunctionAnalysisManager &AM);
};
-/// \brief Printer pass for the \c BranchProbabilityAnalysis results.
+/// Printer pass for the \c BranchProbabilityAnalysis results.
class BranchProbabilityPrinterPass
: public PassInfoMixin<BranchProbabilityPrinterPass> {
raw_ostream &OS;
@@ -227,7 +227,7 @@
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
-/// \brief Legacy analysis pass which computes \c BranchProbabilityInfo.
+/// Legacy analysis pass which computes \c BranchProbabilityInfo.
class BranchProbabilityInfoWrapperPass : public FunctionPass {
BranchProbabilityInfo BPI;
diff --git a/linux-x64/clang/include/llvm/Analysis/CFG.h b/linux-x64/clang/include/llvm/Analysis/CFG.h
index d569464..cccdd16 100644
--- a/linux-x64/clang/include/llvm/Analysis/CFG.h
+++ b/linux-x64/clang/include/llvm/Analysis/CFG.h
@@ -49,7 +49,7 @@
bool isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
bool AllowIdenticalEdges = false);
-/// \brief Determine whether instruction 'To' is reachable from 'From',
+/// Determine whether instruction 'To' is reachable from 'From',
/// returning true if uncertain.
///
/// Determine whether there is a path from From to To within a single function.
@@ -68,7 +68,7 @@
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
-/// \brief Determine whether block 'To' is reachable from 'From', returning
+/// Determine whether block 'To' is reachable from 'From', returning
/// true if uncertain.
///
/// Determine whether there is a path from From to To within a single function.
@@ -78,7 +78,7 @@
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
-/// \brief Determine whether there is at least one path from a block in
+/// Determine whether there is at least one path from a block in
/// 'Worklist' to 'StopBB', returning true if uncertain.
///
/// Determine whether there is a path from at least one block in Worklist to
@@ -90,7 +90,7 @@
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
-/// \brief Return true if the control flow in \p RPOTraversal is irreducible.
+/// Return true if the control flow in \p RPOTraversal is irreducible.
///
/// This is a generic implementation to detect CFG irreducibility based on loop
/// info analysis. It can be used for any kind of CFG (Loop, MachineLoop,
diff --git a/linux-x64/clang/include/llvm/Analysis/CFLAndersAliasAnalysis.h b/linux-x64/clang/include/llvm/Analysis/CFLAndersAliasAnalysis.h
index 6239d53..8ae7255 100644
--- a/linux-x64/clang/include/llvm/Analysis/CFLAndersAliasAnalysis.h
+++ b/linux-x64/clang/include/llvm/Analysis/CFLAndersAliasAnalysis.h
@@ -56,7 +56,7 @@
/// Evict the given function from cache
void evict(const Function *Fn);
- /// \brief Get the alias summary for the given function
+ /// Get the alias summary for the given function
/// Return nullptr if the summary is not found or not available
const cflaa::AliasSummary *getAliasSummary(const Function &);
@@ -64,19 +64,19 @@
AliasResult alias(const MemoryLocation &, const MemoryLocation &);
private:
- /// \brief Ensures that the given function is available in the cache.
+ /// Ensures that the given function is available in the cache.
/// Returns the appropriate entry from the cache.
const Optional<FunctionInfo> &ensureCached(const Function &);
- /// \brief Inserts the given Function into the cache.
+ /// Inserts the given Function into the cache.
void scan(const Function &);
- /// \brief Build summary for a given function
+ /// Build summary for a given function
FunctionInfo buildInfoFrom(const Function &);
const TargetLibraryInfo &TLI;
- /// \brief Cached mapping of Functions to their StratifiedSets.
+ /// Cached mapping of Functions to their StratifiedSets.
/// If a function's sets are currently being built, it is marked
/// in the cache as an Optional without a value. This way, if we
/// have any kind of recursion, it is discernable from a function
diff --git a/linux-x64/clang/include/llvm/Analysis/CFLSteensAliasAnalysis.h b/linux-x64/clang/include/llvm/Analysis/CFLSteensAliasAnalysis.h
index ee9e290..09e366f 100644
--- a/linux-x64/clang/include/llvm/Analysis/CFLSteensAliasAnalysis.h
+++ b/linux-x64/clang/include/llvm/Analysis/CFLSteensAliasAnalysis.h
@@ -55,16 +55,16 @@
return false;
}
- /// \brief Inserts the given Function into the cache.
+ /// Inserts the given Function into the cache.
void scan(Function *Fn);
void evict(Function *Fn);
- /// \brief Ensures that the given function is available in the cache.
+ /// Ensures that the given function is available in the cache.
/// Returns the appropriate entry from the cache.
const Optional<FunctionInfo> &ensureCached(Function *Fn);
- /// \brief Get the alias summary for the given function
+ /// Get the alias summary for the given function
/// Return nullptr if the summary is not found or not available
const cflaa::AliasSummary *getAliasSummary(Function &Fn);
@@ -92,7 +92,7 @@
private:
const TargetLibraryInfo &TLI;
- /// \brief Cached mapping of Functions to their StratifiedSets.
+ /// Cached mapping of Functions to their StratifiedSets.
/// If a function's sets are currently being built, it is marked
/// in the cache as an Optional without a value. This way, if we
/// have any kind of recursion, it is discernable from a function
diff --git a/linux-x64/clang/include/llvm/Analysis/CGSCCPassManager.h b/linux-x64/clang/include/llvm/Analysis/CGSCCPassManager.h
index 457d5a0..5e83ea2 100644
--- a/linux-x64/clang/include/llvm/Analysis/CGSCCPassManager.h
+++ b/linux-x64/clang/include/llvm/Analysis/CGSCCPassManager.h
@@ -119,7 +119,7 @@
extern template class AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
-/// \brief The CGSCC analysis manager.
+/// The CGSCC analysis manager.
///
/// See the documentation for the AnalysisManager template for detail
/// documentation. This type serves as a convenient way to refer to this
@@ -140,7 +140,7 @@
extern template class PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager,
LazyCallGraph &, CGSCCUpdateResult &>;
-/// \brief The CGSCC pass manager.
+/// The CGSCC pass manager.
///
/// See the documentation for the PassManager template for details. It runs
/// a sequence of SCC passes over each SCC that the manager is run over. This
@@ -175,10 +175,10 @@
explicit Result(CGSCCAnalysisManager &InnerAM, LazyCallGraph &G)
: InnerAM(&InnerAM), G(&G) {}
- /// \brief Accessor for the analysis manager.
+ /// Accessor for the analysis manager.
CGSCCAnalysisManager &getManager() { return *InnerAM; }
- /// \brief Handler for invalidation of the Module.
+ /// Handler for invalidation of the Module.
///
/// If the proxy analysis itself is preserved, then we assume that the set of
/// SCCs in the Module hasn't changed. Thus any pointers to SCCs in the
@@ -302,7 +302,7 @@
&InlinedInternalEdges;
};
-/// \brief The core module pass which does a post-order walk of the SCCs and
+/// The core module pass which does a post-order walk of the SCCs and
/// runs a CGSCC pass over each one.
///
/// Designed to allow composition of a CGSCCPass(Manager) and
@@ -338,7 +338,7 @@
return *this;
}
- /// \brief Runs the CGSCC pass across every SCC in the module.
+ /// Runs the CGSCC pass across every SCC in the module.
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM) {
// Setup the CGSCC analysis manager from its proxy.
CGSCCAnalysisManager &CGAM =
@@ -387,15 +387,15 @@
do {
LazyCallGraph::RefSCC *RC = RCWorklist.pop_back_val();
if (InvalidRefSCCSet.count(RC)) {
- DEBUG(dbgs() << "Skipping an invalid RefSCC...\n");
+ LLVM_DEBUG(dbgs() << "Skipping an invalid RefSCC...\n");
continue;
}
assert(CWorklist.empty() &&
"Should always start with an empty SCC worklist");
- DEBUG(dbgs() << "Running an SCC pass across the RefSCC: " << *RC
- << "\n");
+ LLVM_DEBUG(dbgs() << "Running an SCC pass across the RefSCC: " << *RC
+ << "\n");
// Push the initial SCCs in reverse post-order as we'll pop off the
// back and so see this in post-order.
@@ -409,12 +409,13 @@
// other RefSCCs should be queued above, so we just need to skip both
// scenarios here.
if (InvalidSCCSet.count(C)) {
- DEBUG(dbgs() << "Skipping an invalid SCC...\n");
+ LLVM_DEBUG(dbgs() << "Skipping an invalid SCC...\n");
continue;
}
if (&C->getOuterRefSCC() != RC) {
- DEBUG(dbgs() << "Skipping an SCC that is now part of some other "
- "RefSCC...\n");
+ LLVM_DEBUG(dbgs()
+ << "Skipping an SCC that is now part of some other "
+ "RefSCC...\n");
continue;
}
@@ -436,7 +437,8 @@
// If the CGSCC pass wasn't able to provide a valid updated SCC,
// the current SCC may simply need to be skipped if invalid.
if (UR.InvalidatedSCCs.count(C)) {
- DEBUG(dbgs() << "Skipping invalidated root or island SCC!\n");
+ LLVM_DEBUG(dbgs()
+ << "Skipping invalidated root or island SCC!\n");
break;
}
// Check that we didn't miss any update scenario.
@@ -464,9 +466,10 @@
// FIXME: If we ever start having RefSCC passes, we'll want to
// iterate there too.
if (UR.UpdatedC)
- DEBUG(dbgs() << "Re-running SCC passes after a refinement of the "
- "current SCC: "
- << *UR.UpdatedC << "\n");
+ LLVM_DEBUG(dbgs()
+ << "Re-running SCC passes after a refinement of the "
+ "current SCC: "
+ << *UR.UpdatedC << "\n");
// Note that both `C` and `RC` may at this point refer to deleted,
// invalid SCC and RefSCCs respectively. But we will short circuit
@@ -494,7 +497,7 @@
CGSCCPassT Pass;
};
-/// \brief A function to deduce a function pass type and wrap it in the
+/// A function to deduce a function pass type and wrap it in the
/// templated adaptor.
template <typename CGSCCPassT>
ModuleToPostOrderCGSCCPassAdaptor<CGSCCPassT>
@@ -517,7 +520,7 @@
public:
explicit Result(FunctionAnalysisManager &FAM) : FAM(&FAM) {}
- /// \brief Accessor for the analysis manager.
+ /// Accessor for the analysis manager.
FunctionAnalysisManager &getManager() { return *FAM; }
bool invalidate(LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
@@ -552,7 +555,7 @@
LazyCallGraph &G, LazyCallGraph::SCC &C, LazyCallGraph::Node &N,
CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR);
-/// \brief Adaptor that maps from a SCC to its functions.
+/// Adaptor that maps from a SCC to its functions.
///
/// Designed to allow composition of a FunctionPass(Manager) and
/// a CGSCCPassManager. Note that if this pass is constructed with a pointer
@@ -585,7 +588,7 @@
return *this;
}
- /// \brief Runs the function pass across every function in the module.
+ /// Runs the function pass across every function in the module.
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
LazyCallGraph &CG, CGSCCUpdateResult &UR) {
// Setup the function analysis manager from its proxy.
@@ -601,7 +604,8 @@
// a pointer we can overwrite.
LazyCallGraph::SCC *CurrentC = &C;
- DEBUG(dbgs() << "Running function passes across an SCC: " << C << "\n");
+ LLVM_DEBUG(dbgs() << "Running function passes across an SCC: " << C
+ << "\n");
PreservedAnalyses PA = PreservedAnalyses::all();
for (LazyCallGraph::Node *N : Nodes) {
@@ -652,7 +656,7 @@
FunctionPassT Pass;
};
-/// \brief A function to deduce a function pass type and wrap it in the
+/// A function to deduce a function pass type and wrap it in the
/// templated adaptor.
template <typename FunctionPassT>
CGSCCToFunctionPassAdaptor<FunctionPassT>
@@ -757,9 +761,9 @@
if (!F)
return false;
- DEBUG(dbgs() << "Found devirutalized call from "
- << CS.getParent()->getParent()->getName() << " to "
- << F->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Found devirutalized call from "
+ << CS.getParent()->getParent()->getName() << " to "
+ << F->getName() << "\n");
// We now have a direct call where previously we had an indirect call,
// so iterate to process this devirtualization site.
@@ -793,16 +797,18 @@
// Otherwise, if we've already hit our max, we're done.
if (Iteration >= MaxIterations) {
- DEBUG(dbgs() << "Found another devirtualization after hitting the max "
- "number of repetitions ("
- << MaxIterations << ") on SCC: " << *C << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Found another devirtualization after hitting the max "
+ "number of repetitions ("
+ << MaxIterations << ") on SCC: " << *C << "\n");
PA.intersect(std::move(PassPA));
break;
}
- DEBUG(dbgs()
- << "Repeating an SCC pass after finding a devirtualization in: "
- << *C << "\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "Repeating an SCC pass after finding a devirtualization in: " << *C
+ << "\n");
// Move over the new call counts in preparation for iterating.
CallCounts = std::move(NewCallCounts);
@@ -824,7 +830,7 @@
int MaxIterations;
};
-/// \brief A function to deduce a function pass type and wrap it in the
+/// A function to deduce a function pass type and wrap it in the
/// templated adaptor.
template <typename PassT>
DevirtSCCRepeatedPass<PassT> createDevirtSCCRepeatedPass(PassT Pass,
diff --git a/linux-x64/clang/include/llvm/Analysis/CallGraph.h b/linux-x64/clang/include/llvm/Analysis/CallGraph.h
index 8efc85f..f109cf2 100644
--- a/linux-x64/clang/include/llvm/Analysis/CallGraph.h
+++ b/linux-x64/clang/include/llvm/Analysis/CallGraph.h
@@ -66,7 +66,7 @@
class Module;
class raw_ostream;
-/// \brief The basic data container for the call graph of a \c Module of IR.
+/// The basic data container for the call graph of a \c Module of IR.
///
/// This class exposes both the interface to the call graph for a module of IR.
///
@@ -77,25 +77,25 @@
using FunctionMapTy =
std::map<const Function *, std::unique_ptr<CallGraphNode>>;
- /// \brief A map from \c Function* to \c CallGraphNode*.
+ /// A map from \c Function* to \c CallGraphNode*.
FunctionMapTy FunctionMap;
- /// \brief This node has edges to all external functions and those internal
+ /// This node has edges to all external functions and those internal
/// functions that have their address taken.
CallGraphNode *ExternalCallingNode;
- /// \brief This node has edges to it from all functions making indirect calls
+ /// This node has edges to it from all functions making indirect calls
/// or calling an external function.
std::unique_ptr<CallGraphNode> CallsExternalNode;
- /// \brief Replace the function represented by this node by another.
+ /// Replace the function represented by this node by another.
///
/// This does not rescan the body of the function, so it is suitable when
/// splicing the body of one function to another while also updating all
/// callers from the old function to the new.
void spliceFunction(const Function *From, const Function *To);
- /// \brief Add a function to the call graph, and link the node to all of the
+ /// Add a function to the call graph, and link the node to all of the
/// functions that it calls.
void addToCallGraph(Function *F);
@@ -110,7 +110,7 @@
using iterator = FunctionMapTy::iterator;
using const_iterator = FunctionMapTy::const_iterator;
- /// \brief Returns the module the call graph corresponds to.
+ /// Returns the module the call graph corresponds to.
Module &getModule() const { return M; }
inline iterator begin() { return FunctionMap.begin(); }
@@ -118,21 +118,21 @@
inline const_iterator begin() const { return FunctionMap.begin(); }
inline const_iterator end() const { return FunctionMap.end(); }
- /// \brief Returns the call graph node for the provided function.
+ /// Returns the call graph node for the provided function.
inline const CallGraphNode *operator[](const Function *F) const {
const_iterator I = FunctionMap.find(F);
assert(I != FunctionMap.end() && "Function not in callgraph!");
return I->second.get();
}
- /// \brief Returns the call graph node for the provided function.
+ /// Returns the call graph node for the provided function.
inline CallGraphNode *operator[](const Function *F) {
const_iterator I = FunctionMap.find(F);
assert(I != FunctionMap.end() && "Function not in callgraph!");
return I->second.get();
}
- /// \brief Returns the \c CallGraphNode which is used to represent
+ /// Returns the \c CallGraphNode which is used to represent
/// undetermined calls into the callgraph.
CallGraphNode *getExternalCallingNode() const { return ExternalCallingNode; }
@@ -145,7 +145,7 @@
// modified.
//
- /// \brief Unlink the function from this module, returning it.
+ /// Unlink the function from this module, returning it.
///
/// Because this removes the function from the module, the call graph node is
/// destroyed. This is only valid if the function does not call any other
@@ -153,25 +153,25 @@
/// this is to dropAllReferences before calling this.
Function *removeFunctionFromModule(CallGraphNode *CGN);
- /// \brief Similar to operator[], but this will insert a new CallGraphNode for
+ /// Similar to operator[], but this will insert a new CallGraphNode for
/// \c F if one does not already exist.
CallGraphNode *getOrInsertFunction(const Function *F);
};
-/// \brief A node in the call graph for a module.
+/// A node in the call graph for a module.
///
/// Typically represents a function in the call graph. There are also special
/// "null" nodes used to represent theoretical entries in the call graph.
class CallGraphNode {
public:
- /// \brief A pair of the calling instruction (a call or invoke)
+ /// A pair of the calling instruction (a call or invoke)
/// and the call graph node being called.
using CallRecord = std::pair<WeakTrackingVH, CallGraphNode *>;
public:
using CalledFunctionsVector = std::vector<CallRecord>;
- /// \brief Creates a node for the specified function.
+ /// Creates a node for the specified function.
inline CallGraphNode(Function *F) : F(F) {}
CallGraphNode(const CallGraphNode &) = delete;
@@ -184,7 +184,7 @@
using iterator = std::vector<CallRecord>::iterator;
using const_iterator = std::vector<CallRecord>::const_iterator;
- /// \brief Returns the function that this call graph node represents.
+ /// Returns the function that this call graph node represents.
Function *getFunction() const { return F; }
inline iterator begin() { return CalledFunctions.begin(); }
@@ -194,17 +194,17 @@
inline bool empty() const { return CalledFunctions.empty(); }
inline unsigned size() const { return (unsigned)CalledFunctions.size(); }
- /// \brief Returns the number of other CallGraphNodes in this CallGraph that
+ /// Returns the number of other CallGraphNodes in this CallGraph that
/// reference this node in their callee list.
unsigned getNumReferences() const { return NumReferences; }
- /// \brief Returns the i'th called function.
+ /// Returns the i'th called function.
CallGraphNode *operator[](unsigned i) const {
assert(i < CalledFunctions.size() && "Invalid index");
return CalledFunctions[i].second;
}
- /// \brief Print out this call graph node.
+ /// Print out this call graph node.
void dump() const;
void print(raw_ostream &OS) const;
@@ -213,7 +213,7 @@
// modified
//
- /// \brief Removes all edges from this CallGraphNode to any functions it
+ /// Removes all edges from this CallGraphNode to any functions it
/// calls.
void removeAllCalledFunctions() {
while (!CalledFunctions.empty()) {
@@ -222,14 +222,14 @@
}
}
- /// \brief Moves all the callee information from N to this node.
+ /// Moves all the callee information from N to this node.
void stealCalledFunctionsFrom(CallGraphNode *N) {
assert(CalledFunctions.empty() &&
"Cannot steal callsite information if I already have some");
std::swap(CalledFunctions, N->CalledFunctions);
}
- /// \brief Adds a function to the list of functions called by this one.
+ /// Adds a function to the list of functions called by this one.
void addCalledFunction(CallSite CS, CallGraphNode *M) {
assert(!CS.getInstruction() || !CS.getCalledFunction() ||
!CS.getCalledFunction()->isIntrinsic() ||
@@ -244,23 +244,23 @@
CalledFunctions.pop_back();
}
- /// \brief Removes the edge in the node for the specified call site.
+ /// Removes the edge in the node for the specified call site.
///
/// Note that this method takes linear time, so it should be used sparingly.
void removeCallEdgeFor(CallSite CS);
- /// \brief Removes all call edges from this node to the specified callee
+ /// Removes all call edges from this node to the specified callee
/// function.
///
/// This takes more time to execute than removeCallEdgeTo, so it should not
/// be used unless necessary.
void removeAnyCallEdgeTo(CallGraphNode *Callee);
- /// \brief Removes one edge associated with a null callsite from this node to
+ /// Removes one edge associated with a null callsite from this node to
/// the specified callee function.
void removeOneAbstractEdgeTo(CallGraphNode *Callee);
- /// \brief Replaces the edge in the node for the specified call site with a
+ /// Replaces the edge in the node for the specified call site with a
/// new one.
///
/// Note that this method takes linear time, so it should be used sparingly.
@@ -273,18 +273,18 @@
std::vector<CallRecord> CalledFunctions;
- /// \brief The number of times that this CallGraphNode occurs in the
+ /// The number of times that this CallGraphNode occurs in the
/// CalledFunctions array of this or other CallGraphNodes.
unsigned NumReferences = 0;
void DropRef() { --NumReferences; }
void AddRef() { ++NumReferences; }
- /// \brief A special function that should only be used by the CallGraph class.
+ /// A special function that should only be used by the CallGraph class.
void allReferencesDropped() { NumReferences = 0; }
};
-/// \brief An analysis pass to compute the \c CallGraph for a \c Module.
+/// An analysis pass to compute the \c CallGraph for a \c Module.
///
/// This class implements the concept of an analysis pass used by the \c
/// ModuleAnalysisManager to run an analysis over a module and cache the
@@ -295,16 +295,16 @@
static AnalysisKey Key;
public:
- /// \brief A formulaic type to inform clients of the result type.
+ /// A formulaic type to inform clients of the result type.
using Result = CallGraph;
- /// \brief Compute the \c CallGraph for the module \c M.
+ /// Compute the \c CallGraph for the module \c M.
///
/// The real work here is done in the \c CallGraph constructor.
CallGraph run(Module &M, ModuleAnalysisManager &) { return CallGraph(M); }
};
-/// \brief Printer pass for the \c CallGraphAnalysis results.
+/// Printer pass for the \c CallGraphAnalysis results.
class CallGraphPrinterPass : public PassInfoMixin<CallGraphPrinterPass> {
raw_ostream &OS;
@@ -314,7 +314,7 @@
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
-/// \brief The \c ModulePass which wraps up a \c CallGraph and the logic to
+/// The \c ModulePass which wraps up a \c CallGraph and the logic to
/// build it.
///
/// This class exposes both the interface to the call graph container and the
@@ -330,7 +330,7 @@
CallGraphWrapperPass();
~CallGraphWrapperPass() override;
- /// \brief The internal \c CallGraph around which the rest of this interface
+ /// The internal \c CallGraph around which the rest of this interface
/// is wrapped.
const CallGraph &getCallGraph() const { return *G; }
CallGraph &getCallGraph() { return *G; }
@@ -338,7 +338,7 @@
using iterator = CallGraph::iterator;
using const_iterator = CallGraph::const_iterator;
- /// \brief Returns the module the call graph corresponds to.
+ /// Returns the module the call graph corresponds to.
Module &getModule() const { return G->getModule(); }
inline iterator begin() { return G->begin(); }
@@ -346,15 +346,15 @@
inline const_iterator begin() const { return G->begin(); }
inline const_iterator end() const { return G->end(); }
- /// \brief Returns the call graph node for the provided function.
+ /// Returns the call graph node for the provided function.
inline const CallGraphNode *operator[](const Function *F) const {
return (*G)[F];
}
- /// \brief Returns the call graph node for the provided function.
+ /// Returns the call graph node for the provided function.
inline CallGraphNode *operator[](const Function *F) { return (*G)[F]; }
- /// \brief Returns the \c CallGraphNode which is used to represent
+ /// Returns the \c CallGraphNode which is used to represent
/// undetermined calls into the callgraph.
CallGraphNode *getExternalCallingNode() const {
return G->getExternalCallingNode();
@@ -369,7 +369,7 @@
// modified.
//
- /// \brief Unlink the function from this module, returning it.
+ /// Unlink the function from this module, returning it.
///
/// Because this removes the function from the module, the call graph node is
/// destroyed. This is only valid if the function does not call any other
@@ -379,7 +379,7 @@
return G->removeFunctionFromModule(CGN);
}
- /// \brief Similar to operator[], but this will insert a new CallGraphNode for
+ /// Similar to operator[], but this will insert a new CallGraphNode for
/// \c F if one does not already exist.
CallGraphNode *getOrInsertFunction(const Function *F) {
return G->getOrInsertFunction(F);
diff --git a/linux-x64/clang/include/llvm/Analysis/CaptureTracking.h b/linux-x64/clang/include/llvm/Analysis/CaptureTracking.h
index 8d2c095..7a869a5 100644
--- a/linux-x64/clang/include/llvm/Analysis/CaptureTracking.h
+++ b/linux-x64/clang/include/llvm/Analysis/CaptureTracking.h
@@ -46,7 +46,7 @@
/// to speed up capture-tracker queries.
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures,
bool StoreCaptures, const Instruction *I,
- DominatorTree *DT, bool IncludeI = false,
+ const DominatorTree *DT, bool IncludeI = false,
OrderedBasicBlock *OBB = nullptr);
/// This callback is used in conjunction with PointerMayBeCaptured. In
diff --git a/linux-x64/clang/include/llvm/Analysis/CodeMetrics.h b/linux-x64/clang/include/llvm/Analysis/CodeMetrics.h
index 9e861ac..7529022 100644
--- a/linux-x64/clang/include/llvm/Analysis/CodeMetrics.h
+++ b/linux-x64/clang/include/llvm/Analysis/CodeMetrics.h
@@ -29,7 +29,7 @@
class TargetTransformInfo;
class Value;
-/// \brief Check whether a call will lower to something small.
+/// Check whether a call will lower to something small.
///
/// This tests checks whether this callsite will lower to something
/// significantly cheaper than a traditional call, often a single
@@ -37,64 +37,64 @@
/// return true, so will this function.
bool callIsSmall(ImmutableCallSite CS);
-/// \brief Utility to calculate the size and a few similar metrics for a set
+/// Utility to calculate the size and a few similar metrics for a set
/// of basic blocks.
struct CodeMetrics {
- /// \brief True if this function contains a call to setjmp or other functions
+ /// True if this function contains a call to setjmp or other functions
/// with attribute "returns twice" without having the attribute itself.
bool exposesReturnsTwice = false;
- /// \brief True if this function calls itself.
+ /// True if this function calls itself.
bool isRecursive = false;
- /// \brief True if this function cannot be duplicated.
+ /// True if this function cannot be duplicated.
///
/// True if this function contains one or more indirect branches, or it contains
/// one or more 'noduplicate' instructions.
bool notDuplicatable = false;
- /// \brief True if this function contains a call to a convergent function.
+ /// True if this function contains a call to a convergent function.
bool convergent = false;
- /// \brief True if this function calls alloca (in the C sense).
+ /// True if this function calls alloca (in the C sense).
bool usesDynamicAlloca = false;
- /// \brief Number of instructions in the analyzed blocks.
+ /// Number of instructions in the analyzed blocks.
unsigned NumInsts = false;
- /// \brief Number of analyzed blocks.
+ /// Number of analyzed blocks.
unsigned NumBlocks = false;
- /// \brief Keeps track of basic block code size estimates.
+ /// Keeps track of basic block code size estimates.
DenseMap<const BasicBlock *, unsigned> NumBBInsts;
- /// \brief Keep track of the number of calls to 'big' functions.
+ /// Keep track of the number of calls to 'big' functions.
unsigned NumCalls = false;
- /// \brief The number of calls to internal functions with a single caller.
+ /// The number of calls to internal functions with a single caller.
///
/// These are likely targets for future inlining, likely exposed by
/// interleaved devirtualization.
unsigned NumInlineCandidates = 0;
- /// \brief How many instructions produce vector values.
+ /// How many instructions produce vector values.
///
/// The inliner is more aggressive with inlining vector kernels.
unsigned NumVectorInsts = 0;
- /// \brief How many 'ret' instructions the blocks contain.
+ /// How many 'ret' instructions the blocks contain.
unsigned NumRets = 0;
- /// \brief Add information about a block to the current state.
+ /// Add information about a block to the current state.
void analyzeBasicBlock(const BasicBlock *BB, const TargetTransformInfo &TTI,
const SmallPtrSetImpl<const Value*> &EphValues);
- /// \brief Collect a loop's ephemeral values (those used only by an assume
+ /// Collect a loop's ephemeral values (those used only by an assume
/// or similar intrinsics in the loop).
static void collectEphemeralValues(const Loop *L, AssumptionCache *AC,
SmallPtrSetImpl<const Value *> &EphValues);
- /// \brief Collect a functions's ephemeral values (those used only by an
+/// Collect a function's ephemeral values (those used only by an
/// assume or similar intrinsics in the function).
static void collectEphemeralValues(const Function *L, AssumptionCache *AC,
SmallPtrSetImpl<const Value *> &EphValues);
diff --git a/linux-x64/clang/include/llvm/Analysis/ConstantFolding.h b/linux-x64/clang/include/llvm/Analysis/ConstantFolding.h
index 354b557..192c1ab 100644
--- a/linux-x64/clang/include/llvm/Analysis/ConstantFolding.h
+++ b/linux-x64/clang/include/llvm/Analysis/ConstantFolding.h
@@ -73,19 +73,19 @@
Constant *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr);
-/// \brief Attempt to constant fold a binary operation with the specified
+/// Attempt to constant fold a binary operation with the specified
/// operands. If it fails, it returns a constant expression of the specified
/// operands.
Constant *ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
Constant *RHS, const DataLayout &DL);
-/// \brief Attempt to constant fold a select instruction with the specified
+/// Attempt to constant fold a select instruction with the specified
/// operands. The constant result is returned if successful; if not, null is
/// returned.
Constant *ConstantFoldSelectInstruction(Constant *Cond, Constant *V1,
Constant *V2);
-/// \brief Attempt to constant fold a cast with the specified operand. If it
+/// Attempt to constant fold a cast with the specified operand. If it
/// fails, it returns a constant expression of the specified operand.
Constant *ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy,
const DataLayout &DL);
@@ -96,25 +96,25 @@
Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
ArrayRef<unsigned> Idxs);
-/// \brief Attempt to constant fold an extractvalue instruction with the
+/// Attempt to constant fold an extractvalue instruction with the
/// specified operands and indices. The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldExtractValueInstruction(Constant *Agg,
ArrayRef<unsigned> Idxs);
-/// \brief Attempt to constant fold an insertelement instruction with the
+/// Attempt to constant fold an insertelement instruction with the
/// specified operands and indices. The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldInsertElementInstruction(Constant *Val,
Constant *Elt,
Constant *Idx);
-/// \brief Attempt to constant fold an extractelement instruction with the
+/// Attempt to constant fold an extractelement instruction with the
/// specified operands and indices. The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldExtractElementInstruction(Constant *Val, Constant *Idx);
-/// \brief Attempt to constant fold a shufflevector instruction with the
+/// Attempt to constant fold a shufflevector instruction with the
/// specified operands and indices. The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2,
@@ -153,7 +153,7 @@
Constant *ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
const DataLayout &DL);
-/// \brief Check whether the given call has no side-effects.
+/// Check whether the given call has no side-effects.
/// Specifically checks for math routimes which sometimes set errno.
bool isMathLibCallNoop(CallSite CS, const TargetLibraryInfo *TLI);
}
diff --git a/linux-x64/clang/include/llvm/Analysis/DOTGraphTraitsPass.h b/linux-x64/clang/include/llvm/Analysis/DOTGraphTraitsPass.h
index 39f9c39..b7447a0 100644
--- a/linux-x64/clang/include/llvm/Analysis/DOTGraphTraitsPass.h
+++ b/linux-x64/clang/include/llvm/Analysis/DOTGraphTraitsPass.h
@@ -20,7 +20,7 @@
namespace llvm {
-/// \brief Default traits class for extracting a graph from an analysis pass.
+/// Default traits class for extracting a graph from an analysis pass.
///
/// This assumes that 'GraphT' is 'AnalysisT *' and so just passes it through.
template <typename AnalysisT, typename GraphT = AnalysisT *>
@@ -36,7 +36,7 @@
DOTGraphTraitsViewer(StringRef GraphName, char &ID)
: FunctionPass(ID), Name(GraphName) {}
- /// @brief Return true if this function should be processed.
+ /// Return true if this function should be processed.
///
/// An implementation of this class my override this function to indicate that
/// only certain functions should be viewed.
@@ -78,7 +78,7 @@
DOTGraphTraitsPrinter(StringRef GraphName, char &ID)
: FunctionPass(ID), Name(GraphName) {}
- /// @brief Return true if this function should be processed.
+ /// Return true if this function should be processed.
///
/// An implementation of this class my override this function to indicate that
/// only certain functions should be printed.
diff --git a/linux-x64/clang/include/llvm/Analysis/DemandedBits.h b/linux-x64/clang/include/llvm/Analysis/DemandedBits.h
index ab86682..d438460 100644
--- a/linux-x64/clang/include/llvm/Analysis/DemandedBits.h
+++ b/linux-x64/clang/include/llvm/Analysis/DemandedBits.h
@@ -96,15 +96,15 @@
static AnalysisKey Key;
public:
- /// \brief Provide the result type for this analysis pass.
+ /// Provide the result type for this analysis pass.
using Result = DemandedBits;
- /// \brief Run the analysis pass over a function and produce demanded bits
+ /// Run the analysis pass over a function and produce demanded bits
/// information.
DemandedBits run(Function &F, FunctionAnalysisManager &AM);
};
-/// \brief Printer pass for DemandedBits
+/// Printer pass for DemandedBits
class DemandedBitsPrinterPass : public PassInfoMixin<DemandedBitsPrinterPass> {
raw_ostream &OS;
diff --git a/linux-x64/clang/include/llvm/Analysis/DependenceAnalysis.h b/linux-x64/clang/include/llvm/Analysis/DependenceAnalysis.h
index 90f33b8..c8ec737 100644
--- a/linux-x64/clang/include/llvm/Analysis/DependenceAnalysis.h
+++ b/linux-x64/clang/include/llvm/Analysis/DependenceAnalysis.h
@@ -557,6 +557,17 @@
const SCEV *X,
const SCEV *Y) const;
+ /// isKnownLessThan - Compare to see if S is less than Size
+ /// Another wrapper for isKnownNegative(S - max(Size, 1)) with some extra
+ /// checking if S is an AddRec and we can prove lessthan using the loop
+ /// bounds.
+ bool isKnownLessThan(const SCEV *S, const SCEV *Size) const;
+
+ /// isKnownNonNegative - Compare to see if S is known not to be negative
+ /// Uses the fact that S comes from Ptr, which may be an inbound GEP,
+ /// proving there is no wrapping going on.
+ bool isKnownNonNegative(const SCEV *S, const Value *Ptr) const;
+
/// collectUpperBound - All subscripts are the same type (on my machine,
/// an i64). The loop bound may be a smaller type. collectUpperBound
/// find the bound, if available, and zero extends it to the Type T.
@@ -914,7 +925,7 @@
SmallVectorImpl<Subscript> &Pair);
}; // class DependenceInfo
- /// \brief AnalysisPass to compute dependence information in a function
+ /// AnalysisPass to compute dependence information in a function
class DependenceAnalysis : public AnalysisInfoMixin<DependenceAnalysis> {
public:
typedef DependenceInfo Result;
@@ -925,7 +936,7 @@
friend struct AnalysisInfoMixin<DependenceAnalysis>;
}; // class DependenceAnalysis
- /// \brief Legacy pass manager pass to access dependence information
+ /// Legacy pass manager pass to access dependence information
class DependenceAnalysisWrapperPass : public FunctionPass {
public:
static char ID; // Class identification, replacement for typeinfo
diff --git a/linux-x64/clang/include/llvm/Analysis/DivergenceAnalysis.h b/linux-x64/clang/include/llvm/Analysis/DivergenceAnalysis.h
index dd3c68e..328c864 100644
--- a/linux-x64/clang/include/llvm/Analysis/DivergenceAnalysis.h
+++ b/linux-x64/clang/include/llvm/Analysis/DivergenceAnalysis.h
@@ -37,12 +37,21 @@
// Print all divergent branches in the function.
void print(raw_ostream &OS, const Module *) const override;
- // Returns true if V is divergent.
+ // Returns true if V is divergent at its definition.
+ //
+ // Even if this function returns false, V may still be divergent when used
+ // in a different basic block.
bool isDivergent(const Value *V) const { return DivergentValues.count(V); }
// Returns true if V is uniform/non-divergent.
+ //
+ // Even if this function returns true, V may still be divergent when used
+ // in a different basic block.
bool isUniform(const Value *V) const { return !isDivergent(V); }
+ // Keep the analysis results up to date by removing an erased value.
+ void removeValue(const Value *V) { DivergentValues.erase(V); }
+
private:
// Stores all divergent values.
DenseSet<const Value *> DivergentValues;
diff --git a/linux-x64/clang/include/llvm/Analysis/DominanceFrontier.h b/linux-x64/clang/include/llvm/Analysis/DominanceFrontier.h
index a304dff..d94c420 100644
--- a/linux-x64/clang/include/llvm/Analysis/DominanceFrontier.h
+++ b/linux-x64/clang/include/llvm/Analysis/DominanceFrontier.h
@@ -19,6 +19,7 @@
#define LLVM_ANALYSIS_DOMINANCEFRONTIER_H
#include "llvm/ADT/GraphTraits.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/GenericDomTree.h"
@@ -179,7 +180,7 @@
extern template class DominanceFrontierBase<BasicBlock, true>;
extern template class ForwardDominanceFrontierBase<BasicBlock>;
-/// \brief Analysis pass which computes a \c DominanceFrontier.
+/// Analysis pass which computes a \c DominanceFrontier.
class DominanceFrontierAnalysis
: public AnalysisInfoMixin<DominanceFrontierAnalysis> {
friend AnalysisInfoMixin<DominanceFrontierAnalysis>;
@@ -187,14 +188,14 @@
static AnalysisKey Key;
public:
- /// \brief Provide the result type for this analysis pass.
+ /// Provide the result type for this analysis pass.
using Result = DominanceFrontier;
- /// \brief Run the analysis pass over a function and produce a dominator tree.
+ /// Run the analysis pass over a function and produce a dominator tree.
DominanceFrontier run(Function &F, FunctionAnalysisManager &AM);
};
-/// \brief Printer pass for the \c DominanceFrontier.
+/// Printer pass for the \c DominanceFrontier.
class DominanceFrontierPrinterPass
: public PassInfoMixin<DominanceFrontierPrinterPass> {
raw_ostream &OS;
diff --git a/linux-x64/clang/include/llvm/Analysis/DominanceFrontierImpl.h b/linux-x64/clang/include/llvm/Analysis/DominanceFrontierImpl.h
index dffb2e0..99224c0 100644
--- a/linux-x64/clang/include/llvm/Analysis/DominanceFrontierImpl.h
+++ b/linux-x64/clang/include/llvm/Analysis/DominanceFrontierImpl.h
@@ -21,6 +21,7 @@
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/DominanceFrontier.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GenericDomTree.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/linux-x64/clang/include/llvm/Analysis/EHPersonalities.h b/linux-x64/clang/include/llvm/Analysis/EHPersonalities.h
index 2c45ab4..fe0e65b 100644
--- a/linux-x64/clang/include/llvm/Analysis/EHPersonalities.h
+++ b/linux-x64/clang/include/llvm/Analysis/EHPersonalities.h
@@ -32,10 +32,11 @@
MSVC_Win64SEH,
MSVC_CXX,
CoreCLR,
- Rust
+ Rust,
+ Wasm_CXX
};
-/// \brief See if the given exception handling personality function is one
+/// See if the given exception handling personality function is one
/// that we understand. If so, return a description of it; otherwise return
/// Unknown.
EHPersonality classifyEHPersonality(const Value *Pers);
@@ -44,7 +45,7 @@
EHPersonality getDefaultEHPersonality(const Triple &T);
-/// \brief Returns true if this personality function catches asynchronous
+/// Returns true if this personality function catches asynchronous
/// exceptions.
inline bool isAsynchronousEHPersonality(EHPersonality Pers) {
// The two SEH personality functions can catch asynch exceptions. We assume
@@ -59,7 +60,7 @@
llvm_unreachable("invalid enum");
}
-/// \brief Returns true if this is a personality function that invokes
+/// Returns true if this is a personality function that invokes
/// handler funclets (which must return to it).
inline bool isFuncletEHPersonality(EHPersonality Pers) {
switch (Pers) {
@@ -74,7 +75,23 @@
llvm_unreachable("invalid enum");
}
-/// \brief Return true if this personality may be safely removed if there
+/// Returns true if this personality uses scope-style EH IR instructions:
+/// catchswitch, catchpad/ret, and cleanuppad/ret.
+inline bool isScopedEHPersonality(EHPersonality Pers) {
+ switch (Pers) {
+ case EHPersonality::MSVC_CXX:
+ case EHPersonality::MSVC_X86SEH:
+ case EHPersonality::MSVC_Win64SEH:
+ case EHPersonality::CoreCLR:
+ case EHPersonality::Wasm_CXX:
+ return true;
+ default:
+ return false;
+ }
+ llvm_unreachable("invalid enum");
+}
+
+/// Return true if this personality may be safely removed if there
/// are no invoke instructions remaining in the current function.
inline bool isNoOpWithoutInvoke(EHPersonality Pers) {
switch (Pers) {
@@ -91,7 +108,7 @@
typedef TinyPtrVector<BasicBlock *> ColorVector;
-/// \brief If an EH funclet personality is in use (see isFuncletEHPersonality),
+/// If an EH funclet personality is in use (see isFuncletEHPersonality),
/// this will recompute which blocks are in which funclet. It is possible that
/// some blocks are in multiple funclets. Consider this analysis to be
/// expensive.
diff --git a/linux-x64/clang/include/llvm/Analysis/IndirectCallPromotionAnalysis.h b/linux-x64/clang/include/llvm/Analysis/IndirectCallPromotionAnalysis.h
index 8b1c101..be3a284 100644
--- a/linux-x64/clang/include/llvm/Analysis/IndirectCallPromotionAnalysis.h
+++ b/linux-x64/clang/include/llvm/Analysis/IndirectCallPromotionAnalysis.h
@@ -48,7 +48,7 @@
public:
ICallPromotionAnalysis();
- /// \brief Returns reference to array of InstrProfValueData for the given
+ /// Returns reference to array of InstrProfValueData for the given
/// instruction \p I.
///
/// The \p NumVals, \p TotalCount and \p NumCandidates
diff --git a/linux-x64/clang/include/llvm/Analysis/InlineCost.h b/linux-x64/clang/include/llvm/Analysis/InlineCost.h
index 138d3ac..529fb75 100644
--- a/linux-x64/clang/include/llvm/Analysis/InlineCost.h
+++ b/linux-x64/clang/include/llvm/Analysis/InlineCost.h
@@ -52,7 +52,7 @@
const unsigned TotalAllocaSizeRecursiveCaller = 1024;
}
-/// \brief Represents the cost of inlining a function.
+/// Represents the cost of inlining a function.
///
/// This supports special values for functions which should "always" or
/// "never" be inlined. Otherwise, the cost represents a unitless amount;
@@ -68,14 +68,21 @@
NeverInlineCost = INT_MAX
};
- /// \brief The estimated cost of inlining this callsite.
+ /// The estimated cost of inlining this callsite.
const int Cost;
- /// \brief The adjusted threshold against which this cost was computed.
+ /// The adjusted threshold against which this cost was computed.
const int Threshold;
+ /// Must be set for Always and Never instances.
+ const char *Reason = nullptr;
+
// Trivial constructor, interesting logic in the factory functions below.
- InlineCost(int Cost, int Threshold) : Cost(Cost), Threshold(Threshold) {}
+ InlineCost(int Cost, int Threshold, const char *Reason = nullptr)
+ : Cost(Cost), Threshold(Threshold), Reason(Reason) {
+ assert((isVariable() || Reason) &&
+ "Reason must be provided for Never or Always");
+ }
public:
static InlineCost get(int Cost, int Threshold) {
@@ -83,14 +90,14 @@
assert(Cost < NeverInlineCost && "Cost crosses sentinel value");
return InlineCost(Cost, Threshold);
}
- static InlineCost getAlways() {
- return InlineCost(AlwaysInlineCost, 0);
+ static InlineCost getAlways(const char *Reason) {
+ return InlineCost(AlwaysInlineCost, 0, Reason);
}
- static InlineCost getNever() {
- return InlineCost(NeverInlineCost, 0);
+ static InlineCost getNever(const char *Reason) {
+ return InlineCost(NeverInlineCost, 0, Reason);
}
- /// \brief Test whether the inline cost is low enough for inlining.
+ /// Test whether the inline cost is low enough for inlining.
explicit operator bool() const {
return Cost < Threshold;
}
@@ -99,25 +106,43 @@
bool isNever() const { return Cost == NeverInlineCost; }
bool isVariable() const { return !isAlways() && !isNever(); }
- /// \brief Get the inline cost estimate.
+ /// Get the inline cost estimate.
/// It is an error to call this on an "always" or "never" InlineCost.
int getCost() const {
assert(isVariable() && "Invalid access of InlineCost");
return Cost;
}
- /// \brief Get the threshold against which the cost was computed
+ /// Get the threshold against which the cost was computed
int getThreshold() const {
assert(isVariable() && "Invalid access of InlineCost");
return Threshold;
}
- /// \brief Get the cost delta from the threshold for inlining.
+ /// Get the reason of Always or Never.
+ const char *getReason() const {
+ assert((Reason || isVariable()) &&
+ "InlineCost reason must be set for Always or Never");
+ return Reason;
+ }
+
+ /// Get the cost delta from the threshold for inlining.
/// Only valid if the cost is of the variable kind. Returns a negative
/// value if the cost is too high to inline.
int getCostDelta() const { return Threshold - getCost(); }
};
+/// InlineResult is basically true or false. For false results the message
+/// describes a reason why it is decided not to inline.
+struct InlineResult {
+ const char *message = nullptr;
+ InlineResult(bool result, const char *message = nullptr)
+ : message(result ? nullptr : (message ? message : "cost > threshold")) {}
+ InlineResult(const char *message = nullptr) : message(message) {}
+ operator bool() const { return !message; }
+ operator const char *() const { return message; }
+};
+
/// Thresholds to tune inline cost analysis. The inline cost analysis decides
/// the condition to apply a threshold and applies it. Otherwise,
/// DefaultThreshold is used. If a threshold is Optional, it is applied only
@@ -178,7 +203,7 @@
/// and the call/return instruction.
int getCallsiteCost(CallSite CS, const DataLayout &DL);
-/// \brief Get an InlineCost object representing the cost of inlining this
+/// Get an InlineCost object representing the cost of inlining this
/// callsite.
///
/// Note that a default threshold is passed into this function. This threshold
@@ -195,7 +220,7 @@
Optional<function_ref<BlockFrequencyInfo &(Function &)>> GetBFI,
ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE = nullptr);
-/// \brief Get an InlineCost with the callee explicitly specified.
+/// Get an InlineCost with the callee explicitly specified.
/// This allows you to calculate the cost of inlining a function via a
/// pointer. This behaves exactly as the version with no explicit callee
/// parameter in all other respects.
@@ -207,7 +232,7 @@
Optional<function_ref<BlockFrequencyInfo &(Function &)>> GetBFI,
ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE);
-/// \brief Minimal filter to detect invalid constructs for inlining.
+/// Minimal filter to detect invalid constructs for inlining.
bool isInlineViable(Function &Callee);
}
diff --git a/linux-x64/clang/include/llvm/Analysis/IteratedDominanceFrontier.h b/linux-x64/clang/include/llvm/Analysis/IteratedDominanceFrontier.h
index edaf4e9..6b19507 100644
--- a/linux-x64/clang/include/llvm/Analysis/IteratedDominanceFrontier.h
+++ b/linux-x64/clang/include/llvm/Analysis/IteratedDominanceFrontier.h
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-/// \brief Compute iterated dominance frontiers using a linear time algorithm.
+/// Compute iterated dominance frontiers using a linear time algorithm.
///
/// The algorithm used here is based on:
///
@@ -32,7 +32,7 @@
namespace llvm {
-/// \brief Determine the iterated dominance frontier, given a set of defining
+/// Determine the iterated dominance frontier, given a set of defining
/// blocks, and optionally, a set of live-in blocks.
///
/// In turn, the results can be used to place phi nodes.
@@ -48,7 +48,7 @@
IDFCalculator(DominatorTreeBase<BasicBlock, IsPostDom> &DT)
: DT(DT), useLiveIn(false) {}
- /// \brief Give the IDF calculator the set of blocks in which the value is
+ /// Give the IDF calculator the set of blocks in which the value is
/// defined. This is equivalent to the set of starting blocks it should be
/// calculating the IDF for (though later gets pruned based on liveness).
///
@@ -57,7 +57,7 @@
DefBlocks = &Blocks;
}
- /// \brief Give the IDF calculator the set of blocks in which the value is
+ /// Give the IDF calculator the set of blocks in which the value is
/// live on entry to the block. This is used to prune the IDF calculation to
/// not include blocks where any phi insertion would be dead.
///
@@ -68,14 +68,14 @@
useLiveIn = true;
}
- /// \brief Reset the live-in block set to be empty, and tell the IDF
+ /// Reset the live-in block set to be empty, and tell the IDF
/// calculator to not use liveness anymore.
void resetLiveInBlocks() {
LiveInBlocks = nullptr;
useLiveIn = false;
}
- /// \brief Calculate iterated dominance frontiers
+ /// Calculate iterated dominance frontiers
///
/// This uses the linear-time phi algorithm based on DJ-graphs mentioned in
/// the file-level comment. It performs DF->IDF pruning using the live-in
diff --git a/linux-x64/clang/include/llvm/Analysis/LazyBlockFrequencyInfo.h b/linux-x64/clang/include/llvm/Analysis/LazyBlockFrequencyInfo.h
index 71ce084..d1afb63 100644
--- a/linux-x64/clang/include/llvm/Analysis/LazyBlockFrequencyInfo.h
+++ b/linux-x64/clang/include/llvm/Analysis/LazyBlockFrequencyInfo.h
@@ -75,7 +75,7 @@
const LoopInfoT *LI;
};
-/// \brief This is an alternative analysis pass to
+/// This is an alternative analysis pass to
/// BlockFrequencyInfoWrapperPass. The difference is that with this pass the
/// block frequencies are not computed when the analysis pass is executed but
/// rather when the BFI result is explicitly requested by the analysis client.
@@ -109,10 +109,10 @@
LazyBlockFrequencyInfoPass();
- /// \brief Compute and return the block frequencies.
+ /// Compute and return the block frequencies.
BlockFrequencyInfo &getBFI() { return LBFI.getCalculated(); }
- /// \brief Compute and return the block frequencies.
+ /// Compute and return the block frequencies.
const BlockFrequencyInfo &getBFI() const { return LBFI.getCalculated(); }
void getAnalysisUsage(AnalysisUsage &AU) const override;
@@ -126,7 +126,7 @@
void print(raw_ostream &OS, const Module *M) const override;
};
-/// \brief Helper for client passes to initialize dependent passes for LBFI.
+/// Helper for client passes to initialize dependent passes for LBFI.
void initializeLazyBFIPassPass(PassRegistry &Registry);
}
#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/LazyBranchProbabilityInfo.h b/linux-x64/clang/include/llvm/Analysis/LazyBranchProbabilityInfo.h
index e1d404b..9e6bcfe 100644
--- a/linux-x64/clang/include/llvm/Analysis/LazyBranchProbabilityInfo.h
+++ b/linux-x64/clang/include/llvm/Analysis/LazyBranchProbabilityInfo.h
@@ -26,7 +26,7 @@
class LoopInfo;
class TargetLibraryInfo;
-/// \brief This is an alternative analysis pass to
+/// This is an alternative analysis pass to
/// BranchProbabilityInfoWrapperPass. The difference is that with this pass the
/// branch probabilities are not computed when the analysis pass is executed but
/// rather when the BPI results is explicitly requested by the analysis client.
@@ -89,10 +89,10 @@
LazyBranchProbabilityInfoPass();
- /// \brief Compute and return the branch probabilities.
+ /// Compute and return the branch probabilities.
BranchProbabilityInfo &getBPI() { return LBPI->getCalculated(); }
- /// \brief Compute and return the branch probabilities.
+ /// Compute and return the branch probabilities.
const BranchProbabilityInfo &getBPI() const { return LBPI->getCalculated(); }
void getAnalysisUsage(AnalysisUsage &AU) const override;
@@ -106,10 +106,10 @@
void print(raw_ostream &OS, const Module *M) const override;
};
-/// \brief Helper for client passes to initialize dependent passes for LBPI.
+/// Helper for client passes to initialize dependent passes for LBPI.
void initializeLazyBPIPassPass(PassRegistry &Registry);
-/// \brief Simple trait class that provides a mapping between BPI passes and the
+/// Simple trait class that provides a mapping between BPI passes and the
/// corresponding BPInfo.
template <typename PassT> struct BPIPassTrait {
static PassT &getBPI(PassT *P) { return *P; }
diff --git a/linux-x64/clang/include/llvm/Analysis/LazyValueInfo.h b/linux-x64/clang/include/llvm/Analysis/LazyValueInfo.h
index cea5bf0..1a4fdb5 100644
--- a/linux-x64/clang/include/llvm/Analysis/LazyValueInfo.h
+++ b/linux-x64/clang/include/llvm/Analysis/LazyValueInfo.h
@@ -128,7 +128,7 @@
FunctionAnalysisManager::Invalidator &Inv);
};
-/// \brief Analysis to compute lazy value information.
+/// Analysis to compute lazy value information.
class LazyValueAnalysis : public AnalysisInfoMixin<LazyValueAnalysis> {
public:
typedef LazyValueInfo Result;
diff --git a/linux-x64/clang/include/llvm/Analysis/Lint.h b/linux-x64/clang/include/llvm/Analysis/Lint.h
index 7c88b13..db5919f 100644
--- a/linux-x64/clang/include/llvm/Analysis/Lint.h
+++ b/linux-x64/clang/include/llvm/Analysis/Lint.h
@@ -26,12 +26,12 @@
class Module;
class Function;
-/// @brief Create a lint pass.
+/// Create a lint pass.
///
/// Check a module or function.
FunctionPass *createLintPass();
-/// @brief Check a module.
+/// Check a module.
///
/// This should only be used for debugging, because it plays games with
/// PassManagers and stuff.
diff --git a/linux-x64/clang/include/llvm/Analysis/LoopAccessAnalysis.h b/linux-x64/clang/include/llvm/Analysis/LoopAccessAnalysis.h
index 28154c8..d27b3e4 100644
--- a/linux-x64/clang/include/llvm/Analysis/LoopAccessAnalysis.h
+++ b/linux-x64/clang/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -38,25 +38,25 @@
class LoopAccessInfo;
class OptimizationRemarkEmitter;
-/// \brief Collection of parameters shared beetween the Loop Vectorizer and the
+/// Collection of parameters shared beetween the Loop Vectorizer and the
/// Loop Access Analysis.
struct VectorizerParams {
- /// \brief Maximum SIMD width.
+ /// Maximum SIMD width.
static const unsigned MaxVectorWidth;
- /// \brief VF as overridden by the user.
+ /// VF as overridden by the user.
static unsigned VectorizationFactor;
- /// \brief Interleave factor as overridden by the user.
+ /// Interleave factor as overridden by the user.
static unsigned VectorizationInterleave;
- /// \brief True if force-vector-interleave was specified by the user.
+ /// True if force-vector-interleave was specified by the user.
static bool isInterleaveForced();
- /// \\brief When performing memory disambiguation checks at runtime do not
+ /// When performing memory disambiguation checks at runtime do not
/// make more than this number of comparisons.
static unsigned RuntimeMemoryCheckThreshold;
};
-/// \brief Checks memory dependences among accesses to the same underlying
+/// Checks memory dependences among accesses to the same underlying
/// object to determine whether there vectorization is legal or not (and at
/// which vectorization factor).
///
@@ -94,12 +94,12 @@
public:
typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
- /// \brief Set of potential dependent memory accesses.
+ /// Set of potential dependent memory accesses.
typedef EquivalenceClasses<MemAccessInfo> DepCandidates;
- /// \brief Dependece between memory access instructions.
+ /// Dependece between memory access instructions.
struct Dependence {
- /// \brief The type of the dependence.
+ /// The type of the dependence.
enum DepType {
// No dependence.
NoDep,
@@ -127,36 +127,36 @@
BackwardVectorizableButPreventsForwarding
};
- /// \brief String version of the types.
+ /// String version of the types.
static const char *DepName[];
- /// \brief Index of the source of the dependence in the InstMap vector.
+ /// Index of the source of the dependence in the InstMap vector.
unsigned Source;
- /// \brief Index of the destination of the dependence in the InstMap vector.
+ /// Index of the destination of the dependence in the InstMap vector.
unsigned Destination;
- /// \brief The type of the dependence.
+ /// The type of the dependence.
DepType Type;
Dependence(unsigned Source, unsigned Destination, DepType Type)
: Source(Source), Destination(Destination), Type(Type) {}
- /// \brief Return the source instruction of the dependence.
+ /// Return the source instruction of the dependence.
Instruction *getSource(const LoopAccessInfo &LAI) const;
- /// \brief Return the destination instruction of the dependence.
+ /// Return the destination instruction of the dependence.
Instruction *getDestination(const LoopAccessInfo &LAI) const;
- /// \brief Dependence types that don't prevent vectorization.
+ /// Dependence types that don't prevent vectorization.
static bool isSafeForVectorization(DepType Type);
- /// \brief Lexically forward dependence.
+ /// Lexically forward dependence.
bool isForward() const;
- /// \brief Lexically backward dependence.
+ /// Lexically backward dependence.
bool isBackward() const;
- /// \brief May be a lexically backward dependence type (includes Unknown).
+ /// May be a lexically backward dependence type (includes Unknown).
bool isPossiblyBackward() const;
- /// \brief Print the dependence. \p Instr is used to map the instruction
+ /// Print the dependence. \p Instr is used to map the instruction
/// indices to instructions.
void print(raw_ostream &OS, unsigned Depth,
const SmallVectorImpl<Instruction *> &Instrs) const;
@@ -167,7 +167,7 @@
ShouldRetryWithRuntimeCheck(false), SafeForVectorization(true),
RecordDependences(true) {}
- /// \brief Register the location (instructions are given increasing numbers)
+ /// Register the location (instructions are given increasing numbers)
/// of a write access.
void addAccess(StoreInst *SI) {
Value *Ptr = SI->getPointerOperand();
@@ -176,7 +176,7 @@
++AccessIdx;
}
- /// \brief Register the location (instructions are given increasing numbers)
+ /// Register the location (instructions are given increasing numbers)
/// of a write access.
void addAccess(LoadInst *LI) {
Value *Ptr = LI->getPointerOperand();
@@ -185,29 +185,29 @@
++AccessIdx;
}
- /// \brief Check whether the dependencies between the accesses are safe.
+ /// Check whether the dependencies between the accesses are safe.
///
/// Only checks sets with elements in \p CheckDeps.
bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
const ValueToValueMap &Strides);
- /// \brief No memory dependence was encountered that would inhibit
+ /// No memory dependence was encountered that would inhibit
/// vectorization.
bool isSafeForVectorization() const { return SafeForVectorization; }
- /// \brief The maximum number of bytes of a vector register we can vectorize
+ /// The maximum number of bytes of a vector register we can vectorize
/// the accesses safely with.
uint64_t getMaxSafeDepDistBytes() { return MaxSafeDepDistBytes; }
- /// \brief Return the number of elements that are safe to operate on
+ /// Return the number of elements that are safe to operate on
/// simultaneously, multiplied by the size of the element in bits.
uint64_t getMaxSafeRegisterWidth() const { return MaxSafeRegisterWidth; }
- /// \brief In same cases when the dependency check fails we can still
+ /// In same cases when the dependency check fails we can still
/// vectorize the loop with a dynamic array access check.
bool shouldRetryWithRuntimeCheck() { return ShouldRetryWithRuntimeCheck; }
- /// \brief Returns the memory dependences. If null is returned we exceeded
+ /// Returns the memory dependences. If null is returned we exceeded
/// the MaxDependences threshold and this information is not
/// available.
const SmallVectorImpl<Dependence> *getDependences() const {
@@ -216,13 +216,13 @@
void clearDependences() { Dependences.clear(); }
- /// \brief The vector of memory access instructions. The indices are used as
+ /// The vector of memory access instructions. The indices are used as
/// instruction identifiers in the Dependence class.
const SmallVectorImpl<Instruction *> &getMemoryInstructions() const {
return InstMap;
}
- /// \brief Generate a mapping between the memory instructions and their
+ /// Generate a mapping between the memory instructions and their
/// indices according to program order.
DenseMap<Instruction *, unsigned> generateInstructionOrderMap() const {
DenseMap<Instruction *, unsigned> OrderMap;
@@ -233,7 +233,7 @@
return OrderMap;
}
- /// \brief Find the set of instructions that read or write via \p Ptr.
+ /// Find the set of instructions that read or write via \p Ptr.
SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
bool isWrite) const;
@@ -247,42 +247,42 @@
PredicatedScalarEvolution &PSE;
const Loop *InnermostLoop;
- /// \brief Maps access locations (ptr, read/write) to program order.
+ /// Maps access locations (ptr, read/write) to program order.
DenseMap<MemAccessInfo, std::vector<unsigned> > Accesses;
- /// \brief Memory access instructions in program order.
+ /// Memory access instructions in program order.
SmallVector<Instruction *, 16> InstMap;
- /// \brief The program order index to be used for the next instruction.
+ /// The program order index to be used for the next instruction.
unsigned AccessIdx;
// We can access this many bytes in parallel safely.
uint64_t MaxSafeDepDistBytes;
- /// \brief Number of elements (from consecutive iterations) that are safe to
+ /// Number of elements (from consecutive iterations) that are safe to
/// operate on simultaneously, multiplied by the size of the element in bits.
/// The size of the element is taken from the memory access that is most
/// restrictive.
uint64_t MaxSafeRegisterWidth;
- /// \brief If we see a non-constant dependence distance we can still try to
+ /// If we see a non-constant dependence distance we can still try to
/// vectorize this loop with runtime checks.
bool ShouldRetryWithRuntimeCheck;
- /// \brief No memory dependence was encountered that would inhibit
+ /// No memory dependence was encountered that would inhibit
/// vectorization.
bool SafeForVectorization;
- //// \brief True if Dependences reflects the dependences in the
+ //// True if Dependences reflects the dependences in the
//// loop. If false we exceeded MaxDependences and
//// Dependences is invalid.
bool RecordDependences;
- /// \brief Memory dependences collected during the analysis. Only valid if
+ /// Memory dependences collected during the analysis. Only valid if
/// RecordDependences is true.
SmallVector<Dependence, 8> Dependences;
- /// \brief Check whether there is a plausible dependence between the two
+ /// Check whether there is a plausible dependence between the two
/// accesses.
///
/// Access \p A must happen before \p B in program order. The two indices
@@ -298,7 +298,7 @@
const MemAccessInfo &B, unsigned BIdx,
const ValueToValueMap &Strides);
- /// \brief Check whether the data dependence could prevent store-load
+ /// Check whether the data dependence could prevent store-load
/// forwarding.
///
/// \return false if we shouldn't vectorize at all or avoid larger
@@ -306,7 +306,7 @@
bool couldPreventStoreLoadForward(uint64_t Distance, uint64_t TypeByteSize);
};
-/// \brief Holds information about the memory runtime legality checks to verify
+/// Holds information about the memory runtime legality checks to verify
/// that a group of pointers do not overlap.
class RuntimePointerChecking {
public:
@@ -355,13 +355,13 @@
unsigned ASId, const ValueToValueMap &Strides,
PredicatedScalarEvolution &PSE);
- /// \brief No run-time memory checking is necessary.
+ /// No run-time memory checking is necessary.
bool empty() const { return Pointers.empty(); }
/// A grouping of pointers. A single memcheck is required between
/// two groups.
struct CheckingPtrGroup {
- /// \brief Create a new pointer checking group containing a single
+ /// Create a new pointer checking group containing a single
/// pointer, with index \p Index in RtCheck.
CheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
: RtCheck(RtCheck), High(RtCheck.Pointers[Index].End),
@@ -369,7 +369,7 @@
Members.push_back(Index);
}
- /// \brief Tries to add the pointer recorded in RtCheck at index
+ /// Tries to add the pointer recorded in RtCheck at index
/// \p Index to this pointer checking group. We can only add a pointer
/// to a checking group if we will still be able to get
/// the upper and lower bounds of the check. Returns true in case
@@ -390,7 +390,7 @@
SmallVector<unsigned, 2> Members;
};
- /// \brief A memcheck which made up of a pair of grouped pointers.
+ /// A memcheck which made up of a pair of grouped pointers.
///
/// These *have* to be const for now, since checks are generated from
/// CheckingPtrGroups in LAI::addRuntimeChecks which is a const member
@@ -399,24 +399,24 @@
typedef std::pair<const CheckingPtrGroup *, const CheckingPtrGroup *>
PointerCheck;
- /// \brief Generate the checks and store it. This also performs the grouping
+ /// Generate the checks and store it. This also performs the grouping
/// of pointers to reduce the number of memchecks necessary.
void generateChecks(MemoryDepChecker::DepCandidates &DepCands,
bool UseDependencies);
- /// \brief Returns the checks that generateChecks created.
+ /// Returns the checks that generateChecks created.
const SmallVector<PointerCheck, 4> &getChecks() const { return Checks; }
- /// \brief Decide if we need to add a check between two groups of pointers,
+ /// Decide if we need to add a check between two groups of pointers,
/// according to needsChecking.
bool needsChecking(const CheckingPtrGroup &M,
const CheckingPtrGroup &N) const;
- /// \brief Returns the number of run-time checks required according to
+ /// Returns the number of run-time checks required according to
/// needsChecking.
unsigned getNumberOfChecks() const { return Checks.size(); }
- /// \brief Print the list run-time memory checks necessary.
+ /// Print the list run-time memory checks necessary.
void print(raw_ostream &OS, unsigned Depth = 0) const;
/// Print \p Checks.
@@ -432,7 +432,7 @@
/// Holds a partitioning of pointers into "check groups".
SmallVector<CheckingPtrGroup, 2> CheckingGroups;
- /// \brief Check if pointers are in the same partition
+ /// Check if pointers are in the same partition
///
/// \p PtrToPartition contains the partition number for pointers (-1 if the
/// pointer belongs to multiple partitions).
@@ -440,17 +440,17 @@
arePointersInSamePartition(const SmallVectorImpl<int> &PtrToPartition,
unsigned PtrIdx1, unsigned PtrIdx2);
- /// \brief Decide whether we need to issue a run-time check for pointer at
+ /// Decide whether we need to issue a run-time check for pointer at
/// index \p I and \p J to prove their independence.
bool needsChecking(unsigned I, unsigned J) const;
- /// \brief Return PointerInfo for pointer at index \p PtrIdx.
+ /// Return PointerInfo for pointer at index \p PtrIdx.
const PointerInfo &getPointerInfo(unsigned PtrIdx) const {
return Pointers[PtrIdx];
}
private:
- /// \brief Groups pointers such that a single memcheck is required
+ /// Groups pointers such that a single memcheck is required
/// between two different groups. This will clear the CheckingGroups vector
/// and re-compute it. We will only group dependecies if \p UseDependencies
/// is true, otherwise we will create a separate group for each pointer.
@@ -464,12 +464,12 @@
/// Holds a pointer to the ScalarEvolution analysis.
ScalarEvolution *SE;
- /// \brief Set of run-time checks required to establish independence of
+ /// Set of run-time checks required to establish independence of
/// otherwise may-aliasing pointers in the loop.
SmallVector<PointerCheck, 4> Checks;
};
-/// \brief Drive the analysis of memory accesses in the loop
+/// Drive the analysis of memory accesses in the loop
///
/// This class is responsible for analyzing the memory accesses of a loop. It
/// collects the accesses and then its main helper the AccessAnalysis class
@@ -503,7 +503,7 @@
return PtrRtChecking.get();
}
- /// \brief Number of memchecks required to prove independence of otherwise
+ /// Number of memchecks required to prove independence of otherwise
/// may-alias pointers.
unsigned getNumRuntimePointerChecks() const {
return PtrRtChecking->getNumberOfChecks();
@@ -521,7 +521,7 @@
unsigned getNumStores() const { return NumStores; }
unsigned getNumLoads() const { return NumLoads;}
- /// \brief Add code that checks at runtime if the accessed arrays overlap.
+ /// Add code that checks at runtime if the accessed arrays overlap.
///
/// Returns a pair of instructions where the first element is the first
/// instruction generated in possibly a sequence of instructions and the
@@ -529,7 +529,7 @@
std::pair<Instruction *, Instruction *>
addRuntimeChecks(Instruction *Loc) const;
- /// \brief Generete the instructions for the checks in \p PointerChecks.
+ /// Generete the instructions for the checks in \p PointerChecks.
///
/// Returns a pair of instructions where the first element is the first
/// instruction generated in possibly a sequence of instructions and the
@@ -539,32 +539,32 @@
const SmallVectorImpl<RuntimePointerChecking::PointerCheck>
&PointerChecks) const;
- /// \brief The diagnostics report generated for the analysis. E.g. why we
+ /// The diagnostics report generated for the analysis. E.g. why we
/// couldn't analyze the loop.
const OptimizationRemarkAnalysis *getReport() const { return Report.get(); }
- /// \brief the Memory Dependence Checker which can determine the
+ /// the Memory Dependence Checker which can determine the
/// loop-independent and loop-carried dependences between memory accesses.
const MemoryDepChecker &getDepChecker() const { return *DepChecker; }
- /// \brief Return the list of instructions that use \p Ptr to read or write
+ /// Return the list of instructions that use \p Ptr to read or write
/// memory.
SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
bool isWrite) const {
return DepChecker->getInstructionsForAccess(Ptr, isWrite);
}
- /// \brief If an access has a symbolic strides, this maps the pointer value to
+ /// If an access has a symbolic strides, this maps the pointer value to
/// the stride symbol.
const ValueToValueMap &getSymbolicStrides() const { return SymbolicStrides; }
- /// \brief Pointer has a symbolic stride.
+ /// Pointer has a symbolic stride.
bool hasStride(Value *V) const { return StrideSet.count(V); }
- /// \brief Print the information about the memory accesses in the loop.
+ /// Print the information about the memory accesses in the loop.
void print(raw_ostream &OS, unsigned Depth = 0) const;
- /// \brief Checks existence of store to invariant address inside loop.
+ /// Checks existence of store to invariant address inside loop.
/// If the loop has any store to invariant address, then it returns true,
/// else returns false.
bool hasStoreToLoopInvariantAddress() const {
@@ -579,15 +579,15 @@
const PredicatedScalarEvolution &getPSE() const { return *PSE; }
private:
- /// \brief Analyze the loop.
+ /// Analyze the loop.
void analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
const TargetLibraryInfo *TLI, DominatorTree *DT);
- /// \brief Check if the structure of the loop allows it to be analyzed by this
+ /// Check if the structure of the loop allows it to be analyzed by this
/// pass.
bool canAnalyzeLoop();
- /// \brief Save the analysis remark.
+ /// Save the analysis remark.
///
/// LAA does not directly emits the remarks. Instead it stores it which the
/// client can retrieve and presents as its own analysis
@@ -595,7 +595,7 @@
OptimizationRemarkAnalysis &recordAnalysis(StringRef RemarkName,
Instruction *Instr = nullptr);
- /// \brief Collect memory access with loop invariant strides.
+ /// Collect memory access with loop invariant strides.
///
/// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
/// invariant.
@@ -607,7 +607,7 @@
/// at runtime. Using std::unique_ptr to make using move ctor simpler.
std::unique_ptr<RuntimePointerChecking> PtrRtChecking;
- /// \brief the Memory Dependence Checker which can determine the
+ /// the Memory Dependence Checker which can determine the
/// loop-independent and loop-carried dependences between memory accesses.
std::unique_ptr<MemoryDepChecker> DepChecker;
@@ -618,28 +618,28 @@
uint64_t MaxSafeDepDistBytes;
- /// \brief Cache the result of analyzeLoop.
+ /// Cache the result of analyzeLoop.
bool CanVecMem;
- /// \brief Indicator for storing to uniform addresses.
+ /// Indicator for storing to uniform addresses.
/// If a loop has write to a loop invariant address then it should be true.
bool StoreToLoopInvariantAddress;
- /// \brief The diagnostics report generated for the analysis. E.g. why we
+ /// The diagnostics report generated for the analysis. E.g. why we
/// couldn't analyze the loop.
std::unique_ptr<OptimizationRemarkAnalysis> Report;
- /// \brief If an access has a symbolic strides, this maps the pointer value to
+ /// If an access has a symbolic strides, this maps the pointer value to
/// the stride symbol.
ValueToValueMap SymbolicStrides;
- /// \brief Set of symbolic strides values.
+ /// Set of symbolic strides values.
SmallPtrSet<Value *, 8> StrideSet;
};
Value *stripIntegerCast(Value *V);
-/// \brief Return the SCEV corresponding to a pointer with the symbolic stride
+/// Return the SCEV corresponding to a pointer with the symbolic stride
/// replaced with constant one, assuming the SCEV predicate associated with
/// \p PSE is true.
///
@@ -653,7 +653,7 @@
const ValueToValueMap &PtrToStride,
Value *Ptr, Value *OrigPtr = nullptr);
-/// \brief If the pointer has a constant stride return it in units of its
+/// If the pointer has a constant stride return it in units of its
/// element size. Otherwise return zero.
///
/// Ensure that it does not wrap in the address space, assuming the predicate
@@ -667,12 +667,26 @@
const ValueToValueMap &StridesMap = ValueToValueMap(),
bool Assume = false, bool ShouldCheckWrap = true);
-/// \brief Returns true if the memory operations \p A and \p B are consecutive.
-/// This is a simple API that does not depend on the analysis pass.
+/// Attempt to sort the pointers in \p VL and return the sorted indices
+/// in \p SortedIndices, if reordering is required.
+///
+/// Returns 'true' if sorting is legal, otherwise returns 'false'.
+///
+/// For example, for a given \p VL of memory accesses in program order, a[i+4],
+/// a[i+0], a[i+1] and a[i+7], this function will sort the \p VL and save the
+/// sorted indices in \p SortedIndices as a[i+0], a[i+1], a[i+4], a[i+7] and
+/// saves the mask for actual memory accesses in program order in
+/// \p SortedIndices as <1,2,0,3>
+bool sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
+ ScalarEvolution &SE,
+ SmallVectorImpl<unsigned> &SortedIndices);
+
+/// Returns true if the memory operations \p A and \p B are consecutive.
+/// This is a simple API that does not depend on the analysis pass.
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
ScalarEvolution &SE, bool CheckType = true);
-/// \brief This analysis provides dependence information for the memory accesses
+/// This analysis provides dependence information for the memory accesses
/// of a loop.
///
/// It runs the analysis for a loop on demand. This can be initiated by
@@ -691,7 +705,7 @@
void getAnalysisUsage(AnalysisUsage &AU) const override;
- /// \brief Query the result of the loop access information for the loop \p L.
+ /// Query the result of the loop access information for the loop \p L.
///
/// If there is no cached result available run the analysis.
const LoopAccessInfo &getInfo(Loop *L);
@@ -701,11 +715,11 @@
LoopAccessInfoMap.clear();
}
- /// \brief Print the result of the analysis when invoked with -analyze.
+ /// Print the result of the analysis when invoked with -analyze.
void print(raw_ostream &OS, const Module *M = nullptr) const override;
private:
- /// \brief The cache.
+ /// The cache.
DenseMap<Loop *, std::unique_ptr<LoopAccessInfo>> LoopAccessInfoMap;
// The used analysis passes.
@@ -716,11 +730,11 @@
LoopInfo *LI;
};
-/// \brief This analysis provides dependence information for the memory
+/// This analysis provides dependence information for the memory
/// accesses of a loop.
///
/// It runs the analysis for a loop on demand. This can be initiated by
-/// querying the loop access info via AM.getResult<LoopAccessAnalysis>.
+/// querying the loop access info via AM.getResult<LoopAccessAnalysis>.
/// getResult return a LoopAccessInfo object. See this class for the
/// specifics of what information is provided.
class LoopAccessAnalysis
diff --git a/linux-x64/clang/include/llvm/Analysis/LoopAnalysisManager.h b/linux-x64/clang/include/llvm/Analysis/LoopAnalysisManager.h
index 417ee97..00e562c 100644
--- a/linux-x64/clang/include/llvm/Analysis/LoopAnalysisManager.h
+++ b/linux-x64/clang/include/llvm/Analysis/LoopAnalysisManager.h
@@ -69,7 +69,7 @@
extern template class AllAnalysesOn<Loop>;
extern template class AnalysisManager<Loop, LoopStandardAnalysisResults &>;
-/// \brief The loop analysis manager.
+/// The loop analysis manager.
///
/// See the documentation for the AnalysisManager template for detail
/// documentation. This typedef serves as a convenient way to refer to this
diff --git a/linux-x64/clang/include/llvm/Analysis/LoopInfo.h b/linux-x64/clang/include/llvm/Analysis/LoopInfo.h
index 28afc39..30b29d6 100644
--- a/linux-x64/clang/include/llvm/Analysis/LoopInfo.h
+++ b/linux-x64/clang/include/llvm/Analysis/LoopInfo.h
@@ -178,6 +178,12 @@
return DenseBlockSet;
}
+ /// Return a direct, immutable handle to the blocks set.
+ const SmallPtrSetImpl<const BlockT *> &getBlocksSet() const {
+ assert(!isInvalid() && "Loop not in a valid state!");
+ return DenseBlockSet;
+ }
+
/// Return true if this loop is no longer valid. The only valid use of this
/// helper is "assert(L.isInvalid())" or equivalent, since IsInvalid is set to
/// true by the destructor. In other words, if this accessor returns true,
@@ -255,6 +261,20 @@
/// Otherwise return null.
BlockT *getExitBlock() const;
+ /// Return true if no exit block for the loop has a predecessor that is
+ /// outside the loop.
+ bool hasDedicatedExits() const;
+
+ /// Return all unique successor blocks of this loop.
+ /// These are the blocks _outside of the current loop_ which are branched to.
+ /// This assumes that loop exits are in canonical form, i.e. all exits are
+ /// dedicated exits.
+ void getUniqueExitBlocks(SmallVectorImpl<BlockT *> &ExitBlocks) const;
+
+ /// If getUniqueExitBlocks would return exactly one block, return that block.
+ /// Otherwise return null.
+ BlockT *getUniqueExitBlock() const;
+
/// Edge type.
typedef std::pair<const BlockT *, const BlockT *> Edge;
@@ -438,7 +458,7 @@
/// in the CFG are necessarily loops.
class Loop : public LoopBase<BasicBlock, Loop> {
public:
- /// \brief A range representing the start and end location of a loop.
+ /// A range representing the start and end location of a loop.
class LocRange {
DebugLoc Start;
DebugLoc End;
@@ -452,7 +472,7 @@
const DebugLoc &getStart() const { return Start; }
const DebugLoc &getEnd() const { return End; }
- /// \brief Check for null.
+ /// Check for null.
///
explicit operator bool() const { return Start && End; }
};
@@ -527,7 +547,7 @@
///
/// If this loop contains the same llvm.loop metadata on each branch to the
/// header then the node is returned. If any latch instruction does not
- /// contain llvm.loop or or if multiple latches contain different nodes then
+ /// contain llvm.loop or if multiple latches contain different nodes then
/// 0 is returned.
MDNode *getLoopID() const;
/// Set the llvm.loop loop id metadata for this loop.
@@ -547,20 +567,6 @@
/// unrolling pass is run more than once (which it generally is).
void setLoopAlreadyUnrolled();
- /// Return true if no exit block for the loop has a predecessor that is
- /// outside the loop.
- bool hasDedicatedExits() const;
-
- /// Return all unique successor blocks of this loop.
- /// These are the blocks _outside of the current loop_ which are branched to.
- /// This assumes that loop exits are in canonical form, i.e. all exits are
- /// dedicated exits.
- void getUniqueExitBlocks(SmallVectorImpl<BasicBlock *> &ExitBlocks) const;
-
- /// If getUniqueExitBlocks would return exactly one block, return that block.
- /// Otherwise return null.
- BasicBlock *getUniqueExitBlock() const;
-
void dump() const;
void dumpVerbose() const;
@@ -929,7 +935,7 @@
static ChildIteratorType child_end(NodeRef N) { return N->end(); }
};
-/// \brief Analysis pass that exposes the \c LoopInfo for a function.
+/// Analysis pass that exposes the \c LoopInfo for a function.
class LoopAnalysis : public AnalysisInfoMixin<LoopAnalysis> {
friend AnalysisInfoMixin<LoopAnalysis>;
static AnalysisKey Key;
@@ -940,7 +946,7 @@
LoopInfo run(Function &F, FunctionAnalysisManager &AM);
};
-/// \brief Printer pass for the \c LoopAnalysis results.
+/// Printer pass for the \c LoopAnalysis results.
class LoopPrinterPass : public PassInfoMixin<LoopPrinterPass> {
raw_ostream &OS;
@@ -949,12 +955,12 @@
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
-/// \brief Verifier pass for the \c LoopAnalysis results.
+/// Verifier pass for the \c LoopAnalysis results.
struct LoopVerifierPass : public PassInfoMixin<LoopVerifierPass> {
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
-/// \brief The legacy pass manager's analysis pass to compute loop information.
+/// The legacy pass manager's analysis pass to compute loop information.
class LoopInfoWrapperPass : public FunctionPass {
LoopInfo LI;
@@ -968,7 +974,7 @@
LoopInfo &getLoopInfo() { return LI; }
const LoopInfo &getLoopInfo() const { return LI; }
- /// \brief Calculate the natural loop information for a given function.
+ /// Calculate the natural loop information for a given function.
bool runOnFunction(Function &F) override;
void verifyAnalysis() const override;
diff --git a/linux-x64/clang/include/llvm/Analysis/LoopInfoImpl.h b/linux-x64/clang/include/llvm/Analysis/LoopInfoImpl.h
index b3a16b5..9413898 100644
--- a/linux-x64/clang/include/llvm/Analysis/LoopInfoImpl.h
+++ b/linux-x64/clang/include/llvm/Analysis/LoopInfoImpl.h
@@ -82,6 +82,74 @@
return nullptr;
}
+template <class BlockT, class LoopT>
+bool LoopBase<BlockT, LoopT>::hasDedicatedExits() const {
+ // Each predecessor of each exit block of a normal loop is contained
+ // within the loop.
+ SmallVector<BlockT *, 4> ExitBlocks;
+ getExitBlocks(ExitBlocks);
+ for (BlockT *EB : ExitBlocks)
+ for (BlockT *Predecessor : children<Inverse<BlockT *>>(EB))
+ if (!contains(Predecessor))
+ return false;
+ // All the requirements are met.
+ return true;
+}
+
+template <class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::getUniqueExitBlocks(
+ SmallVectorImpl<BlockT *> &ExitBlocks) const {
+ typedef GraphTraits<BlockT *> BlockTraits;
+ typedef GraphTraits<Inverse<BlockT *>> InvBlockTraits;
+
+ assert(hasDedicatedExits() &&
+ "getUniqueExitBlocks assumes the loop has canonical form exits!");
+
+ SmallVector<BlockT *, 32> SwitchExitBlocks;
+ for (BlockT *Block : this->blocks()) {
+ SwitchExitBlocks.clear();
+ for (BlockT *Successor : children<BlockT *>(Block)) {
+ // If block is inside the loop then it is not an exit block.
+ if (contains(Successor))
+ continue;
+
+ BlockT *FirstPred = *InvBlockTraits::child_begin(Successor);
+
+      // Only insert this exit block into the output ExitBlocks vector if the
+      // current basic block is the exit block's first predecessor. This
+      // ensures the same exit block is not inserted twice into ExitBlocks.
+ if (Block != FirstPred)
+ continue;
+
+      // If a terminator has more than two successors, for example SwitchInst,
+ // then it is possible that there are multiple edges from current block to
+ // one exit block.
+ if (std::distance(BlockTraits::child_begin(Block),
+ BlockTraits::child_end(Block)) <= 2) {
+ ExitBlocks.push_back(Successor);
+ continue;
+ }
+
+ // In case of multiple edges from current block to exit block, collect
+ // only one edge in ExitBlocks. Use switchExitBlocks to keep track of
+ // duplicate edges.
+ if (!is_contained(SwitchExitBlocks, Successor)) {
+ SwitchExitBlocks.push_back(Successor);
+ ExitBlocks.push_back(Successor);
+ }
+ }
+ }
+}
+
+template <class BlockT, class LoopT>
+BlockT *LoopBase<BlockT, LoopT>::getUniqueExitBlock() const {
+ SmallVector<BlockT *, 8> UniqueExitBlocks;
+ getUniqueExitBlocks(UniqueExitBlocks);
+ if (UniqueExitBlocks.size() == 1)
+ return UniqueExitBlocks[0];
+ return nullptr;
+}
+
/// getExitEdges - Return all pairs of (_inside_block_,_outside_block_).
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::getExitEdges(
@@ -572,8 +640,8 @@
template <typename T>
bool compareVectors(std::vector<T> &BB1, std::vector<T> &BB2) {
- std::sort(BB1.begin(), BB1.end());
- std::sort(BB2.begin(), BB2.end());
+ llvm::sort(BB1.begin(), BB1.end());
+ llvm::sort(BB2.begin(), BB2.end());
return BB1 == BB2;
}
@@ -617,6 +685,15 @@
std::vector<BlockT *> OtherBBs = OtherL->getBlocks();
assert(compareVectors(BBs, OtherBBs) &&
"Mismatched basic blocks in the loops!");
+
+ const SmallPtrSetImpl<const BlockT *> &BlocksSet = L->getBlocksSet();
+  const SmallPtrSetImpl<const BlockT *> &OtherBlocksSet = OtherL->getBlocksSet();
+ assert(BlocksSet.size() == OtherBlocksSet.size() &&
+ std::all_of(BlocksSet.begin(), BlocksSet.end(),
+ [&OtherBlocksSet](const BlockT *BB) {
+ return OtherBlocksSet.count(BB);
+ }) &&
+ "Mismatched basic blocks in BlocksSets!");
}
#endif
@@ -636,6 +713,9 @@
LoopT *L = Entry.second;
assert(Loops.count(L) && "orphaned loop");
assert(L->contains(BB) && "orphaned block");
+ for (LoopT *ChildLoop : *L)
+ assert(!ChildLoop->contains(BB) &&
+ "BBMap should point to the innermost loop containing BB");
}
// Recompute LoopInfo to verify loops structure.
diff --git a/linux-x64/clang/include/llvm/Analysis/LoopUnrollAnalyzer.h b/linux-x64/clang/include/llvm/Analysis/LoopUnrollAnalyzer.h
index 80f3e5f..f45bf0b 100644
--- a/linux-x64/clang/include/llvm/Analysis/LoopUnrollAnalyzer.h
+++ b/linux-x64/clang/include/llvm/Analysis/LoopUnrollAnalyzer.h
@@ -57,7 +57,7 @@
using Base::visit;
private:
- /// \brief A cache of pointer bases and constant-folded offsets corresponding
+ /// A cache of pointer bases and constant-folded offsets corresponding
/// to GEP (or derived from GEP) instructions.
///
/// In order to find the base pointer one needs to perform non-trivial
@@ -65,11 +65,11 @@
/// results saved.
DenseMap<Value *, SimplifiedAddress> SimplifiedAddresses;
- /// \brief SCEV expression corresponding to number of currently simulated
+ /// SCEV expression corresponding to number of currently simulated
/// iteration.
const SCEV *IterationNumber;
- /// \brief A Value->Constant map for keeping values that we managed to
+ /// A Value->Constant map for keeping values that we managed to
/// constant-fold on the given iteration.
///
/// While we walk the loop instructions, we build up and maintain a mapping
diff --git a/linux-x64/clang/include/llvm/Analysis/MemoryBuiltins.h b/linux-x64/clang/include/llvm/Analysis/MemoryBuiltins.h
index 7d53e34..5418128 100644
--- a/linux-x64/clang/include/llvm/Analysis/MemoryBuiltins.h
+++ b/linux-x64/clang/include/llvm/Analysis/MemoryBuiltins.h
@@ -53,33 +53,33 @@
class UndefValue;
class Value;
-/// \brief Tests if a value is a call or invoke to a library function that
+/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
-/// \brief Tests if a value is a call or invoke to a function that returns a
+/// Tests if a value is a call or invoke to a function that returns a
/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
bool isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
-/// \brief Tests if a value is a call or invoke to a library function that
+/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
-/// \brief Tests if a value is a call or invoke to a library function that
+/// Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
-/// \brief Tests if a value is a call or invoke to a library function that
+/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
-/// \brief Tests if a value is a call or invoke to a library function that
+/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
@@ -170,14 +170,14 @@
bool NullIsUnknownSize = false;
};
-/// \brief Compute the size of the object pointed by Ptr. Returns true and the
+/// Compute the size of the object pointed by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise. In this context, by
/// object we mean the region of memory starting at Ptr to the end of the
/// underlying object pointed to by Ptr.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
const TargetLibraryInfo *TLI, ObjectSizeOpts Opts = {});
-/// Try to turn a call to @llvm.objectsize into an integer value of the given
+/// Try to turn a call to \@llvm.objectsize into an integer value of the given
/// Type. Returns null on failure.
/// If MustSucceed is true, this function will not return null, and may return
/// conservative values governed by the second argument of the call to
@@ -189,7 +189,7 @@
using SizeOffsetType = std::pair<APInt, APInt>;
-/// \brief Evaluate the size and offset of an object pointed to by a Value*
+/// Evaluate the size and offset of an object pointed to by a Value*
/// statically. Fails if size or offset are not known at compile time.
class ObjectSizeOffsetVisitor
: public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> {
@@ -248,7 +248,7 @@
using SizeOffsetEvalType = std::pair<Value *, Value *>;
-/// \brief Evaluate the size and offset of an object pointed to by a Value*.
+/// Evaluate the size and offset of an object pointed to by a Value*.
/// May create code to compute the result at run-time.
class ObjectSizeOffsetEvaluator
: public InstVisitor<ObjectSizeOffsetEvaluator, SizeOffsetEvalType> {
diff --git a/linux-x64/clang/include/llvm/Analysis/MemoryDependenceAnalysis.h b/linux-x64/clang/include/llvm/Analysis/MemoryDependenceAnalysis.h
index c297452..1c40cff 100644
--- a/linux-x64/clang/include/llvm/Analysis/MemoryDependenceAnalysis.h
+++ b/linux-x64/clang/include/llvm/Analysis/MemoryDependenceAnalysis.h
@@ -26,6 +26,7 @@
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PredIteratorCache.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
@@ -43,6 +44,7 @@
class LoadInst;
class PHITransAddr;
class TargetLibraryInfo;
+class PhiValues;
class Value;
/// A memory dependence query can return one of three different answers.
@@ -302,7 +304,7 @@
/// The maximum size of the dereferences of the pointer.
///
/// May be UnknownSize if the sizes are unknown.
- uint64_t Size = MemoryLocation::UnknownSize;
+ LocationSize Size = MemoryLocation::UnknownSize;
/// The AA tags associated with dereferences of the pointer.
///
/// The members may be null if there are no tags or conflicting tags.
@@ -314,7 +316,10 @@
/// Cache storing single nonlocal def for the instruction.
/// It is set when nonlocal def would be found in function returning only
/// local dependencies.
- DenseMap<Instruction *, NonLocalDepResult> NonLocalDefsCache;
+ DenseMap<AssertingVH<const Value>, NonLocalDepResult> NonLocalDefsCache;
+ using ReverseNonLocalDefsCacheTy =
+ DenseMap<Instruction *, SmallPtrSet<const Value*, 4>>;
+ ReverseNonLocalDefsCacheTy ReverseNonLocalDefsCache;
/// This map stores the cached results of doing a pointer lookup at the
/// bottom of a block.
@@ -356,13 +361,14 @@
AssumptionCache &AC;
const TargetLibraryInfo &TLI;
DominatorTree &DT;
+ PhiValues &PV;
PredIteratorCache PredCache;
public:
MemoryDependenceResults(AliasAnalysis &AA, AssumptionCache &AC,
const TargetLibraryInfo &TLI,
- DominatorTree &DT)
- : AA(AA), AC(AC), TLI(TLI), DT(DT) {}
+ DominatorTree &DT, PhiValues &PV)
+ : AA(AA), AC(AC), TLI(TLI), DT(DT), PV(PV) {}
/// Handle invalidation in the new PM.
bool invalidate(Function &F, const PreservedAnalyses &PA,
diff --git a/linux-x64/clang/include/llvm/Analysis/MemoryLocation.h b/linux-x64/clang/include/llvm/Analysis/MemoryLocation.h
index c108074..6b68000 100644
--- a/linux-x64/clang/include/llvm/Analysis/MemoryLocation.h
+++ b/linux-x64/clang/include/llvm/Analysis/MemoryLocation.h
@@ -27,8 +27,16 @@
class StoreInst;
class MemTransferInst;
class MemIntrinsic;
+class AtomicMemTransferInst;
+class AtomicMemIntrinsic;
+class AnyMemTransferInst;
+class AnyMemIntrinsic;
class TargetLibraryInfo;
+// Represents the size of a MemoryLocation. Logically, it's an
+// Optional<uint64_t>, with a special UnknownSize value from `MemoryLocation`.
+using LocationSize = uint64_t;
+
/// Representation for a specific memory location.
///
/// This abstraction can be used to represent a specific location in memory.
@@ -55,7 +63,7 @@
/// virtual address space, because there are restrictions on stepping out of
/// one object and into another. See
/// http://llvm.org/docs/LangRef.html#pointeraliasing
- uint64_t Size;
+ LocationSize Size;
/// The metadata nodes which describes the aliasing of the location (each
/// member is null if that kind of information is unavailable).
@@ -90,17 +98,21 @@
/// Return a location representing the source of a memory transfer.
static MemoryLocation getForSource(const MemTransferInst *MTI);
+ static MemoryLocation getForSource(const AtomicMemTransferInst *MTI);
+ static MemoryLocation getForSource(const AnyMemTransferInst *MTI);
/// Return a location representing the destination of a memory set or
/// transfer.
static MemoryLocation getForDest(const MemIntrinsic *MI);
+ static MemoryLocation getForDest(const AtomicMemIntrinsic *MI);
+ static MemoryLocation getForDest(const AnyMemIntrinsic *MI);
/// Return a location representing a particular argument of a call.
static MemoryLocation getForArgument(ImmutableCallSite CS, unsigned ArgIdx,
const TargetLibraryInfo &TLI);
explicit MemoryLocation(const Value *Ptr = nullptr,
- uint64_t Size = UnknownSize,
+ LocationSize Size = UnknownSize,
const AAMDNodes &AATags = AAMDNodes())
: Ptr(Ptr), Size(Size), AATags(AATags) {}
@@ -110,7 +122,7 @@
return Copy;
}
- MemoryLocation getWithNewSize(uint64_t NewSize) const {
+ MemoryLocation getWithNewSize(LocationSize NewSize) const {
MemoryLocation Copy(*this);
Copy.Size = NewSize;
return Copy;
@@ -137,7 +149,7 @@
}
static unsigned getHashValue(const MemoryLocation &Val) {
return DenseMapInfo<const Value *>::getHashValue(Val.Ptr) ^
- DenseMapInfo<uint64_t>::getHashValue(Val.Size) ^
+ DenseMapInfo<LocationSize>::getHashValue(Val.Size) ^
DenseMapInfo<AAMDNodes>::getHashValue(Val.AATags);
}
static bool isEqual(const MemoryLocation &LHS, const MemoryLocation &RHS) {
diff --git a/linux-x64/clang/include/llvm/Analysis/MemorySSA.h b/linux-x64/clang/include/llvm/Analysis/MemorySSA.h
index 2899890..d445e44 100644
--- a/linux-x64/clang/include/llvm/Analysis/MemorySSA.h
+++ b/linux-x64/clang/include/llvm/Analysis/MemorySSA.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
/// \file
-/// \brief This file exposes an interface to building/using memory SSA to
+/// This file exposes an interface to building/using memory SSA to
/// walk memory instructions using a use/def graph.
///
/// Memory SSA class builds an SSA form that links together memory access
@@ -130,7 +130,7 @@
using const_memoryaccess_def_iterator =
memoryaccess_def_iterator_base<const MemoryAccess>;
-// \brief The base for all memory accesses. All memory accesses in a block are
+// The base for all memory accesses. All memory accesses in a block are
// linked together using an intrusive list.
class MemoryAccess
: public DerivedUser,
@@ -159,11 +159,11 @@
void print(raw_ostream &OS) const;
void dump() const;
- /// \brief The user iterators for a memory access
+ /// The user iterators for a memory access
using iterator = user_iterator;
using const_iterator = const_user_iterator;
- /// \brief This iterator walks over all of the defs in a given
+ /// This iterator walks over all of the defs in a given
/// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
/// MemoryUse/MemoryDef, this walks the defining access.
memoryaccess_def_iterator defs_begin();
@@ -171,7 +171,7 @@
memoryaccess_def_iterator defs_end();
const_memoryaccess_def_iterator defs_end() const;
- /// \brief Get the iterators for the all access list and the defs only list
+ /// Get the iterators for the all access list and the defs only list
/// We default to the all access list.
AllAccessType::self_iterator getIterator() {
return this->AllAccessType::getIterator();
@@ -205,11 +205,11 @@
friend class MemoryUse;
friend class MemoryUseOrDef;
- /// \brief Used by MemorySSA to change the block of a MemoryAccess when it is
+ /// Used by MemorySSA to change the block of a MemoryAccess when it is
/// moved.
void setBlock(BasicBlock *BB) { Block = BB; }
- /// \brief Used for debugging and tracking things about MemoryAccesses.
+ /// Used for debugging and tracking things about MemoryAccesses.
/// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
inline unsigned getID() const;
@@ -235,7 +235,7 @@
return OS;
}
-/// \brief Class that has the common methods + fields of memory uses/defs. It's
+/// Class that has the common methods + fields of memory uses/defs. It's
/// a little awkward to have, but there are many cases where we want either a
/// use or def, and there are many cases where uses are needed (defs aren't
/// acceptable), and vice-versa.
@@ -248,10 +248,10 @@
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
- /// \brief Get the instruction that this MemoryUse represents.
+ /// Get the instruction that this MemoryUse represents.
Instruction *getMemoryInst() const { return MemoryInstruction; }
- /// \brief Get the access that produces the memory state used by this Use.
+ /// Get the access that produces the memory state used by this Use.
MemoryAccess *getDefiningAccess() const { return getOperand(0); }
static bool classof(const Value *MA) {
@@ -270,7 +270,7 @@
return OptimizedAccessAlias;
}
- /// \brief Reset the ID of what this MemoryUse was optimized to, causing it to
+ /// Reset the ID of what this MemoryUse was optimized to, causing it to
/// be rewalked by the walker if necessary.
/// This really should only be called by tests.
inline void resetOptimized();
@@ -313,7 +313,7 @@
: public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
-/// \brief Represents read-only accesses to memory
+/// Represents read-only accesses to memory
///
/// In particular, the set of Instructions that will be represented by
/// MemoryUse's is exactly the set of Instructions for which
@@ -364,7 +364,7 @@
struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
-/// \brief Represents a read-write access to memory, whether it is a must-alias,
+/// Represents a read-write access to memory, whether it is a must-alias,
/// or a may-alias.
///
/// In particular, the set of Instructions that will be represented by
@@ -424,7 +424,7 @@
struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
-/// \brief Represents phi nodes for memory accesses.
+/// Represents phi nodes for memory accesses.
///
/// These have the same semantic as regular phi nodes, with the exception that
/// only one phi will ever exist in a given basic block.
@@ -504,10 +504,10 @@
const_op_range incoming_values() const { return operands(); }
- /// \brief Return the number of incoming edges
+ /// Return the number of incoming edges
unsigned getNumIncomingValues() const { return getNumOperands(); }
- /// \brief Return incoming value number x
+ /// Return incoming value number x
MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
void setIncomingValue(unsigned I, MemoryAccess *V) {
assert(V && "PHI node got a null value!");
@@ -517,17 +517,17 @@
static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
- /// \brief Return incoming basic block number @p i.
+ /// Return incoming basic block number @p i.
BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
- /// \brief Return incoming basic block corresponding
+ /// Return incoming basic block corresponding
/// to an operand of the PHI.
BasicBlock *getIncomingBlock(const Use &U) const {
assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
return getIncomingBlock(unsigned(&U - op_begin()));
}
- /// \brief Return incoming basic block corresponding
+ /// Return incoming basic block corresponding
/// to value use iterator.
BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
return getIncomingBlock(I.getUse());
@@ -538,7 +538,7 @@
block_begin()[I] = BB;
}
- /// \brief Add an incoming value to the end of the PHI list
+ /// Add an incoming value to the end of the PHI list
void addIncoming(MemoryAccess *V, BasicBlock *BB) {
if (getNumOperands() == ReservedSpace)
growOperands(); // Get more space!
@@ -548,7 +548,7 @@
setIncomingBlock(getNumOperands() - 1, BB);
}
- /// \brief Return the first index of the specified basic
+ /// Return the first index of the specified basic
/// block in the value list for this PHI. Returns -1 if no instance.
int getBasicBlockIndex(const BasicBlock *BB) const {
for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
@@ -557,12 +557,53 @@
return -1;
}
- Value *getIncomingValueForBlock(const BasicBlock *BB) const {
+ MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
int Idx = getBasicBlockIndex(BB);
assert(Idx >= 0 && "Invalid basic block argument!");
return getIncomingValue(Idx);
}
+ // After deleting incoming position I, the order of incoming may be changed.
+ void unorderedDeleteIncoming(unsigned I) {
+ unsigned E = getNumOperands();
+ assert(I < E && "Cannot remove out of bounds Phi entry.");
+ // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
+ // itself should be deleted.
+ assert(E >= 2 && "Cannot only remove incoming values in MemoryPhis with "
+ "at least 2 values.");
+ setIncomingValue(I, getIncomingValue(E - 1));
+ setIncomingBlock(I, block_begin()[E - 1]);
+ setOperand(E - 1, nullptr);
+ block_begin()[E - 1] = nullptr;
+ setNumHungOffUseOperands(getNumOperands() - 1);
+ }
+
+ // After deleting entries that satisfy Pred, remaining entries may have
+ // changed order.
+ template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
+ for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
+ if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
+ unorderedDeleteIncoming(I);
+ E = getNumOperands();
+ --I;
+ }
+ assert(getNumOperands() >= 1 &&
+ "Cannot remove all incoming blocks in a MemoryPhi.");
+ }
+
+ // After deleting incoming block BB, the incoming blocks order may be changed.
+ void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
+ unorderedDeleteIncomingIf(
+ [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
+ }
+
+ // After deleting incoming memory access MA, the incoming accesses order may
+ // be changed.
+ void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
+ unorderedDeleteIncomingIf(
+ [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
+ }
+
static bool classof(const Value *V) {
return V->getValueID() == MemoryPhiVal;
}
@@ -574,7 +615,7 @@
protected:
friend class MemorySSA;
- /// \brief this is more complicated than the generic
+ /// this is more complicated than the generic
/// User::allocHungoffUses, because we have to allocate Uses for the incoming
/// values and pointers to the incoming blocks, all in one allocation.
void allocHungoffUses(unsigned N) {
@@ -586,7 +627,7 @@
const unsigned ID;
unsigned ReservedSpace;
- /// \brief This grows the operand list in response to a push_back style of
+ /// This grows the operand list in response to a push_back style of
/// operation. This grows the number of ops by 1.5 times.
void growOperands() {
unsigned E = getNumOperands();
@@ -635,7 +676,7 @@
template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
-/// \brief Encapsulates MemorySSA, including all data associated with memory
+/// Encapsulates MemorySSA, including all data associated with memory
/// accesses.
class MemorySSA {
public:
@@ -644,7 +685,7 @@
MemorySSAWalker *getWalker();
- /// \brief Given a memory Mod/Ref'ing instruction, get the MemorySSA
+ /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
/// access associated with it. If passed a basic block gets the memory phi
/// node that exists for that block, if there is one. Otherwise, this will get
/// a MemoryUseOrDef.
@@ -654,7 +695,7 @@
void dump() const;
void print(raw_ostream &) const;
- /// \brief Return true if \p MA represents the live on entry value
+ /// Return true if \p MA represents the live on entry value
///
/// Loads and stores from pointer arguments and other global values may be
/// defined by memory operations that do not occur in the current function, so
@@ -678,14 +719,14 @@
using DefsList =
simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
- /// \brief Return the list of MemoryAccess's for a given basic block.
+ /// Return the list of MemoryAccess's for a given basic block.
///
/// This list is not modifiable by the user.
const AccessList *getBlockAccesses(const BasicBlock *BB) const {
return getWritableBlockAccesses(BB);
}
- /// \brief Return the list of MemoryDef's and MemoryPhi's for a given basic
+ /// Return the list of MemoryDef's and MemoryPhi's for a given basic
/// block.
///
/// This list is not modifiable by the user.
@@ -693,19 +734,19 @@
return getWritableBlockDefs(BB);
}
- /// \brief Given two memory accesses in the same basic block, determine
+ /// Given two memory accesses in the same basic block, determine
/// whether MemoryAccess \p A dominates MemoryAccess \p B.
bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
- /// \brief Given two memory accesses in potentially different blocks,
+ /// Given two memory accesses in potentially different blocks,
/// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
- /// \brief Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
+ /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
/// dominates Use \p B.
bool dominates(const MemoryAccess *A, const Use &B) const;
- /// \brief Verify that MemorySSA is self consistent (IE definitions dominate
+ /// Verify that MemorySSA is self consistent (IE definitions dominate
/// all uses, uses appear in the right places). This is used by unit tests.
void verifyMemorySSA() const;
@@ -722,6 +763,7 @@
void verifyDefUses(Function &F) const;
void verifyDomination(Function &F) const;
void verifyOrdering(Function &F) const;
+ void verifyDominationNumbers(const Function &F) const;
// This is used by the use optimizer and updater.
AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
@@ -740,7 +782,7 @@
// relies on the updater to fixup what it breaks, so it is not public.
void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
- void moveTo(MemoryUseOrDef *What, BasicBlock *BB, InsertionPlace Point);
+ void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);
// Rename the dominator tree branch rooted at BB.
void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
@@ -776,8 +818,7 @@
MemoryPhi *createMemoryPhi(BasicBlock *BB);
MemoryUseOrDef *createNewAccess(Instruction *);
MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
- void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &,
- const DenseMap<const BasicBlock *, unsigned int> &);
+ void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
@@ -859,7 +900,7 @@
Result run(Function &F, FunctionAnalysisManager &AM);
};
-/// \brief Printer pass for \c MemorySSA.
+/// Printer pass for \c MemorySSA.
class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
raw_ostream &OS;
@@ -869,12 +910,12 @@
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
-/// \brief Verifier pass for \c MemorySSA.
+/// Verifier pass for \c MemorySSA.
struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
-/// \brief Legacy analysis pass which computes \c MemorySSA.
+/// Legacy analysis pass which computes \c MemorySSA.
class MemorySSAWrapperPass : public FunctionPass {
public:
MemorySSAWrapperPass();
@@ -895,7 +936,7 @@
std::unique_ptr<MemorySSA> MSSA;
};
-/// \brief This is the generic walker interface for walkers of MemorySSA.
+/// This is the generic walker interface for walkers of MemorySSA.
/// Walkers are used to be able to further disambiguate the def-use chains
/// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
/// you.
@@ -913,7 +954,7 @@
using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
- /// \brief Given a memory Mod/Ref/ModRef'ing instruction, calling this
+ /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
/// will give you the nearest dominating MemoryAccess that Mod's the location
/// the instruction accesses (by skipping any def which AA can prove does not
/// alias the location(s) accessed by the instruction given).
@@ -945,7 +986,7 @@
/// but takes a MemoryAccess instead of an Instruction.
virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
- /// \brief Given a potentially clobbering memory access and a new location,
+ /// Given a potentially clobbering memory access and a new location,
/// calling this will give you the nearest dominating clobbering MemoryAccess
/// (by skipping non-aliasing def links).
///
@@ -959,7 +1000,7 @@
virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
const MemoryLocation &) = 0;
- /// \brief Given a memory access, invalidate anything this walker knows about
+ /// Given a memory access, invalidate anything this walker knows about
/// that access.
/// This API is used by walkers that store information to perform basic cache
/// invalidation. This will be called by MemorySSA at appropriate times for
@@ -974,7 +1015,7 @@
MemorySSA *MSSA;
};
-/// \brief A MemorySSAWalker that does no alias queries, or anything else. It
+/// A MemorySSAWalker that does no alias queries, or anything else. It
/// simply returns the links as they were constructed by the builder.
class DoNothingMemorySSAWalker final : public MemorySSAWalker {
public:
@@ -990,7 +1031,7 @@
using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
-/// \brief Iterator base class used to implement const and non-const iterators
+/// Iterator base class used to implement const and non-const iterators
/// over the defining accesses of a MemoryAccess.
template <class T>
class memoryaccess_def_iterator_base
@@ -1063,7 +1104,7 @@
return const_memoryaccess_def_iterator();
}
-/// \brief GraphTraits for a MemoryAccess, which walks defs in the normal case,
+/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// and uses in the inverse case.
template <> struct GraphTraits<MemoryAccess *> {
using NodeRef = MemoryAccess *;
@@ -1083,7 +1124,7 @@
static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
};
-/// \brief Provide an iterator that walks defs, giving both the memory access,
+/// Provide an iterator that walks defs, giving both the memory access,
/// and the current pointer location, updating the pointer location as it
/// changes due to phi node translation.
///
diff --git a/linux-x64/clang/include/llvm/Analysis/MemorySSAUpdater.h b/linux-x64/clang/include/llvm/Analysis/MemorySSAUpdater.h
index 3f4ef06..38f08c1 100644
--- a/linux-x64/clang/include/llvm/Analysis/MemorySSAUpdater.h
+++ b/linux-x64/clang/include/llvm/Analysis/MemorySSAUpdater.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
// \file
-// \brief An automatic updater for MemorySSA that handles arbitrary insertion,
+// An automatic updater for MemorySSA that handles arbitrary insertion,
// deletion, and moves. It performs phi insertion where necessary, and
// automatically updates the MemorySSA IR to be correct.
// While updating loads or removing instructions is often easy enough to not
@@ -33,6 +33,7 @@
#define LLVM_ANALYSIS_MEMORYSSAUPDATER_H
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/BasicBlock.h"
@@ -59,8 +60,13 @@
class MemorySSAUpdater {
private:
MemorySSA *MSSA;
- SmallVector<MemoryPhi *, 8> InsertedPHIs;
+
+ /// We use WeakVH rather than a costly deletion to deal with dangling pointers.
+ /// MemoryPhis are created eagerly and sometimes get zapped shortly afterwards.
+ SmallVector<WeakVH, 16> InsertedPHIs;
+
SmallPtrSet<BasicBlock *, 8> VisitedBlocks;
+ SmallSet<AssertingVH<MemoryPhi>, 8> NonOptPhis;
public:
MemorySSAUpdater(MemorySSA *MSSA) : MSSA(MSSA) {}
@@ -87,6 +93,45 @@
void moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where);
void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
MemorySSA::InsertionPlace Where);
+ /// `From` block was spliced into `From` and `To`.
+ /// Move all accesses from `From` to `To` starting at instruction `Start`.
+ /// `To` is newly created BB, so empty of MemorySSA::MemoryAccesses.
+ /// Edges are already updated, so successors of `To` with MPhi nodes need to
+ /// update incoming block.
+ /// |------| |------|
+ /// | From | | From |
+ /// | | |------|
+ /// | | ||
+ /// | | => \/
+ /// | | |------| <- Start
+ /// | | | To |
+ /// |------| |------|
+ void moveAllAfterSpliceBlocks(BasicBlock *From, BasicBlock *To,
+ Instruction *Start);
+ /// `From` block was merged into `To`. All instructions were moved and
+ /// `From` is an empty block with successor edges; `From` is about to be
+ /// deleted. Move all accesses from `From` to `To` starting at instruction
+ /// `Start`. `To` may have multiple successors, `From` has a single
+ /// predecessor. `From` may have successors with MPhi nodes, replace their
+ /// incoming block with `To`.
+ /// |------| |------|
+ /// | To | | To |
+ /// |------| | |
+ /// || => | |
+ /// \/ | |
+ /// |------| | | <- Start
+ /// | From | | |
+ /// |------| |------|
+ void moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
+ Instruction *Start);
+ /// BasicBlock Old had New, an empty BasicBlock, added directly before it,
+ /// and the predecessors in Preds that used to point to Old, now point to
+ /// New. If New is the only predecessor, move Old's Phi, if present, to New.
+ /// Otherwise, add a new Phi in New with appropriate incoming values, and
+ /// update the incoming values in Old's Phi node too, if present.
+ void
+ wireOldPredecessorsToNewImmediatePredecessor(BasicBlock *Old, BasicBlock *New,
+ ArrayRef<BasicBlock *> Preds);
// The below are utility functions. Other than creation of accesses to pass
// to insertDef, and removeAccess to remove accesses, you should generally
@@ -94,7 +139,7 @@
// the edge cases right, and the above calls already operate in near-optimal
// time bounds.
- /// \brief Create a MemoryAccess in MemorySSA at a specified point in a block,
+ /// Create a MemoryAccess in MemorySSA at a specified point in a block,
/// with a specified clobbering definition.
///
/// Returns the new MemoryAccess.
@@ -111,7 +156,7 @@
const BasicBlock *BB,
MemorySSA::InsertionPlace Point);
- /// \brief Create a MemoryAccess in MemorySSA before or after an existing
+ /// Create a MemoryAccess in MemorySSA before or after an existing
/// MemoryAccess.
///
/// Returns the new MemoryAccess.
@@ -128,7 +173,7 @@
MemoryAccess *Definition,
MemoryAccess *InsertPt);
- /// \brief Remove a MemoryAccess from MemorySSA, including updating all
+ /// Remove a MemoryAccess from MemorySSA, including updating all
/// definitions and uses.
/// This should be called when a memory instruction that has a MemoryAccess
/// associated with it is erased from the program. For example, if a store or
@@ -136,10 +181,33 @@
/// on the MemoryAccess for that store/load.
void removeMemoryAccess(MemoryAccess *);
+ /// Remove MemoryAccess for a given instruction, if a MemoryAccess exists.
+ /// This should be called when an instruction (load/store) is deleted from
+ /// the program.
+ void removeMemoryAccess(const Instruction *I) {
+ if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
+ removeMemoryAccess(MA);
+ }
+
+ /// Remove all MemoryAccesses in a set of BasicBlocks about to be deleted.
+ /// Assumption we make here: all uses of deleted defs and phi must either
+ /// occur in blocks about to be deleted (thus will be deleted as well), or
+ /// they occur in phis that will simply lose an incoming value.
+ /// Deleted blocks still have successor info, but their predecessor edges and
+ /// Phi nodes may already be updated. Instructions in DeadBlocks should be
+ /// deleted after this call.
+ void removeBlocks(const SmallPtrSetImpl<BasicBlock *> &DeadBlocks);
+
+ /// Get handle on MemorySSA.
+ MemorySSA* getMemorySSA() const { return MSSA; }
+
private:
// Move What before Where in the MemorySSA IR.
template <class WhereType>
void moveTo(MemoryUseOrDef *What, BasicBlock *BB, WhereType Where);
+ // Move all memory accesses from `From` to `To` starting at `Start`.
+ // Restrictions apply, see public wrappers of this method.
+ void moveAllAccesses(BasicBlock *From, BasicBlock *To, Instruction *Start);
MemoryAccess *getPreviousDef(MemoryAccess *);
MemoryAccess *getPreviousDefInBlock(MemoryAccess *);
MemoryAccess *
@@ -151,7 +219,7 @@
MemoryAccess *recursePhi(MemoryAccess *Phi);
template <class RangeType>
MemoryAccess *tryRemoveTrivialPhi(MemoryPhi *Phi, RangeType &Operands);
- void fixupDefs(const SmallVectorImpl<MemoryAccess *> &);
+ void fixupDefs(const SmallVectorImpl<WeakVH> &);
};
} // end namespace llvm
diff --git a/linux-x64/clang/include/llvm/Analysis/MustExecute.h b/linux-x64/clang/include/llvm/Analysis/MustExecute.h
index fb48bb6..97ad76d 100644
--- a/linux-x64/clang/include/llvm/Analysis/MustExecute.h
+++ b/linux-x64/clang/include/llvm/Analysis/MustExecute.h
@@ -10,7 +10,7 @@
/// Contains a collection of routines for determining if a given instruction is
/// guaranteed to execute if a given point in control flow is reached. The most
/// common example is an instruction within a loop being provably executed if we
-/// branch to the header of it's containing loop.
+/// branch to the header of its containing loop.
///
//===----------------------------------------------------------------------===//
@@ -30,7 +30,7 @@
class DominatorTree;
class Loop;
-/// \brief Captures loop safety information.
+/// Captures loop safety information.
/// It keep information for loop & its header may throw exception or otherwise
/// exit abnormaly on any iteration of the loop which might actually execute
/// at runtime. The primary way to consume this infromation is via
@@ -46,7 +46,7 @@
LoopSafetyInfo() = default;
};
-/// \brief Computes safety information for a loop checks loop body & header for
+/// Computes safety information for a loop checks loop body & header for
/// the possibility of may throw exception, it takes LoopSafetyInfo and loop as
/// argument. Updates safety information in LoopSafetyInfo argument.
/// Note: This is defined to clear and reinitialize an already initialized
@@ -58,7 +58,7 @@
bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT,
const Loop *CurLoop,
const LoopSafetyInfo *SafetyInfo);
-
+
}
#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/ObjCARCAliasAnalysis.h b/linux-x64/clang/include/llvm/Analysis/ObjCARCAliasAnalysis.h
index db524ff..559c77c 100644
--- a/linux-x64/clang/include/llvm/Analysis/ObjCARCAliasAnalysis.h
+++ b/linux-x64/clang/include/llvm/Analysis/ObjCARCAliasAnalysis.h
@@ -29,7 +29,7 @@
namespace llvm {
namespace objcarc {
-/// \brief This is a simple alias analysis implementation that uses knowledge
+/// This is a simple alias analysis implementation that uses knowledge
/// of ARC constructs to answer queries.
///
/// TODO: This class could be generalized to know about other ObjC-specific
diff --git a/linux-x64/clang/include/llvm/Analysis/ObjCARCAnalysisUtils.h b/linux-x64/clang/include/llvm/Analysis/ObjCARCAnalysisUtils.h
index 096573f..07beb0b 100644
--- a/linux-x64/clang/include/llvm/Analysis/ObjCARCAnalysisUtils.h
+++ b/linux-x64/clang/include/llvm/Analysis/ObjCARCAnalysisUtils.h
@@ -34,6 +34,7 @@
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
namespace llvm {
@@ -43,10 +44,10 @@
namespace llvm {
namespace objcarc {
-/// \brief A handy option to enable/disable all ARC Optimizations.
+/// A handy option to enable/disable all ARC Optimizations.
extern bool EnableARCOpts;
-/// \brief Test if the given module looks interesting to run ARC optimization
+/// Test if the given module looks interesting to run ARC optimization
/// on.
inline bool ModuleHasARC(const Module &M) {
return
@@ -71,7 +72,7 @@
M.getNamedValue("clang.arc.use");
}
-/// \brief This is a wrapper around getUnderlyingObject which also knows how to
+/// This is a wrapper around getUnderlyingObject which also knows how to
/// look through objc_retain and objc_autorelease calls, which we know to return
/// their argument verbatim.
inline const Value *GetUnderlyingObjCPtr(const Value *V,
@@ -89,11 +90,13 @@
/// A wrapper for GetUnderlyingObjCPtr used for results memoization.
inline const Value *
GetUnderlyingObjCPtrCached(const Value *V, const DataLayout &DL,
- DenseMap<const Value *, const Value *> &Cache) {
+ DenseMap<const Value *, WeakTrackingVH> &Cache) {
if (auto InCache = Cache.lookup(V))
return InCache;
- return Cache[V] = GetUnderlyingObjCPtr(V, DL);
+ const Value *Computed = GetUnderlyingObjCPtr(V, DL);
+ Cache[V] = const_cast<Value *>(Computed);
+ return Computed;
}
/// The RCIdentity root of a value \p V is a dominating value U for which
@@ -129,7 +132,7 @@
return const_cast<Value *>(GetRCIdentityRoot((const Value *)V));
}
-/// \brief Assuming the given instruction is one of the special calls such as
+/// Assuming the given instruction is one of the special calls such as
/// objc_retain or objc_release, return the RCIdentity root of the argument of
/// the call.
inline Value *GetArgRCIdentityRoot(Value *Inst) {
@@ -146,7 +149,7 @@
cast<GetElementPtrInst>(I)->hasAllZeroIndices());
}
-/// \brief Test whether the given value is possible a retainable object pointer.
+/// Test whether the given value is possibly a retainable object pointer.
inline bool IsPotentialRetainableObjPtr(const Value *Op) {
// Pointers to static or stack storage are not valid retainable object
// pointers.
@@ -191,7 +194,7 @@
return true;
}
-/// \brief Helper for GetARCInstKind. Determines what kind of construct CS
+/// Helper for GetARCInstKind. Determines what kind of construct CS
/// is.
inline ARCInstKind GetCallSiteClass(ImmutableCallSite CS) {
for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
@@ -202,7 +205,7 @@
return CS.onlyReadsMemory() ? ARCInstKind::None : ARCInstKind::Call;
}
-/// \brief Return true if this value refers to a distinct and identifiable
+/// Return true if this value refers to a distinct and identifiable
/// object.
///
/// This is similar to AliasAnalysis's isIdentifiedObject, except that it uses
diff --git a/linux-x64/clang/include/llvm/Analysis/ObjCARCInstKind.h b/linux-x64/clang/include/llvm/Analysis/ObjCARCInstKind.h
index 02ff035..0b92d8b 100644
--- a/linux-x64/clang/include/llvm/Analysis/ObjCARCInstKind.h
+++ b/linux-x64/clang/include/llvm/Analysis/ObjCARCInstKind.h
@@ -18,7 +18,7 @@
/// \enum ARCInstKind
///
-/// \brief Equivalence classes of instructions in the ARC Model.
+/// Equivalence classes of instructions in the ARC Model.
///
/// Since we do not have "instructions" to represent ARC concepts in LLVM IR,
/// we instead operate on equivalence classes of instructions.
@@ -57,32 +57,32 @@
raw_ostream &operator<<(raw_ostream &OS, const ARCInstKind Class);
-/// \brief Test if the given class is a kind of user.
+/// Test if the given class is a kind of user.
bool IsUser(ARCInstKind Class);
-/// \brief Test if the given class is objc_retain or equivalent.
+/// Test if the given class is objc_retain or equivalent.
bool IsRetain(ARCInstKind Class);
-/// \brief Test if the given class is objc_autorelease or equivalent.
+/// Test if the given class is objc_autorelease or equivalent.
bool IsAutorelease(ARCInstKind Class);
-/// \brief Test if the given class represents instructions which return their
+/// Test if the given class represents instructions which return their
/// argument verbatim.
bool IsForwarding(ARCInstKind Class);
-/// \brief Test if the given class represents instructions which do nothing if
+/// Test if the given class represents instructions which do nothing if
/// passed a null pointer.
bool IsNoopOnNull(ARCInstKind Class);
-/// \brief Test if the given class represents instructions which are always safe
+/// Test if the given class represents instructions which are always safe
/// to mark with the "tail" keyword.
bool IsAlwaysTail(ARCInstKind Class);
-/// \brief Test if the given class represents instructions which are never safe
+/// Test if the given class represents instructions which are never safe
/// to mark with the "tail" keyword.
bool IsNeverTail(ARCInstKind Class);
-/// \brief Test if the given class represents instructions which are always safe
+/// Test if the given class represents instructions which are always safe
/// to mark with the nounwind attribute.
bool IsNoThrow(ARCInstKind Class);
@@ -90,11 +90,11 @@
/// autoreleasepool pop.
bool CanInterruptRV(ARCInstKind Class);
-/// \brief Determine if F is one of the special known Functions. If it isn't,
+/// Determine if F is one of the special known Functions. If it isn't,
/// return ARCInstKind::CallOrUser.
ARCInstKind GetFunctionClass(const Function *F);
-/// \brief Determine which objc runtime call instruction class V belongs to.
+/// Determine which objc runtime call instruction class V belongs to.
///
/// This is similar to GetARCInstKind except that it only detects objc
/// runtime calls. This allows it to be faster.
diff --git a/linux-x64/clang/include/llvm/Analysis/OptimizationRemarkEmitter.h b/linux-x64/clang/include/llvm/Analysis/OptimizationRemarkEmitter.h
index 26f32ac..fa83869 100644
--- a/linux-x64/clang/include/llvm/Analysis/OptimizationRemarkEmitter.h
+++ b/linux-x64/clang/include/llvm/Analysis/OptimizationRemarkEmitter.h
@@ -40,7 +40,7 @@
OptimizationRemarkEmitter(const Function *F, BlockFrequencyInfo *BFI)
: F(F), BFI(BFI) {}
- /// \brief This variant can be used to generate ORE on demand (without the
+ /// This variant can be used to generate ORE on demand (without the
/// analysis pass).
///
/// Note that this ctor has a very different cost depending on whether
@@ -66,11 +66,11 @@
bool invalidate(Function &F, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &Inv);
- /// \brief Output the remark via the diagnostic handler and to the
+ /// Output the remark via the diagnostic handler and to the
/// optimization record file.
void emit(DiagnosticInfoOptimizationBase &OptDiag);
- /// \brief Take a lambda that returns a remark which will be emitted. Second
+ /// Take a lambda that returns a remark which will be emitted. Second
/// argument is only used to restrict this to functions.
template <typename T>
void emit(T RemarkBuilder, decltype(RemarkBuilder()) * = nullptr) {
@@ -85,7 +85,7 @@
}
}
- /// \brief Whether we allow for extra compile-time budget to perform more
+ /// Whether we allow for extra compile-time budget to perform more
/// analysis to produce fewer false positives.
///
/// This is useful when reporting missed optimizations. In this case we can
@@ -112,7 +112,7 @@
/// Similar but use value from \p OptDiag and update hotness there.
void computeHotness(DiagnosticInfoIROptimization &OptDiag);
- /// \brief Only allow verbose messages if we know we're filtering by hotness
+ /// Only allow verbose messages if we know we're filtering by hotness
/// (BFI is only set in this case).
bool shouldEmitVerbose() { return BFI != nullptr; }
@@ -120,7 +120,7 @@
void operator=(const OptimizationRemarkEmitter &) = delete;
};
-/// \brief Add a small namespace to avoid name clashes with the classes used in
+/// Add a small namespace to avoid name clashes with the classes used in
/// the streaming interface. We want these to be short for better
/// write/readability.
namespace ore {
@@ -158,10 +158,10 @@
static AnalysisKey Key;
public:
- /// \brief Provide the result typedef for this analysis pass.
+ /// Provide the result typedef for this analysis pass.
typedef OptimizationRemarkEmitter Result;
- /// \brief Run the analysis pass over a function and produce BFI.
+ /// Run the analysis pass over a function and produce BFI.
Result run(Function &F, FunctionAnalysisManager &AM);
};
}
diff --git a/linux-x64/clang/include/llvm/Analysis/OrderedBasicBlock.h b/linux-x64/clang/include/llvm/Analysis/OrderedBasicBlock.h
index 2e716af..0776aa6 100644
--- a/linux-x64/clang/include/llvm/Analysis/OrderedBasicBlock.h
+++ b/linux-x64/clang/include/llvm/Analysis/OrderedBasicBlock.h
@@ -33,28 +33,28 @@
class OrderedBasicBlock {
private:
- /// \brief Map a instruction to its position in a BasicBlock.
+ /// Map an instruction to its position in a BasicBlock.
SmallDenseMap<const Instruction *, unsigned, 32> NumberedInsts;
- /// \brief Keep track of last instruction inserted into \p NumberedInsts.
+ /// Keep track of last instruction inserted into \p NumberedInsts.
/// It speeds up queries for uncached instructions by providing a start point
/// for new queries in OrderedBasicBlock::comesBefore.
BasicBlock::const_iterator LastInstFound;
- /// \brief The position/number to tag the next instruction to be found.
+ /// The position/number to tag the next instruction to be found.
unsigned NextInstPos;
- /// \brief The source BasicBlock to map.
+ /// The source BasicBlock to map.
const BasicBlock *BB;
- /// \brief Given no cached results, find if \p A comes before \p B in \p BB.
+ /// Given no cached results, find if \p A comes before \p B in \p BB.
/// Cache and number out instruction while walking \p BB.
bool comesBefore(const Instruction *A, const Instruction *B);
public:
OrderedBasicBlock(const BasicBlock *BasicB);
- /// \brief Find out whether \p A dominates \p B, meaning whether \p A
+ /// Find out whether \p A dominates \p B, meaning whether \p A
/// comes before \p B in \p BB. This is a simplification that considers
/// cached instruction positions and ignores other basic blocks, being
/// only relevant to compare relative instructions positions inside \p BB.
diff --git a/linux-x64/clang/include/llvm/Analysis/PHITransAddr.h b/linux-x64/clang/include/llvm/Analysis/PHITransAddr.h
index f0f34f3..0a335b6 100644
--- a/linux-x64/clang/include/llvm/Analysis/PHITransAddr.h
+++ b/linux-x64/clang/include/llvm/Analysis/PHITransAddr.h
@@ -43,7 +43,7 @@
/// TLI - The target library info if known, otherwise null.
const TargetLibraryInfo *TLI;
- /// A cache of @llvm.assume calls used by SimplifyInstruction.
+ /// A cache of \@llvm.assume calls used by SimplifyInstruction.
AssumptionCache *AC;
/// InstInputs - The inputs for our symbolic address.
diff --git a/linux-x64/clang/include/llvm/Analysis/PhiValues.h b/linux-x64/clang/include/llvm/Analysis/PhiValues.h
new file mode 100644
index 0000000..6607b32
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Analysis/PhiValues.h
@@ -0,0 +1,143 @@
+//===- PhiValues.h - Phi Value Analysis -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PhiValues class, and associated passes, which can be
+// used to find the underlying values of the phis in a function, i.e. the
+// non-phi values that can be found by traversing the phi graph.
+//
+// This information is computed lazily and cached. If new phis are added to the
+// function they are handled correctly, but if an existing phi has its operands
+// modified PhiValues has to be notified by calling invalidateValue.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_PHIVALUES_H
+#define LLVM_ANALYSIS_PHIVALUES_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+class Use;
+class Value;
+class PHINode;
+class Function;
+
+/// Class for calculating and caching the underlying values of phis in a
+/// function.
+///
+/// Initially the PhiValues is empty, and gets incrementally populated whenever
+/// it is queried.
+class PhiValues {
+public:
+ using ValueSet = SmallPtrSet<Value *, 4>;
+
+ /// Construct an empty PhiValues.
+ PhiValues(const Function &F) : F(F) {}
+
+ /// Get the underlying values of a phi.
+ ///
+ /// This returns the cached value if PN has previously been processed,
+ /// otherwise it processes it first.
+ const ValueSet &getValuesForPhi(const PHINode *PN);
+
+ /// Notify PhiValues that the cached information using V is no longer valid
+ ///
+ /// Whenever a phi has its operands modified the cached values for that phi
+ /// (and the phis that use that phi) become invalid. A user of PhiValues has
+ /// to notify it of this by calling invalidateValue on either the operand or
+ /// the phi, which will then clear the relevant cached information.
+ void invalidateValue(const Value *V);
+
+ /// Free the memory used by this class.
+ void releaseMemory();
+
+ /// Print out the values currently in the cache.
+ void print(raw_ostream &OS) const;
+
+ /// Handle invalidation events in the new pass manager.
+ bool invalidate(Function &, const PreservedAnalyses &,
+ FunctionAnalysisManager::Invalidator &);
+
+private:
+ using PhiSet = SmallPtrSet<const PHINode *, 4>;
+ using ConstValueSet = SmallPtrSet<const Value *, 4>;
+
+ /// The next depth number to be used by processPhi.
+ unsigned int NextDepthNumber = 1;
+
+ /// Depth numbers of phis. Phis with the same depth number are part of the
+ /// same strongly connected component.
+ DenseMap<const PHINode *, unsigned int> DepthMap;
+
+ /// Non-phi values reachable from each component.
+ DenseMap<unsigned int, ValueSet> NonPhiReachableMap;
+
+ /// All values reachable from each component.
+ DenseMap<unsigned int, ConstValueSet> ReachableMap;
+
+ /// The function that the PhiValues is for.
+ const Function &F;
+
+ /// Process a phi so that its entries in the depth and reachable maps are
+ /// fully populated.
+ void processPhi(const PHINode *PN, SmallVector<const PHINode *, 8> &Stack);
+};
+
+/// The analysis pass which yields a PhiValues
+///
+/// The analysis does nothing by itself, and just returns an empty PhiValues
+/// which will get filled in as it's used.
+class PhiValuesAnalysis : public AnalysisInfoMixin<PhiValuesAnalysis> {
+ friend AnalysisInfoMixin<PhiValuesAnalysis>;
+ static AnalysisKey Key;
+
+public:
+ using Result = PhiValues;
+ PhiValues run(Function &F, FunctionAnalysisManager &);
+};
+
+/// A pass for printing the PhiValues for a function.
+///
+/// This pass doesn't print whatever information the PhiValues happens to hold,
+/// but instead first uses the PhiValues to analyze all the phis in the function
+/// so the complete information is printed.
+class PhiValuesPrinterPass : public PassInfoMixin<PhiValuesPrinterPass> {
+ raw_ostream &OS;
+
+public:
+ explicit PhiValuesPrinterPass(raw_ostream &OS) : OS(OS) {}
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// Wrapper pass for the legacy pass manager
+class PhiValuesWrapperPass : public FunctionPass {
+ std::unique_ptr<PhiValues> Result;
+
+public:
+ static char ID;
+ PhiValuesWrapperPass();
+
+ PhiValues &getResult() { return *Result; }
+ const PhiValues &getResult() const { return *Result; }
+
+ bool runOnFunction(Function &F) override;
+ void releaseMemory() override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Analysis/PostDominators.h b/linux-x64/clang/include/llvm/Analysis/PostDominators.h
index 9a8c4d7..f2dc8d1 100644
--- a/linux-x64/clang/include/llvm/Analysis/PostDominators.h
+++ b/linux-x64/clang/include/llvm/Analysis/PostDominators.h
@@ -30,12 +30,14 @@
public:
using Base = PostDomTreeBase<BasicBlock>;
+ PostDominatorTree() = default;
+ explicit PostDominatorTree(Function &F) { recalculate(F); }
/// Handle invalidation explicitly.
bool invalidate(Function &F, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &);
};
-/// \brief Analysis pass which computes a \c PostDominatorTree.
+/// Analysis pass which computes a \c PostDominatorTree.
class PostDominatorTreeAnalysis
: public AnalysisInfoMixin<PostDominatorTreeAnalysis> {
friend AnalysisInfoMixin<PostDominatorTreeAnalysis>;
@@ -43,15 +45,15 @@
static AnalysisKey Key;
public:
- /// \brief Provide the result type for this analysis pass.
+ /// Provide the result type for this analysis pass.
using Result = PostDominatorTree;
- /// \brief Run the analysis pass over a function and produce a post dominator
+ /// Run the analysis pass over a function and produce a post dominator
/// tree.
PostDominatorTree run(Function &F, FunctionAnalysisManager &);
};
-/// \brief Printer pass for the \c PostDominatorTree.
+/// Printer pass for the \c PostDominatorTree.
class PostDominatorTreePrinterPass
: public PassInfoMixin<PostDominatorTreePrinterPass> {
raw_ostream &OS;
diff --git a/linux-x64/clang/include/llvm/Analysis/ProfileSummaryInfo.h b/linux-x64/clang/include/llvm/Analysis/ProfileSummaryInfo.h
index 2930334..58b67e7 100644
--- a/linux-x64/clang/include/llvm/Analysis/ProfileSummaryInfo.h
+++ b/linux-x64/clang/include/llvm/Analysis/ProfileSummaryInfo.h
@@ -31,7 +31,7 @@
class BlockFrequencyInfo;
class CallSite;
class ProfileSummary;
-/// \brief Analysis providing profile information.
+/// Analysis providing profile information.
///
/// This is an immutable analysis pass that provides ability to query global
/// (program-level) profile information. The main APIs are isHotCount and
@@ -59,16 +59,16 @@
ProfileSummaryInfo(ProfileSummaryInfo &&Arg)
: M(Arg.M), Summary(std::move(Arg.Summary)) {}
- /// \brief Returns true if profile summary is available.
+ /// Returns true if profile summary is available.
bool hasProfileSummary() { return computeSummary(); }
- /// \brief Returns true if module \c M has sample profile.
+ /// Returns true if module \c M has sample profile.
bool hasSampleProfile() {
return hasProfileSummary() &&
Summary->getKind() == ProfileSummary::PSK_Sample;
}
- /// \brief Returns true if module \c M has instrumentation profile.
+ /// Returns true if module \c M has instrumentation profile.
bool hasInstrumentationProfile() {
return hasProfileSummary() &&
Summary->getKind() == ProfileSummary::PSK_Instr;
@@ -90,31 +90,37 @@
BlockFrequencyInfo *BFI);
/// Returns true if the working set size of the code is considered huge.
bool hasHugeWorkingSetSize();
- /// \brief Returns true if \p F has hot function entry.
+ /// Returns true if \p F has hot function entry.
bool isFunctionEntryHot(const Function *F);
/// Returns true if \p F contains hot code.
bool isFunctionHotInCallGraph(const Function *F, BlockFrequencyInfo &BFI);
- /// \brief Returns true if \p F has cold function entry.
+ /// Returns true if \p F has cold function entry.
bool isFunctionEntryCold(const Function *F);
/// Returns true if \p F contains only cold code.
bool isFunctionColdInCallGraph(const Function *F, BlockFrequencyInfo &BFI);
- /// \brief Returns true if \p F is a hot function.
+ /// Returns true if count \p C is considered hot.
bool isHotCount(uint64_t C);
- /// \brief Returns true if count \p C is considered cold.
+ /// Returns true if count \p C is considered cold.
bool isColdCount(uint64_t C);
- /// \brief Returns true if BasicBlock \p B is considered hot.
+ /// Returns true if BasicBlock \p B is considered hot.
bool isHotBB(const BasicBlock *B, BlockFrequencyInfo *BFI);
- /// \brief Returns true if BasicBlock \p B is considered cold.
+ /// Returns true if BasicBlock \p B is considered cold.
bool isColdBB(const BasicBlock *B, BlockFrequencyInfo *BFI);
- /// \brief Returns true if CallSite \p CS is considered hot.
+ /// Returns true if CallSite \p CS is considered hot.
bool isHotCallSite(const CallSite &CS, BlockFrequencyInfo *BFI);
- /// \brief Returns true if Callsite \p CS is considered cold.
+ /// Returns true if Callsite \p CS is considered cold.
bool isColdCallSite(const CallSite &CS, BlockFrequencyInfo *BFI);
- /// \brief Returns HotCountThreshold if set.
+ /// Returns HotCountThreshold if set. Recompute HotCountThreshold
+ /// if not set.
+ uint64_t getOrCompHotCountThreshold();
+ /// Returns ColdCountThreshold if set. Recompute ColdCountThreshold
+ /// if not set.
+ uint64_t getOrCompColdCountThreshold();
+ /// Returns HotCountThreshold if set.
uint64_t getHotCountThreshold() {
return HotCountThreshold ? HotCountThreshold.getValue() : 0;
}
- /// \brief Returns ColdCountThreshold if set.
+ /// Returns ColdCountThreshold if set.
uint64_t getColdCountThreshold() {
return ColdCountThreshold ? ColdCountThreshold.getValue() : 0;
}
@@ -152,7 +158,7 @@
static AnalysisKey Key;
};
-/// \brief Printer pass that uses \c ProfileSummaryAnalysis.
+/// Printer pass that uses \c ProfileSummaryAnalysis.
class ProfileSummaryPrinterPass
: public PassInfoMixin<ProfileSummaryPrinterPass> {
raw_ostream &OS;
diff --git a/linux-x64/clang/include/llvm/Analysis/PtrUseVisitor.h b/linux-x64/clang/include/llvm/Analysis/PtrUseVisitor.h
index f934aa6..b34b25c 100644
--- a/linux-x64/clang/include/llvm/Analysis/PtrUseVisitor.h
+++ b/linux-x64/clang/include/llvm/Analysis/PtrUseVisitor.h
@@ -47,7 +47,7 @@
namespace detail {
-/// \brief Implementation of non-dependent functionality for \c PtrUseVisitor.
+/// Implementation of non-dependent functionality for \c PtrUseVisitor.
///
/// See \c PtrUseVisitor for the public interface and detailed comments about
/// usage. This class is just a helper base class which is not templated and
@@ -55,7 +55,7 @@
/// PtrUseVisitor.
class PtrUseVisitorBase {
public:
- /// \brief This class provides information about the result of a visit.
+ /// This class provides information about the result of a visit.
///
/// After walking all the users (recursively) of a pointer, the basic
/// infrastructure records some commonly useful information such as escape
@@ -64,7 +64,7 @@
public:
PtrInfo() : AbortedInfo(nullptr, false), EscapedInfo(nullptr, false) {}
- /// \brief Reset the pointer info, clearing all state.
+ /// Reset the pointer info, clearing all state.
void reset() {
AbortedInfo.setPointer(nullptr);
AbortedInfo.setInt(false);
@@ -72,37 +72,37 @@
EscapedInfo.setInt(false);
}
- /// \brief Did we abort the visit early?
+ /// Did we abort the visit early?
bool isAborted() const { return AbortedInfo.getInt(); }
- /// \brief Is the pointer escaped at some point?
+ /// Is the pointer escaped at some point?
bool isEscaped() const { return EscapedInfo.getInt(); }
- /// \brief Get the instruction causing the visit to abort.
+ /// Get the instruction causing the visit to abort.
/// \returns a pointer to the instruction causing the abort if one is
/// available; otherwise returns null.
Instruction *getAbortingInst() const { return AbortedInfo.getPointer(); }
- /// \brief Get the instruction causing the pointer to escape.
+ /// Get the instruction causing the pointer to escape.
/// \returns a pointer to the instruction which escapes the pointer if one
/// is available; otherwise returns null.
Instruction *getEscapingInst() const { return EscapedInfo.getPointer(); }
- /// \brief Mark the visit as aborted. Intended for use in a void return.
+ /// Mark the visit as aborted. Intended for use in a void return.
/// \param I The instruction which caused the visit to abort, if available.
void setAborted(Instruction *I = nullptr) {
AbortedInfo.setInt(true);
AbortedInfo.setPointer(I);
}
- /// \brief Mark the pointer as escaped. Intended for use in a void return.
+ /// Mark the pointer as escaped. Intended for use in a void return.
/// \param I The instruction which escapes the pointer, if available.
void setEscaped(Instruction *I = nullptr) {
EscapedInfo.setInt(true);
EscapedInfo.setPointer(I);
}
- /// \brief Mark the pointer as escaped, and the visit as aborted. Intended
+ /// Mark the pointer as escaped, and the visit as aborted. Intended
/// for use in a void return.
/// \param I The instruction which both escapes the pointer and aborts the
/// visit, if available.
@@ -121,10 +121,10 @@
/// \name Visitation infrastructure
/// @{
- /// \brief The info collected about the pointer being visited thus far.
+ /// The info collected about the pointer being visited thus far.
PtrInfo PI;
- /// \brief A struct of the data needed to visit a particular use.
+ /// A struct of the data needed to visit a particular use.
///
/// This is used to maintain a worklist fo to-visit uses. This is used to
/// make the visit be iterative rather than recursive.
@@ -135,10 +135,10 @@
APInt Offset;
};
- /// \brief The worklist of to-visit uses.
+ /// The worklist of to-visit uses.
SmallVector<UseToVisit, 8> Worklist;
- /// \brief A set of visited uses to break cycles in unreachable code.
+ /// A set of visited uses to break cycles in unreachable code.
SmallPtrSet<Use *, 8> VisitedUses;
/// @}
@@ -147,14 +147,14 @@
/// This state is reset for each instruction visited.
/// @{
- /// \brief The use currently being visited.
+ /// The use currently being visited.
Use *U;
- /// \brief True if we have a known constant offset for the use currently
+ /// True if we have a known constant offset for the use currently
/// being visited.
bool IsOffsetKnown;
- /// \brief The constant offset of the use if that is known.
+ /// The constant offset of the use if that is known.
APInt Offset;
/// @}
@@ -163,13 +163,13 @@
/// class, we can't create instances directly of this class.
PtrUseVisitorBase(const DataLayout &DL) : DL(DL) {}
- /// \brief Enqueue the users of this instruction in the visit worklist.
+ /// Enqueue the users of this instruction in the visit worklist.
///
/// This will visit the users with the same offset of the current visit
/// (including an unknown offset if that is the current state).
void enqueueUsers(Instruction &I);
- /// \brief Walk the operands of a GEP and adjust the offset as appropriate.
+ /// Walk the operands of a GEP and adjust the offset as appropriate.
///
/// This routine does the heavy lifting of the pointer walk by computing
/// offsets and looking through GEPs.
@@ -178,7 +178,7 @@
} // end namespace detail
-/// \brief A base class for visitors over the uses of a pointer value.
+/// A base class for visitors over the uses of a pointer value.
///
/// Once constructed, a user can call \c visit on a pointer value, and this
/// will walk its uses and visit each instruction using an InstVisitor. It also
@@ -216,7 +216,7 @@
"Must pass the derived type to this template!");
}
- /// \brief Recursively visit the uses of the given pointer.
+ /// Recursively visit the uses of the given pointer.
/// \returns An info struct about the pointer. See \c PtrInfo for details.
PtrInfo visitPtr(Instruction &I) {
// This must be a pointer type. Get an integer type suitable to hold
diff --git a/linux-x64/clang/include/llvm/Analysis/RegionInfo.h b/linux-x64/clang/include/llvm/Analysis/RegionInfo.h
index 4bf64d1..27f6cc1 100644
--- a/linux-x64/clang/include/llvm/Analysis/RegionInfo.h
+++ b/linux-x64/clang/include/llvm/Analysis/RegionInfo.h
@@ -42,6 +42,7 @@
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PassManager.h"
@@ -102,7 +103,7 @@
}
};
-/// @brief Marker class to iterate over the elements of a Region in flat mode.
+/// Marker class to iterate over the elements of a Region in flat mode.
///
/// The class is used to either iterate in Flat mode or by not using it to not
/// iterate in Flat mode. During a Flat mode iteration all Regions are entered
@@ -112,7 +113,7 @@
template <class GraphType>
class FlatIt {};
-/// @brief A RegionNode represents a subregion or a BasicBlock that is part of a
+/// A RegionNode represents a subregion or a BasicBlock that is part of a
/// Region.
template <class Tr>
class RegionNodeBase {
@@ -135,12 +136,12 @@
/// RegionNode.
PointerIntPair<BlockT *, 1, bool> entry;
- /// @brief The parent Region of this RegionNode.
+ /// The parent Region of this RegionNode.
/// @see getParent()
RegionT *parent;
protected:
- /// @brief Create a RegionNode.
+ /// Create a RegionNode.
///
/// @param Parent The parent of this RegionNode.
/// @param Entry The entry BasicBlock of the RegionNode. If this
@@ -156,7 +157,7 @@
RegionNodeBase(const RegionNodeBase &) = delete;
RegionNodeBase &operator=(const RegionNodeBase &) = delete;
- /// @brief Get the parent Region of this RegionNode.
+ /// Get the parent Region of this RegionNode.
///
/// The parent Region is the Region this RegionNode belongs to. If for
/// example a BasicBlock is element of two Regions, there exist two
@@ -166,7 +167,7 @@
/// @return Get the parent Region of this RegionNode.
inline RegionT *getParent() const { return parent; }
- /// @brief Get the entry BasicBlock of this RegionNode.
+ /// Get the entry BasicBlock of this RegionNode.
///
/// If this RegionNode represents a BasicBlock this is just the BasicBlock
/// itself, otherwise we return the entry BasicBlock of the Subregion
@@ -174,7 +175,7 @@
/// @return The entry BasicBlock of this RegionNode.
inline BlockT *getEntry() const { return entry.getPointer(); }
- /// @brief Get the content of this RegionNode.
+ /// Get the content of this RegionNode.
///
/// This can be either a BasicBlock or a subregion. Before calling getNodeAs()
/// check the type of the content with the isSubRegion() function call.
@@ -182,7 +183,7 @@
/// @return The content of this RegionNode.
template <class T> inline T *getNodeAs() const;
- /// @brief Is this RegionNode a subregion?
+ /// Is this RegionNode a subregion?
///
/// @return True if it contains a subregion. False if it contains a
/// BasicBlock.
@@ -190,7 +191,7 @@
};
//===----------------------------------------------------------------------===//
-/// @brief A single entry single exit Region.
+/// A single entry single exit Region.
///
/// A Region is a connected subgraph of a control flow graph that has exactly
/// two connections to the remaining graph. It can be used to analyze or
@@ -301,7 +302,7 @@
void verifyRegionNest() const;
public:
- /// @brief Create a new region.
+ /// Create a new region.
///
/// @param Entry The entry basic block of the region.
/// @param Exit The exit basic block of the region.
@@ -318,25 +319,25 @@
/// Delete the Region and all its subregions.
~RegionBase();
- /// @brief Get the entry BasicBlock of the Region.
+ /// Get the entry BasicBlock of the Region.
/// @return The entry BasicBlock of the region.
BlockT *getEntry() const {
return RegionNodeBase<Tr>::getEntry();
}
- /// @brief Replace the entry basic block of the region with the new basic
+ /// Replace the entry basic block of the region with the new basic
/// block.
///
/// @param BB The new entry basic block of the region.
void replaceEntry(BlockT *BB);
- /// @brief Replace the exit basic block of the region with the new basic
+ /// Replace the exit basic block of the region with the new basic
/// block.
///
/// @param BB The new exit basic block of the region.
void replaceExit(BlockT *BB);
- /// @brief Recursively replace the entry basic block of the region.
+ /// Recursively replace the entry basic block of the region.
///
/// This function replaces the entry basic block with a new basic block. It
/// also updates all child regions that have the same entry basic block as
@@ -345,7 +346,7 @@
/// @param NewEntry The new entry basic block.
void replaceEntryRecursive(BlockT *NewEntry);
- /// @brief Recursively replace the exit basic block of the region.
+ /// Recursively replace the exit basic block of the region.
///
/// This function replaces the exit basic block with a new basic block. It
/// also updates all child regions that have the same exit basic block as
@@ -354,38 +355,38 @@
/// @param NewExit The new exit basic block.
void replaceExitRecursive(BlockT *NewExit);
- /// @brief Get the exit BasicBlock of the Region.
+ /// Get the exit BasicBlock of the Region.
/// @return The exit BasicBlock of the Region, NULL if this is the TopLevel
/// Region.
BlockT *getExit() const { return exit; }
- /// @brief Get the parent of the Region.
+ /// Get the parent of the Region.
/// @return The parent of the Region or NULL if this is a top level
/// Region.
RegionT *getParent() const {
return RegionNodeBase<Tr>::getParent();
}
- /// @brief Get the RegionNode representing the current Region.
+ /// Get the RegionNode representing the current Region.
/// @return The RegionNode representing the current Region.
RegionNodeT *getNode() const {
return const_cast<RegionNodeT *>(
reinterpret_cast<const RegionNodeT *>(this));
}
- /// @brief Get the nesting level of this Region.
+ /// Get the nesting level of this Region.
///
/// An toplevel Region has depth 0.
///
/// @return The depth of the region.
unsigned getDepth() const;
- /// @brief Check if a Region is the TopLevel region.
+ /// Check if a Region is the TopLevel region.
///
/// The toplevel region represents the whole function.
bool isTopLevelRegion() const { return exit == nullptr; }
- /// @brief Return a new (non-canonical) region, that is obtained by joining
+ /// Return a new (non-canonical) region, that is obtained by joining
/// this region with its predecessors.
///
/// @return A region also starting at getEntry(), but reaching to the next
@@ -393,43 +394,43 @@
/// NULL if such a basic block does not exist.
RegionT *getExpandedRegion() const;
- /// @brief Return the first block of this region's single entry edge,
+ /// Return the first block of this region's single entry edge,
/// if existing.
///
/// @return The BasicBlock starting this region's single entry edge,
/// else NULL.
BlockT *getEnteringBlock() const;
- /// @brief Return the first block of this region's single exit edge,
+ /// Return the first block of this region's single exit edge,
/// if existing.
///
/// @return The BasicBlock starting this region's single exit edge,
/// else NULL.
BlockT *getExitingBlock() const;
- /// @brief Collect all blocks of this region's single exit edge, if existing.
+ /// Collect all blocks of this region's single exit edge, if existing.
///
/// @return True if this region contains all the predecessors of the exit.
bool getExitingBlocks(SmallVectorImpl<BlockT *> &Exitings) const;
- /// @brief Is this a simple region?
+ /// Is this a simple region?
///
/// A region is simple if it has exactly one exit and one entry edge.
///
/// @return True if the Region is simple.
bool isSimple() const;
- /// @brief Returns the name of the Region.
+ /// Returns the name of the Region.
/// @return The Name of the Region.
std::string getNameStr() const;
- /// @brief Return the RegionInfo object, that belongs to this Region.
+ /// Return the RegionInfo object, that belongs to this Region.
RegionInfoT *getRegionInfo() const { return RI; }
/// PrintStyle - Print region in difference ways.
enum PrintStyle { PrintNone, PrintBB, PrintRN };
- /// @brief Print the region.
+ /// Print the region.
///
/// @param OS The output stream the Region is printed to.
/// @param printTree Print also the tree of subregions.
@@ -438,17 +439,17 @@
PrintStyle Style = PrintNone) const;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
- /// @brief Print the region to stderr.
+ /// Print the region to stderr.
void dump() const;
#endif
- /// @brief Check if the region contains a BasicBlock.
+ /// Check if the region contains a BasicBlock.
///
/// @param BB The BasicBlock that might be contained in this Region.
/// @return True if the block is contained in the region otherwise false.
bool contains(const BlockT *BB) const;
- /// @brief Check if the region contains another region.
+ /// Check if the region contains another region.
///
/// @param SubRegion The region that might be contained in this Region.
/// @return True if SubRegion is contained in the region otherwise false.
@@ -462,14 +463,14 @@
SubRegion->getExit() == getExit());
}
- /// @brief Check if the region contains an Instruction.
+ /// Check if the region contains an Instruction.
///
/// @param Inst The Instruction that might be contained in this region.
/// @return True if the Instruction is contained in the region otherwise
/// false.
bool contains(const InstT *Inst) const { return contains(Inst->getParent()); }
- /// @brief Check if the region contains a loop.
+ /// Check if the region contains a loop.
///
/// @param L The loop that might be contained in this region.
/// @return True if the loop is contained in the region otherwise false.
@@ -478,7 +479,7 @@
/// In that case true is returned.
bool contains(const LoopT *L) const;
- /// @brief Get the outermost loop in the region that contains a loop.
+ /// Get the outermost loop in the region that contains a loop.
///
/// Find for a Loop L the outermost loop OuterL that is a parent loop of L
/// and is itself contained in the region.
@@ -488,7 +489,7 @@
/// exist or if the region describes the whole function.
LoopT *outermostLoopInRegion(LoopT *L) const;
- /// @brief Get the outermost loop in the region that contains a basic block.
+ /// Get the outermost loop in the region that contains a basic block.
///
/// Find for a basic block BB the outermost loop L that contains BB and is
/// itself contained in the region.
@@ -499,13 +500,13 @@
/// exist or if the region describes the whole function.
LoopT *outermostLoopInRegion(LoopInfoT *LI, BlockT *BB) const;
- /// @brief Get the subregion that starts at a BasicBlock
+ /// Get the subregion that starts at a BasicBlock
///
/// @param BB The BasicBlock the subregion should start.
/// @return The Subregion if available, otherwise NULL.
RegionT *getSubRegionNode(BlockT *BB) const;
- /// @brief Get the RegionNode for a BasicBlock
+ /// Get the RegionNode for a BasicBlock
///
/// @param BB The BasicBlock at which the RegionNode should start.
/// @return If available, the RegionNode that represents the subregion
@@ -513,38 +514,38 @@
/// representing BB.
RegionNodeT *getNode(BlockT *BB) const;
- /// @brief Get the BasicBlock RegionNode for a BasicBlock
+ /// Get the BasicBlock RegionNode for a BasicBlock
///
/// @param BB The BasicBlock for which the RegionNode is requested.
/// @return The RegionNode representing the BB.
RegionNodeT *getBBNode(BlockT *BB) const;
- /// @brief Add a new subregion to this Region.
+ /// Add a new subregion to this Region.
///
/// @param SubRegion The new subregion that will be added.
/// @param moveChildren Move the children of this region, that are also
/// contained in SubRegion into SubRegion.
void addSubRegion(RegionT *SubRegion, bool moveChildren = false);
- /// @brief Remove a subregion from this Region.
+ /// Remove a subregion from this Region.
///
/// The subregion is not deleted, as it will probably be inserted into another
/// region.
/// @param SubRegion The SubRegion that will be removed.
RegionT *removeSubRegion(RegionT *SubRegion);
- /// @brief Move all direct child nodes of this Region to another Region.
+ /// Move all direct child nodes of this Region to another Region.
///
/// @param To The Region the child nodes will be transferred to.
void transferChildrenTo(RegionT *To);
- /// @brief Verify if the region is a correct region.
+ /// Verify if the region is a correct region.
///
/// Check if this is a correctly build Region. This is an expensive check, as
/// the complete CFG of the Region will be walked.
void verifyRegion() const;
- /// @brief Clear the cache for BB RegionNodes.
+ /// Clear the cache for BB RegionNodes.
///
/// After calling this function the BasicBlock RegionNodes will be stored at
/// different memory locations. RegionNodes obtained before this function is
@@ -620,12 +621,12 @@
using block_range = iterator_range<block_iterator>;
using const_block_range = iterator_range<const_block_iterator>;
- /// @brief Returns a range view of the basic blocks in the region.
+ /// Returns a range view of the basic blocks in the region.
inline block_range blocks() {
return block_range(block_begin(), block_end());
}
- /// @brief Returns a range view of the basic blocks in the region.
+ /// Returns a range view of the basic blocks in the region.
///
/// This is the 'const' version of the range view.
inline const_block_range blocks() const {
@@ -667,7 +668,7 @@
inline raw_ostream &operator<<(raw_ostream &OS, const RegionNodeBase<Tr> &Node);
//===----------------------------------------------------------------------===//
-/// @brief Analysis that detects all canonical Regions.
+/// Analysis that detects all canonical Regions.
///
/// The RegionInfo pass detects all canonical regions in a function. The Regions
/// are connected using the parent relation. This builds a Program Structure
@@ -725,7 +726,7 @@
BBtoRegionMap BBtoRegion;
protected:
- /// \brief Update refences to a RegionInfoT held by the RegionT managed here
+ /// Update refences to a RegionInfoT held by the RegionT managed here
///
/// This is a post-move helper. Regions hold references to the owning
/// RegionInfo object. After a move these need to be fixed.
@@ -739,7 +740,7 @@
}
private:
- /// \brief Wipe this region tree's state without releasing any resources.
+ /// Wipe this region tree's state without releasing any resources.
///
/// This is essentially a post-move helper only. It leaves the object in an
/// assignable and destroyable state, but otherwise invalid.
@@ -811,40 +812,40 @@
void releaseMemory();
- /// @brief Get the smallest region that contains a BasicBlock.
+ /// Get the smallest region that contains a BasicBlock.
///
/// @param BB The basic block.
/// @return The smallest region, that contains BB or NULL, if there is no
/// region containing BB.
RegionT *getRegionFor(BlockT *BB) const;
- /// @brief Set the smallest region that surrounds a basic block.
+ /// Set the smallest region that surrounds a basic block.
///
/// @param BB The basic block surrounded by a region.
/// @param R The smallest region that surrounds BB.
void setRegionFor(BlockT *BB, RegionT *R);
- /// @brief A shortcut for getRegionFor().
+ /// A shortcut for getRegionFor().
///
/// @param BB The basic block.
/// @return The smallest region, that contains BB or NULL, if there is no
/// region containing BB.
RegionT *operator[](BlockT *BB) const;
- /// @brief Return the exit of the maximal refined region, that starts at a
+ /// Return the exit of the maximal refined region, that starts at a
/// BasicBlock.
///
/// @param BB The BasicBlock the refined region starts.
BlockT *getMaxRegionExit(BlockT *BB) const;
- /// @brief Find the smallest region that contains two regions.
+ /// Find the smallest region that contains two regions.
///
/// @param A The first region.
/// @param B The second region.
/// @return The smallest region containing A and B.
RegionT *getCommonRegion(RegionT *A, RegionT *B) const;
- /// @brief Find the smallest region that contains two basic blocks.
+ /// Find the smallest region that contains two basic blocks.
///
/// @param A The first basic block.
/// @param B The second basic block.
@@ -853,13 +854,13 @@
return getCommonRegion(getRegionFor(A), getRegionFor(B));
}
- /// @brief Find the smallest region that contains a set of regions.
+ /// Find the smallest region that contains a set of regions.
///
/// @param Regions A vector of regions.
/// @return The smallest region that contains all regions in Regions.
RegionT *getCommonRegion(SmallVectorImpl<RegionT *> &Regions) const;
- /// @brief Find the smallest region that contains a set of basic blocks.
+ /// Find the smallest region that contains a set of basic blocks.
///
/// @param BBs A vector of basic blocks.
/// @return The smallest region that contains all basic blocks in BBS.
@@ -867,7 +868,7 @@
RegionT *getTopLevelRegion() const { return TopLevelRegion; }
- /// @brief Clear the Node Cache for all Regions.
+ /// Clear the Node Cache for all Regions.
///
/// @see Region::clearNodeCache()
void clearNodeCache() {
@@ -930,12 +931,12 @@
DominanceFrontier *DF);
#ifndef NDEBUG
- /// @brief Opens a viewer to show the GraphViz visualization of the regions.
+ /// Opens a viewer to show the GraphViz visualization of the regions.
///
/// Useful during debugging as an alternative to dump().
void view();
- /// @brief Opens a viewer to show the GraphViz visualization of this region
+ /// Opens a viewer to show the GraphViz visualization of this region
/// without instructions in the BasicBlocks.
///
/// Useful during debugging as an alternative to dump().
@@ -967,7 +968,7 @@
//@}
};
-/// \brief Analysis pass that exposes the \c RegionInfo for a function.
+/// Analysis pass that exposes the \c RegionInfo for a function.
class RegionInfoAnalysis : public AnalysisInfoMixin<RegionInfoAnalysis> {
friend AnalysisInfoMixin<RegionInfoAnalysis>;
@@ -979,7 +980,7 @@
RegionInfo run(Function &F, FunctionAnalysisManager &AM);
};
-/// \brief Printer pass for the \c RegionInfo.
+/// Printer pass for the \c RegionInfo.
class RegionInfoPrinterPass : public PassInfoMixin<RegionInfoPrinterPass> {
raw_ostream &OS;
@@ -989,7 +990,7 @@
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
-/// \brief Verifier pass for the \c RegionInfo.
+/// Verifier pass for the \c RegionInfo.
struct RegionInfoVerifierPass : PassInfoMixin<RegionInfoVerifierPass> {
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
diff --git a/linux-x64/clang/include/llvm/Analysis/RegionInfoImpl.h b/linux-x64/clang/include/llvm/Analysis/RegionInfoImpl.h
index eb6baac..5904214 100644
--- a/linux-x64/clang/include/llvm/Analysis/RegionInfoImpl.h
+++ b/linux-x64/clang/include/llvm/Analysis/RegionInfoImpl.h
@@ -22,6 +22,7 @@
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionIterator.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -674,7 +675,7 @@
#ifdef EXPENSIVE_CHECKS
region->verifyRegion();
#else
- DEBUG(region->verifyRegion());
+ LLVM_DEBUG(region->verifyRegion());
#endif
updateStatistics(region);
diff --git a/linux-x64/clang/include/llvm/Analysis/RegionIterator.h b/linux-x64/clang/include/llvm/Analysis/RegionIterator.h
index 4f823cc..4fd92fc 100644
--- a/linux-x64/clang/include/llvm/Analysis/RegionIterator.h
+++ b/linux-x64/clang/include/llvm/Analysis/RegionIterator.h
@@ -26,7 +26,7 @@
class BasicBlock;
//===----------------------------------------------------------------------===//
-/// @brief Hierarchical RegionNode successor iterator.
+/// Hierarchical RegionNode successor iterator.
///
/// This iterator iterates over all successors of a RegionNode.
///
@@ -102,7 +102,7 @@
using Self = RNSuccIterator<NodeRef, BlockT, RegionT>;
using value_type = typename super::value_type;
- /// @brief Create begin iterator of a RegionNode.
+ /// Create begin iterator of a RegionNode.
inline RNSuccIterator(NodeRef node)
: Node(node, node->isSubRegion() ? ItRgBegin : ItBB),
BItor(BlockTraits::child_begin(node->getEntry())) {
@@ -115,7 +115,7 @@
advanceRegionSucc();
}
- /// @brief Create an end iterator.
+ /// Create an end iterator.
inline RNSuccIterator(NodeRef node, bool)
: Node(node, node->isSubRegion() ? ItRgEnd : ItBB),
BItor(BlockTraits::child_end(node->getEntry())) {}
@@ -158,7 +158,7 @@
};
//===----------------------------------------------------------------------===//
-/// @brief Flat RegionNode iterator.
+/// Flat RegionNode iterator.
///
/// The Flat Region iterator will iterate over all BasicBlock RegionNodes that
/// are contained in the Region and its subregions. This is close to a virtual
@@ -177,7 +177,7 @@
using Self = RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT>;
using value_type = typename super::value_type;
- /// @brief Create the iterator from a RegionNode.
+ /// Create the iterator from a RegionNode.
///
/// Note that the incoming node must be a bb node, otherwise it will trigger
/// an assertion when we try to get a BasicBlock.
@@ -193,7 +193,7 @@
++Itor;
}
- /// @brief Create an end iterator
+ /// Create an end iterator
inline RNSuccIterator(NodeRef node, bool)
: Node(node), Itor(BlockTraits::child_end(node->getEntry())) {
assert(!Node->isSubRegion() &&
diff --git a/linux-x64/clang/include/llvm/Analysis/RegionPass.h b/linux-x64/clang/include/llvm/Analysis/RegionPass.h
index 515b362..b3da91c 100644
--- a/linux-x64/clang/include/llvm/Analysis/RegionPass.h
+++ b/linux-x64/clang/include/llvm/Analysis/RegionPass.h
@@ -28,7 +28,7 @@
class Function;
//===----------------------------------------------------------------------===//
-/// @brief A pass that runs on each Region in a function.
+/// A pass that runs on each Region in a function.
///
/// RegionPass is managed by RGPassManager.
class RegionPass : public Pass {
@@ -39,7 +39,7 @@
/// @name To be implemented by every RegionPass
///
//@{
- /// @brief Run the pass on a specific Region
+ /// Run the pass on a specific Region
///
/// Accessing regions not contained in the current region is not allowed.
///
@@ -49,7 +49,7 @@
/// @return True if the pass modifies this Region.
virtual bool runOnRegion(Region *R, RGPassManager &RGM) = 0;
- /// @brief Get a pass to print the LLVM IR in the region.
+ /// Get a pass to print the LLVM IR in the region.
///
/// @param O The output stream to print the Region.
/// @param Banner The banner to separate different printed passes.
@@ -85,7 +85,7 @@
bool skipRegion(Region &R) const;
};
-/// @brief The pass manager to schedule RegionPasses.
+/// The pass manager to schedule RegionPasses.
class RGPassManager : public FunctionPass, public PMDataManager {
std::deque<Region*> RQ;
bool skipThisRegion;
@@ -97,7 +97,7 @@
static char ID;
explicit RGPassManager();
- /// @brief Execute all of the passes scheduled for execution.
+ /// Execute all of the passes scheduled for execution.
///
/// @return True if any of the passes modifies the function.
bool runOnFunction(Function &F) override;
@@ -111,10 +111,10 @@
PMDataManager *getAsPMDataManager() override { return this; }
Pass *getAsPass() override { return this; }
- /// @brief Print passes managed by this manager.
+ /// Print passes managed by this manager.
void dumpPassStructure(unsigned Offset) override;
- /// @brief Get passes contained by this manager.
+ /// Get passes contained by this manager.
Pass *getContainedPass(unsigned N) {
assert(N < PassVector.size() && "Pass number out of range!");
Pass *FP = static_cast<Pass *>(PassVector[N]);
diff --git a/linux-x64/clang/include/llvm/Analysis/RegionPrinter.h b/linux-x64/clang/include/llvm/Analysis/RegionPrinter.h
index 8f0035c..e132eae 100644
--- a/linux-x64/clang/include/llvm/Analysis/RegionPrinter.h
+++ b/linux-x64/clang/include/llvm/Analysis/RegionPrinter.h
@@ -26,7 +26,7 @@
FunctionPass *createRegionOnlyPrinterPass();
#ifndef NDEBUG
- /// @brief Open a viewer to display the GraphViz vizualization of the analysis
+ /// Open a viewer to display the GraphViz vizualization of the analysis
/// result.
///
/// Practical to call in the debugger.
@@ -35,7 +35,7 @@
/// @param RI The analysis to display.
void viewRegion(llvm::RegionInfo *RI);
- /// @brief Analyze the regions of a function and open its GraphViz
+ /// Analyze the regions of a function and open its GraphViz
/// visualization in a viewer.
///
/// Useful to call in the debugger.
@@ -46,7 +46,7 @@
/// @param F Function to analyze.
void viewRegion(const llvm::Function *F);
- /// @brief Open a viewer to display the GraphViz vizualization of the analysis
+ /// Open a viewer to display the GraphViz vizualization of the analysis
/// result.
///
/// Useful to call in the debugger.
@@ -55,7 +55,7 @@
/// @param RI The analysis to display.
void viewRegionOnly(llvm::RegionInfo *RI);
- /// @brief Analyze the regions of a function and open its GraphViz
+ /// Analyze the regions of a function and open its GraphViz
/// visualization in a viewer.
///
/// Useful to call in the debugger.
diff --git a/linux-x64/clang/include/llvm/Analysis/ScalarEvolution.h b/linux-x64/clang/include/llvm/Analysis/ScalarEvolution.h
index 7a43b81..89918e3 100644
--- a/linux-x64/clang/include/llvm/Analysis/ScalarEvolution.h
+++ b/linux-x64/clang/include/llvm/Analysis/ScalarEvolution.h
@@ -587,7 +587,9 @@
const SCEV *getUMaxExpr(const SCEV *LHS, const SCEV *RHS);
const SCEV *getUMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
+ const SCEV *getSMinExpr(SmallVectorImpl<const SCEV *> &Operands);
const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS);
+ const SCEV *getUMinExpr(SmallVectorImpl<const SCEV *> &Operands);
const SCEV *getUnknown(Value *V);
const SCEV *getCouldNotCompute();
@@ -650,6 +652,10 @@
/// then perform a umin operation with them.
const SCEV *getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS);
+ /// Promote the operands to the wider of the types using zero-extension, and
+ /// then perform a umin operation with them. N-ary function.
+ const SCEV *getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops);
+
/// Transitively follow the chain of pointer-type operands until reaching a
/// SCEV that does not have a single pointer operand. This returns a
/// SCEVUnknown pointer for well-formed pointer-type expressions, but corner
@@ -764,6 +770,12 @@
/// loop bodies.
void forgetLoop(const Loop *L);
+ // This method invokes forgetLoop for the outermost loop of the given loop
+ // \p L, making ScalarEvolution forget about all this subtree. This needs to
+ // be done whenever we make a transform that may affect the parameters of the
+ // outer loop, such as exit counts for branches.
+ void forgetTopmostLoop(const Loop *L);
+
/// This method should be called by the client when it has changed a value
/// in a way that may effect its value, or which may disconnect it from a
/// def-use chain linking it to a loop.
@@ -1085,7 +1097,7 @@
/// The target library information for the target we are targeting.
TargetLibraryInfo &TLI;
- /// The tracker for @llvm.assume intrinsics in this function.
+ /// The tracker for \@llvm.assume intrinsics in this function.
AssumptionCache &AC;
/// The dominator tree.
@@ -1142,6 +1154,9 @@
/// Mark SCEVUnknown Phis currently being processed by getRangeRef.
SmallPtrSet<const PHINode *, 6> PendingPhiRanges;
+ // Mark SCEVUnknown Phis currently being processed by isImpliedViaMerge.
+ SmallPtrSet<const PHINode *, 6> PendingMerges;
+
/// Set to true by isLoopBackedgeGuardedByCond when we're walking the set of
/// conditions dominating the backedge of a loop.
bool WalkingBEDominatingConds = false;
@@ -1667,6 +1682,18 @@
const SCEV *FoundLHS,
const SCEV *FoundRHS);
+ /// Test whether the condition described by Pred, LHS, and RHS is true
+ /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
+ /// true.
+ ///
+ /// This routine tries to figure out predicate for Phis which are SCEVUnknown
+ /// if it is true for every possible incoming value from their respective
+ /// basic blocks.
+ bool isImpliedViaMerge(ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS,
+ const SCEV *FoundLHS, const SCEV *FoundRHS,
+ unsigned Depth);
+
/// If we know that the specified Phi is in the header of its containing
/// loop, we know the loop executes a constant number of times, and the PHI
/// node is just a recurrence involving constants, fold it.
@@ -1806,6 +1833,9 @@
const SCEV *getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
SCEV::NoWrapFlags Flags);
+ /// Return x if \p Val is f(x) where f is a 1-1 function.
+ const SCEV *stripInjectiveFunctions(const SCEV *Val) const;
+
/// Find all of the loops transitively used in \p S, and fill \p LoopsUsed.
/// A loop is considered "used" by an expression if it contains
/// an add rec on said loop.
@@ -1815,6 +1845,10 @@
/// accordingly.
void addToLoopUseLists(const SCEV *S);
+ /// Try to match the pattern generated by getURemExpr(A, B). If successful,
+ /// assign A and B to LHS and RHS, respectively.
+ bool matchURem(const SCEV *Expr, const SCEV *&LHS, const SCEV *&RHS);
+
FoldingSet<SCEV> UniqueSCEVs;
FoldingSet<SCEVPredicate> UniquePreds;
BumpPtrAllocator SCEVAllocator;
diff --git a/linux-x64/clang/include/llvm/Analysis/ScalarEvolutionExpander.h b/linux-x64/clang/include/llvm/Analysis/ScalarEvolutionExpander.h
index 3df04e9..58d4268 100644
--- a/linux-x64/clang/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/linux-x64/clang/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -321,7 +321,7 @@
/// Arrange for there to be a cast of V to Ty at IP, reusing an existing
/// cast if a suitable one exists, moving an existing cast if a suitable one
- /// exists but isn't in the right place, or or creating a new one.
+ /// exists but isn't in the right place, or creating a new one.
Value *ReuseOrCreateCast(Value *V, Type *Ty,
Instruction::CastOps Op,
BasicBlock::iterator IP);
@@ -335,6 +335,7 @@
Value *expandAddToGEP(const SCEV *const *op_begin,
const SCEV *const *op_end,
PointerType *PTy, Type *Ty, Value *V);
+ Value *expandAddToGEP(const SCEV *Op, PointerType *PTy, Type *Ty, Value *V);
/// Find a previous Value in ExprValueMap for expand.
ScalarEvolution::ValueOffsetPair
diff --git a/linux-x64/clang/include/llvm/Analysis/SparsePropagation.h b/linux-x64/clang/include/llvm/Analysis/SparsePropagation.h
index 1b8df03..defcf96 100644
--- a/linux-x64/clang/include/llvm/Analysis/SparsePropagation.h
+++ b/linux-x64/clang/include/llvm/Analysis/SparsePropagation.h
@@ -238,7 +238,7 @@
// If this value is untracked, don't add it to the map.
if (LV == LatticeFunc->getUntrackedVal())
return LV;
- return ValueState[Key] = LV;
+ return ValueState[Key] = std::move(LV);
}
template <class LatticeKey, class LatticeVal, class KeyInfo>
@@ -250,7 +250,7 @@
// Update the state of the given LatticeKey and add its corresponding LLVM
// value to the work list.
- ValueState[Key] = LV;
+ ValueState[Key] = std::move(LV);
if (Value *V = KeyInfo::getValueFromLatticeKey(Key))
ValueWorkList.push_back(V);
}
@@ -260,7 +260,7 @@
BasicBlock *BB) {
if (!BBExecutable.insert(BB).second)
return;
- DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << "\n");
BBWorkList.push_back(BB); // Add the block to the work list!
}
@@ -270,8 +270,8 @@
if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second)
return; // This edge is already known to be executable!
- DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName() << " -> "
- << Dest->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName()
+ << " -> " << Dest->getName() << "\n");
if (BBExecutable.count(Dest)) {
// The destination is already executable, but we just made an edge
@@ -318,7 +318,7 @@
Constant *C =
dyn_cast_or_null<Constant>(LatticeFunc->GetValueFromLatticeVal(
- BCValue, BI->getCondition()->getType()));
+ std::move(BCValue), BI->getCondition()->getType()));
if (!C || !isa<ConstantInt>(C)) {
// Non-constant values can go either way.
Succs[0] = Succs[1] = true;
@@ -360,7 +360,7 @@
return;
Constant *C = dyn_cast_or_null<Constant>(LatticeFunc->GetValueFromLatticeVal(
- SCValue, SI.getCondition()->getType()));
+ std::move(SCValue), SI.getCondition()->getType()));
if (!C || !isa<ConstantInt>(C)) {
// All destinations are executable!
Succs.assign(TI.getNumSuccessors(), true);
@@ -408,7 +408,8 @@
LatticeFunc->ComputeInstructionState(PN, ChangedValues, *this);
for (auto &ChangedValue : ChangedValues)
if (ChangedValue.second != LatticeFunc->getUntrackedVal())
- UpdateState(ChangedValue.first, ChangedValue.second);
+ UpdateState(std::move(ChangedValue.first),
+ std::move(ChangedValue.second));
return;
}
@@ -477,7 +478,7 @@
Value *V = ValueWorkList.back();
ValueWorkList.pop_back();
- DEBUG(dbgs() << "\nPopped off V-WL: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << "\nPopped off V-WL: " << *V << "\n");
// "V" got into the work list because it made a transition. See if any
// users are both live and in need of updating.
@@ -492,7 +493,7 @@
BasicBlock *BB = BBWorkList.back();
BBWorkList.pop_back();
- DEBUG(dbgs() << "\nPopped off BBWL: " << *BB);
+ LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB);
// Notify all instructions in this basic block that they are newly
// executable.
diff --git a/linux-x64/clang/include/llvm/Analysis/TargetLibraryInfo.def b/linux-x64/clang/include/llvm/Analysis/TargetLibraryInfo.def
index a461ed8..f94debb 100644
--- a/linux-x64/clang/include/llvm/Analysis/TargetLibraryInfo.def
+++ b/linux-x64/clang/include/llvm/Analysis/TargetLibraryInfo.def
@@ -119,6 +119,12 @@
/// void operator delete[](void*, nothrow);
TLI_DEFINE_ENUM_INTERNAL(ZdaPvRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZdaPvRKSt9nothrow_t")
+/// void operator delete[](void*, align_val_t);
+TLI_DEFINE_ENUM_INTERNAL(ZdaPvSt11align_val_t)
+TLI_DEFINE_STRING_INTERNAL("_ZdaPvSt11align_val_t")
+/// void operator delete[](void*, align_val_t, nothrow)
+TLI_DEFINE_ENUM_INTERNAL(ZdaPvSt11align_val_tRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZdaPvSt11align_val_tRKSt9nothrow_t")
/// void operator delete[](void*, unsigned int);
TLI_DEFINE_ENUM_INTERNAL(ZdaPvj)
TLI_DEFINE_STRING_INTERNAL("_ZdaPvj")
@@ -131,6 +137,12 @@
/// void operator delete(void*, nothrow);
TLI_DEFINE_ENUM_INTERNAL(ZdlPvRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZdlPvRKSt9nothrow_t")
+/// void operator delete(void*, align_val_t)
+TLI_DEFINE_ENUM_INTERNAL(ZdlPvSt11align_val_t)
+TLI_DEFINE_STRING_INTERNAL("_ZdlPvSt11align_val_t")
+/// void operator delete(void*, align_val_t, nothrow)
+TLI_DEFINE_ENUM_INTERNAL(ZdlPvSt11align_val_tRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZdlPvSt11align_val_tRKSt9nothrow_t")
/// void operator delete(void*, unsigned int);
TLI_DEFINE_ENUM_INTERNAL(ZdlPvj)
TLI_DEFINE_STRING_INTERNAL("_ZdlPvj")
@@ -143,24 +155,48 @@
/// void *new[](unsigned int, nothrow);
TLI_DEFINE_ENUM_INTERNAL(ZnajRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZnajRKSt9nothrow_t")
+/// void *new[](unsigned int, align_val_t)
+TLI_DEFINE_ENUM_INTERNAL(ZnajSt11align_val_t)
+TLI_DEFINE_STRING_INTERNAL("_ZnajSt11align_val_t")
+/// void *new[](unsigned int, align_val_t, nothrow)
+TLI_DEFINE_ENUM_INTERNAL(ZnajSt11align_val_tRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZnajSt11align_val_tRKSt9nothrow_t")
/// void *new[](unsigned long);
TLI_DEFINE_ENUM_INTERNAL(Znam)
TLI_DEFINE_STRING_INTERNAL("_Znam")
/// void *new[](unsigned long, nothrow);
TLI_DEFINE_ENUM_INTERNAL(ZnamRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZnamRKSt9nothrow_t")
+/// void *new[](unsigned long, align_val_t)
+TLI_DEFINE_ENUM_INTERNAL(ZnamSt11align_val_t)
+TLI_DEFINE_STRING_INTERNAL("_ZnamSt11align_val_t")
+/// void *new[](unsigned long, align_val_t, nothrow)
+TLI_DEFINE_ENUM_INTERNAL(ZnamSt11align_val_tRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZnamSt11align_val_tRKSt9nothrow_t")
/// void *new(unsigned int);
TLI_DEFINE_ENUM_INTERNAL(Znwj)
TLI_DEFINE_STRING_INTERNAL("_Znwj")
/// void *new(unsigned int, nothrow);
TLI_DEFINE_ENUM_INTERNAL(ZnwjRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZnwjRKSt9nothrow_t")
+/// void *new(unsigned int, align_val_t)
+TLI_DEFINE_ENUM_INTERNAL(ZnwjSt11align_val_t)
+TLI_DEFINE_STRING_INTERNAL("_ZnwjSt11align_val_t")
+/// void *new(unsigned int, align_val_t, nothrow)
+TLI_DEFINE_ENUM_INTERNAL(ZnwjSt11align_val_tRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZnwjSt11align_val_tRKSt9nothrow_t")
/// void *new(unsigned long);
TLI_DEFINE_ENUM_INTERNAL(Znwm)
TLI_DEFINE_STRING_INTERNAL("_Znwm")
/// void *new(unsigned long, nothrow);
TLI_DEFINE_ENUM_INTERNAL(ZnwmRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZnwmRKSt9nothrow_t")
+/// void *new(unsigned long, align_val_t)
+TLI_DEFINE_ENUM_INTERNAL(ZnwmSt11align_val_t)
+TLI_DEFINE_STRING_INTERNAL("_ZnwmSt11align_val_t")
+/// void *new(unsigned long, align_val_t, nothrow)
+TLI_DEFINE_ENUM_INTERNAL(ZnwmSt11align_val_tRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZnwmSt11align_val_tRKSt9nothrow_t")
/// double __acos_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(acos_finite)
TLI_DEFINE_STRING_INTERNAL("__acos_finite")
@@ -601,12 +637,18 @@
/// int fgetc(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fgetc)
TLI_DEFINE_STRING_INTERNAL("fgetc")
+/// int fgetc_unlocked(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fgetc_unlocked)
+TLI_DEFINE_STRING_INTERNAL("fgetc_unlocked")
/// int fgetpos(FILE *stream, fpos_t *pos);
TLI_DEFINE_ENUM_INTERNAL(fgetpos)
TLI_DEFINE_STRING_INTERNAL("fgetpos")
/// char *fgets(char *s, int n, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fgets)
TLI_DEFINE_STRING_INTERNAL("fgets")
+/// char *fgets_unlocked(char *s, int n, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fgets_unlocked)
+TLI_DEFINE_STRING_INTERNAL("fgets_unlocked")
/// int fileno(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fileno)
TLI_DEFINE_STRING_INTERNAL("fileno")
@@ -673,12 +715,21 @@
/// int fputc(int c, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fputc)
TLI_DEFINE_STRING_INTERNAL("fputc")
+/// int fputc_unlocked(int c, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fputc_unlocked)
+TLI_DEFINE_STRING_INTERNAL("fputc_unlocked")
/// int fputs(const char *s, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fputs)
TLI_DEFINE_STRING_INTERNAL("fputs")
+/// int fputs_unlocked(const char *s, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fputs_unlocked)
+TLI_DEFINE_STRING_INTERNAL("fputs_unlocked")
/// size_t fread(void *ptr, size_t size, size_t nitems, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fread)
TLI_DEFINE_STRING_INTERNAL("fread")
+/// size_t fread_unlocked(void *ptr, size_t size, size_t nitems, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fread_unlocked)
+TLI_DEFINE_STRING_INTERNAL("fread_unlocked")
/// void free(void *ptr);
TLI_DEFINE_ENUM_INTERNAL(free)
TLI_DEFINE_STRING_INTERNAL("free")
@@ -736,6 +787,9 @@
/// size_t fwrite(const void *ptr, size_t size, size_t nitems, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fwrite)
TLI_DEFINE_STRING_INTERNAL("fwrite")
+/// size_t fwrite_unlocked(const void *ptr, size_t size, size_t nitems, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fwrite_unlocked)
+TLI_DEFINE_STRING_INTERNAL("fwrite_unlocked")
/// int getc(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(getc)
TLI_DEFINE_STRING_INTERNAL("getc")
@@ -745,6 +799,9 @@
/// int getchar(void);
TLI_DEFINE_ENUM_INTERNAL(getchar)
TLI_DEFINE_STRING_INTERNAL("getchar")
+/// int getchar_unlocked(void);
+TLI_DEFINE_ENUM_INTERNAL(getchar_unlocked)
+TLI_DEFINE_STRING_INTERNAL("getchar_unlocked")
/// char *getenv(const char *name);
TLI_DEFINE_ENUM_INTERNAL(getenv)
TLI_DEFINE_STRING_INTERNAL("getenv")
@@ -950,9 +1007,15 @@
/// int putc(int c, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(putc)
TLI_DEFINE_STRING_INTERNAL("putc")
+/// int putc_unlocked(int c, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(putc_unlocked)
+TLI_DEFINE_STRING_INTERNAL("putc_unlocked")
/// int putchar(int c);
TLI_DEFINE_ENUM_INTERNAL(putchar)
TLI_DEFINE_STRING_INTERNAL("putchar")
+/// int putchar_unlocked(int c);
+TLI_DEFINE_ENUM_INTERNAL(putchar_unlocked)
+TLI_DEFINE_STRING_INTERNAL("putchar_unlocked")
/// int puts(const char *s);
TLI_DEFINE_ENUM_INTERNAL(puts)
TLI_DEFINE_STRING_INTERNAL("puts")
diff --git a/linux-x64/clang/include/llvm/Analysis/TargetTransformInfo.h b/linux-x64/clang/include/llvm/Analysis/TargetTransformInfo.h
index 9e9c661..59657cc 100644
--- a/linux-x64/clang/include/llvm/Analysis/TargetTransformInfo.h
+++ b/linux-x64/clang/include/llvm/Analysis/TargetTransformInfo.h
@@ -49,7 +49,7 @@
class User;
class Value;
-/// \brief Information about a load/store intrinsic defined by the target.
+/// Information about a load/store intrinsic defined by the target.
struct MemIntrinsicInfo {
/// This is the pointer that the intrinsic is loading from or storing to.
/// If this is non-null, then analysis/optimization passes can assume that
@@ -73,18 +73,18 @@
}
};
-/// \brief This pass provides access to the codegen interfaces that are needed
+/// This pass provides access to the codegen interfaces that are needed
/// for IR-level transformations.
class TargetTransformInfo {
public:
- /// \brief Construct a TTI object using a type implementing the \c Concept
+ /// Construct a TTI object using a type implementing the \c Concept
/// API below.
///
/// This is used by targets to construct a TTI wrapping their target-specific
/// implementaion that encodes appropriate costs for their target.
template <typename T> TargetTransformInfo(T Impl);
- /// \brief Construct a baseline TTI object using a minimal implementation of
+ /// Construct a baseline TTI object using a minimal implementation of
/// the \c Concept API below.
///
/// The TTI implementation will reflect the information in the DataLayout
@@ -99,7 +99,7 @@
// out-of-line.
~TargetTransformInfo();
- /// \brief Handle the invalidation of this information.
+ /// Handle the invalidation of this information.
///
/// When used as a result of \c TargetIRAnalysis this method will be called
/// when the function this was computed for changes. When it returns false,
@@ -114,7 +114,7 @@
/// \name Generic Target Information
/// @{
- /// \brief The kind of cost model.
+ /// The kind of cost model.
///
/// There are several different cost models that can be customized by the
/// target. The normalization of each cost model may be target specific.
@@ -124,7 +124,7 @@
TCK_CodeSize ///< Instruction code size.
};
- /// \brief Query the cost of a specified instruction.
+ /// Query the cost of a specified instruction.
///
/// Clients should use this interface to query the cost of an existing
/// instruction. The instruction must have a valid parent (basic block).
@@ -145,7 +145,7 @@
llvm_unreachable("Unknown instruction cost kind");
}
- /// \brief Underlying constants for 'cost' values in this interface.
+ /// Underlying constants for 'cost' values in this interface.
///
/// Many APIs in this interface return a cost. This enum defines the
/// fundamental values that should be used to interpret (and produce) those
@@ -169,7 +169,7 @@
TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
};
- /// \brief Estimate the cost of a specific operation when lowered.
+ /// Estimate the cost of a specific operation when lowered.
///
/// Note that this is designed to work on an arbitrary synthetic opcode, and
/// thus work for hypothetical queries before an instruction has even been
@@ -185,7 +185,7 @@
/// comments for a detailed explanation of the cost values.
int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy = nullptr) const;
- /// \brief Estimate the cost of a GEP operation when lowered.
+ /// Estimate the cost of a GEP operation when lowered.
///
/// The contract for this function is the same as \c getOperationCost except
/// that it supports an interface that provides extra information specific to
@@ -193,14 +193,14 @@
int getGEPCost(Type *PointeeType, const Value *Ptr,
ArrayRef<const Value *> Operands) const;
- /// \brief Estimate the cost of a EXT operation when lowered.
+ /// Estimate the cost of a EXT operation when lowered.
///
/// The contract for this function is the same as \c getOperationCost except
/// that it supports an interface that provides extra information specific to
/// the EXT operation.
int getExtCost(const Instruction *I, const Value *Src) const;
- /// \brief Estimate the cost of a function call when lowered.
+ /// Estimate the cost of a function call when lowered.
///
/// The contract for this is the same as \c getOperationCost except that it
/// supports an interface that provides extra information specific to call
@@ -211,13 +211,13 @@
/// The latter is only interesting for varargs function types.
int getCallCost(FunctionType *FTy, int NumArgs = -1) const;
- /// \brief Estimate the cost of calling a specific function when lowered.
+ /// Estimate the cost of calling a specific function when lowered.
///
/// This overload adds the ability to reason about the particular function
/// being called in the event it is a library call with special lowering.
int getCallCost(const Function *F, int NumArgs = -1) const;
- /// \brief Estimate the cost of calling a specific function when lowered.
+ /// Estimate the cost of calling a specific function when lowered.
///
/// This overload allows specifying a set of candidate argument values.
int getCallCost(const Function *F, ArrayRef<const Value *> Arguments) const;
@@ -230,13 +230,13 @@
/// individual classes of instructions would be better.
unsigned getInliningThresholdMultiplier() const;
- /// \brief Estimate the cost of an intrinsic when lowered.
+ /// Estimate the cost of an intrinsic when lowered.
///
/// Mirrors the \c getCallCost method but uses an intrinsic identifier.
int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<Type *> ParamTys) const;
- /// \brief Estimate the cost of an intrinsic when lowered.
+ /// Estimate the cost of an intrinsic when lowered.
///
/// Mirrors the \c getCallCost method but uses an intrinsic identifier.
int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
@@ -248,7 +248,7 @@
unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
unsigned &JTSize) const;
- /// \brief Estimate the cost of a given IR user when lowered.
+ /// Estimate the cost of a given IR user when lowered.
///
/// This can estimate the cost of either a ConstantExpr or Instruction when
/// lowered. It has two primary advantages over the \c getOperationCost and
@@ -271,7 +271,7 @@
/// comments for a detailed explanation of the cost values.
int getUserCost(const User *U, ArrayRef<const Value *> Operands) const;
- /// \brief This is a helper function which calls the two-argument getUserCost
+ /// This is a helper function which calls the two-argument getUserCost
/// with \p Operands which are the current operands U has.
int getUserCost(const User *U) const {
SmallVector<const Value *, 4> Operands(U->value_op_begin(),
@@ -279,14 +279,14 @@
return getUserCost(U, Operands);
}
- /// \brief Return true if branch divergence exists.
+ /// Return true if branch divergence exists.
///
/// Branch divergence has a significantly negative impact on GPU performance
/// when threads in the same wavefront take different paths due to conditional
/// branches.
bool hasBranchDivergence() const;
- /// \brief Returns whether V is a source of divergence.
+ /// Returns whether V is a source of divergence.
///
/// This function provides the target-dependent information for
/// the target-independent DivergenceAnalysis. DivergenceAnalysis first
@@ -294,7 +294,7 @@
/// starting with the sources of divergence.
bool isSourceOfDivergence(const Value *V) const;
- // \brief Returns true for the target specific
+ // Returns true for the target specific
// set of operations which produce uniform result
// even taking non-unform arguments
bool isAlwaysUniform(const Value *V) const;
@@ -317,7 +317,7 @@
/// optimize away.
unsigned getFlatAddressSpace() const;
- /// \brief Test whether calls to a function lower to actual program function
+ /// Test whether calls to a function lower to actual program function
/// calls.
///
/// The idea is to test whether the program is likely to require a 'call'
@@ -422,9 +422,16 @@
bool AllowPeeling;
/// Allow unrolling of all the iterations of the runtime loop remainder.
bool UnrollRemainder;
+ /// Allow unroll and jam. Used to enable unroll and jam for the target.
+ bool UnrollAndJam;
+ /// Threshold for unroll and jam, for inner loop size. The 'Threshold'
+ /// value above is used during unroll and jam for the outer loop size.
+ /// This value is used in the same manner to limit the size of the inner
+ /// loop.
+ unsigned UnrollAndJamInnerLoopThreshold;
};
- /// \brief Get target-customized preferences for the generic loop unrolling
+ /// Get target-customized preferences for the generic loop unrolling
/// transformation. The caller will initialize UP with the current
/// target-independent defaults.
void getUnrollingPreferences(Loop *L, ScalarEvolution &,
@@ -435,7 +442,7 @@
/// \name Scalar Target Information
/// @{
- /// \brief Flags indicating the kind of support for population count.
+ /// Flags indicating the kind of support for population count.
///
/// Compared to the SW implementation, HW support is supposed to
/// significantly boost the performance when the population is dense, and it
@@ -445,18 +452,18 @@
/// considered as "Slow".
enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware };
- /// \brief Return true if the specified immediate is legal add immediate, that
+ /// Return true if the specified immediate is legal add immediate, that
/// is the target has add instructions which can add a register with the
/// immediate without having to materialize the immediate into a register.
bool isLegalAddImmediate(int64_t Imm) const;
- /// \brief Return true if the specified immediate is legal icmp immediate,
+ /// Return true if the specified immediate is legal icmp immediate,
/// that is the target has icmp instructions which can compare a register
/// against the immediate without having to materialize the immediate into a
/// register.
bool isLegalICmpImmediate(int64_t Imm) const;
- /// \brief Return true if the addressing mode represented by AM is legal for
+ /// Return true if the addressing mode represented by AM is legal for
/// this target, for a load/store of the specified type.
/// The type may be VoidTy, in which case only return true if the addressing
/// mode is legal for a load/store of any legal type.
@@ -467,7 +474,7 @@
unsigned AddrSpace = 0,
Instruction *I = nullptr) const;
- /// \brief Return true if LSR cost of C1 is lower than C1.
+ /// Return true if LSR cost of C1 is lower than C2.
bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
TargetTransformInfo::LSRCost &C2) const;
@@ -480,12 +487,12 @@
/// addressing mode expressions.
bool shouldFavorPostInc() const;
- /// \brief Return true if the target supports masked load/store
+ /// Return true if the target supports masked load/store
/// AVX2 and AVX-512 targets allow masks for consecutive load and store
bool isLegalMaskedStore(Type *DataType) const;
bool isLegalMaskedLoad(Type *DataType) const;
- /// \brief Return true if the target supports masked gather/scatter
+ /// Return true if the target supports masked gather/scatter
/// AVX-512 fully supports gather and scatter for vectors with 32 and 64
/// bits scalar type.
bool isLegalMaskedScatter(Type *DataType) const;
@@ -508,7 +515,7 @@
/// Return true if target doesn't mind addresses in vectors.
bool prefersVectorizedAddressing() const;
- /// \brief Return the cost of the scaling factor used in the addressing
+ /// Return the cost of the scaling factor used in the addressing
/// mode represented by AM for this target, for a load/store
/// of the specified type.
/// If the AM is supported, the return value must be >= 0.
@@ -518,41 +525,41 @@
bool HasBaseReg, int64_t Scale,
unsigned AddrSpace = 0) const;
- /// \brief Return true if the loop strength reduce pass should make
+ /// Return true if the loop strength reduce pass should make
/// Instruction* based TTI queries to isLegalAddressingMode(). This is
/// needed on SystemZ, where e.g. a memcpy can only have a 12 bit unsigned
/// immediate offset and no index register.
bool LSRWithInstrQueries() const;
- /// \brief Return true if it's free to truncate a value of type Ty1 to type
+ /// Return true if it's free to truncate a value of type Ty1 to type
/// Ty2. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
/// by referencing its sub-register AX.
bool isTruncateFree(Type *Ty1, Type *Ty2) const;
- /// \brief Return true if it is profitable to hoist instruction in the
+ /// Return true if it is profitable to hoist instruction in the
/// then/else to before if.
bool isProfitableToHoist(Instruction *I) const;
bool useAA() const;
- /// \brief Return true if this type is legal.
+ /// Return true if this type is legal.
bool isTypeLegal(Type *Ty) const;
- /// \brief Returns the target's jmp_buf alignment in bytes.
+ /// Returns the target's jmp_buf alignment in bytes.
unsigned getJumpBufAlignment() const;
- /// \brief Returns the target's jmp_buf size in bytes.
+ /// Returns the target's jmp_buf size in bytes.
unsigned getJumpBufSize() const;
- /// \brief Return true if switches should be turned into lookup tables for the
+ /// Return true if switches should be turned into lookup tables for the
/// target.
bool shouldBuildLookupTables() const;
- /// \brief Return true if switches should be turned into lookup tables
+ /// Return true if switches should be turned into lookup tables
/// containing this constant value for the target.
bool shouldBuildLookupTablesForConstant(Constant *C) const;
- /// \brief Return true if the input function which is cold at all call sites,
+ /// Return true if the input function which is cold at all call sites,
/// should use coldcc calling convention.
bool useColdCCForColdCall(Function &F) const;
@@ -566,10 +573,10 @@
/// the scalarization cost of a load/store.
bool supportsEfficientVectorElementLoadStore() const;
- /// \brief Don't restrict interleaved unrolling to small loops.
+ /// Don't restrict interleaved unrolling to small loops.
bool enableAggressiveInterleaving(bool LoopHasReductions) const;
- /// \brief If not nullptr, enable inline expansion of memcmp. IsZeroCmp is
+ /// If not nullptr, enable inline expansion of memcmp. IsZeroCmp is
/// true if this is the expansion of memcmp(p1, p2, s) == 0.
struct MemCmpExpansionOptions {
// The list of available load sizes (in bytes), sorted in decreasing order.
@@ -577,10 +584,10 @@
};
const MemCmpExpansionOptions *enableMemCmpExpansion(bool IsZeroCmp) const;
- /// \brief Enable matching of interleaved access groups.
+ /// Enable matching of interleaved access groups.
bool enableInterleavedAccessVectorization() const;
- /// \brief Indicate that it is potentially unsafe to automatically vectorize
+ /// Indicate that it is potentially unsafe to automatically vectorize
/// floating-point operations because the semantics of vector and scalar
/// floating-point semantics may differ. For example, ARM NEON v7 SIMD math
/// does not support IEEE-754 denormal numbers, while depending on the
@@ -589,16 +596,16 @@
/// operations, shuffles, or casts.
bool isFPVectorizationPotentiallyUnsafe() const;
- /// \brief Determine if the target supports unaligned memory accesses.
+ /// Determine if the target supports unaligned memory accesses.
bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
unsigned BitWidth, unsigned AddressSpace = 0,
unsigned Alignment = 1,
bool *Fast = nullptr) const;
- /// \brief Return hardware support for population count.
+ /// Return hardware support for population count.
PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;
- /// \brief Return true if the hardware has a fast square-root instruction.
+ /// Return true if the hardware has a fast square-root instruction.
bool haveFastSqrt(Type *Ty) const;
/// Return true if it is faster to check if a floating-point value is NaN
@@ -607,15 +614,15 @@
/// generally as cheap as checking for ordered/unordered.
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const;
- /// \brief Return the expected cost of supporting the floating point operation
+ /// Return the expected cost of supporting the floating point operation
/// of the specified type.
int getFPOpCost(Type *Ty) const;
- /// \brief Return the expected cost of materializing for the given integer
+ /// Return the expected cost of materializing for the given integer
/// immediate of the specified type.
int getIntImmCost(const APInt &Imm, Type *Ty) const;
- /// \brief Return the expected cost of materialization for the given integer
+ /// Return the expected cost of materialization for the given integer
/// immediate of the specified type for a given instruction. The cost can be
/// zero if the immediate can be folded into the specified instruction.
int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
@@ -623,7 +630,7 @@
int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty) const;
- /// \brief Return the expected cost for the given integer when optimising
+ /// Return the expected cost for the given integer when optimising
/// for size. This is different than the other integer immediate cost
/// functions in that it is subtarget agnostic. This is useful when you e.g.
/// target one ISA such as Aarch32 but smaller encodings could be possible
@@ -637,11 +644,14 @@
/// \name Vector Target Information
/// @{
- /// \brief The various kinds of shuffle patterns for vector queries.
+ /// The various kinds of shuffle patterns for vector queries.
enum ShuffleKind {
SK_Broadcast, ///< Broadcast element 0 to all other elements.
SK_Reverse, ///< Reverse the order of the vector.
- SK_Alternate, ///< Choose alternate elements from vector.
+ SK_Select, ///< Selects elements from the corresponding lane of
+ ///< either source operand. This is equivalent to a
+ ///< vector select with a constant condition operand.
+ SK_Transpose, ///< Transpose two vectors.
SK_InsertSubvector, ///< InsertSubvector. Index indicates start offset.
SK_ExtractSubvector,///< ExtractSubvector Index indicates start offset.
SK_PermuteTwoSrc, ///< Merge elements from two source vectors into one
@@ -650,7 +660,7 @@
///< shuffle mask.
};
- /// \brief Additional information about an operand's possible values.
+ /// Additional information about an operand's possible values.
enum OperandValueKind {
OK_AnyValue, // Operand can have any value.
OK_UniformValue, // Operand is uniform (splat of a value).
@@ -658,7 +668,7 @@
OK_NonUniformConstantValue // Operand is a non uniform constant value.
};
- /// \brief Additional properties of an operand's values.
+ /// Additional properties of an operand's values.
enum OperandValueProperties { OP_None = 0, OP_PowerOf2 = 1 };
/// \return The number of scalar or vector registers that the target has.
@@ -680,6 +690,11 @@
/// size of the widest element type.
bool shouldMaximizeVectorBandwidth(bool OptSize) const;
+ /// \return The minimum vectorization factor for types of given element
+ /// bit width, or 0 if there is no minimum VF. The returned value only
+ /// applies when shouldMaximizeVectorBandwidth returns true.
+ unsigned getMinimumVF(unsigned ElemWidth) const;
+
/// \return True if it should be considered for address type promotion.
/// \p AllowPromotionWithoutCommonHeader Set true if promoting \p I is
/// profitable without finding other extensions fed by the same input.
@@ -806,7 +821,7 @@
ArrayRef<unsigned> Indices, unsigned Alignment,
unsigned AddressSpace) const;
- /// \brief Calculate the cost of performing a vector reduction.
+ /// Calculate the cost of performing a vector reduction.
///
/// This is the cost of reducing the vector value of type \p Ty to a scalar
/// value using the operation denoted by \p Opcode. The form of the reduction
@@ -900,7 +915,7 @@
bool areInlineCompatible(const Function *Caller,
const Function *Callee) const;
- /// \brief The type of load/store indexing.
+ /// The type of load/store indexing.
enum MemIndexedMode {
MIM_Unindexed, ///< No indexing.
MIM_PreInc, ///< Pre-incrementing.
@@ -966,19 +981,19 @@
/// @}
private:
- /// \brief Estimate the latency of specified instruction.
+ /// Estimate the latency of specified instruction.
/// Returns 1 as the default value.
int getInstructionLatency(const Instruction *I) const;
- /// \brief Returns the expected throughput cost of the instruction.
+ /// Returns the expected throughput cost of the instruction.
/// Returns -1 if the cost is unknown.
int getInstructionThroughput(const Instruction *I) const;
- /// \brief The abstract base class used to type erase specific TTI
+ /// The abstract base class used to type erase specific TTI
/// implementations.
class Concept;
- /// \brief The template model for the base class which wraps a concrete
+ /// The template model for the base class which wraps a concrete
/// implementation in a type erased interface.
template <typename T> class Model;
@@ -1074,6 +1089,7 @@
virtual unsigned getRegisterBitWidth(bool Vector) const = 0;
virtual unsigned getMinVectorRegisterBitWidth() = 0;
virtual bool shouldMaximizeVectorBandwidth(bool OptSize) const = 0;
+ virtual unsigned getMinimumVF(unsigned ElemWidth) const = 0;
virtual bool shouldConsiderAddressTypePromotion(
const Instruction &I, bool &AllowPromotionWithoutCommonHeader) = 0;
virtual unsigned getCacheLineSize() = 0;
@@ -1373,6 +1389,9 @@
bool shouldMaximizeVectorBandwidth(bool OptSize) const override {
return Impl.shouldMaximizeVectorBandwidth(OptSize);
}
+ unsigned getMinimumVF(unsigned ElemWidth) const override {
+ return Impl.getMinimumVF(ElemWidth);
+ }
bool shouldConsiderAddressTypePromotion(
const Instruction &I, bool &AllowPromotionWithoutCommonHeader) override {
return Impl.shouldConsiderAddressTypePromotion(
@@ -1564,7 +1583,7 @@
TargetTransformInfo::TargetTransformInfo(T Impl)
: TTIImpl(new Model<T>(Impl)) {}
-/// \brief Analysis pass providing the \c TargetTransformInfo.
+/// Analysis pass providing the \c TargetTransformInfo.
///
/// The core idea of the TargetIRAnalysis is to expose an interface through
/// which LLVM targets can analyze and provide information about the middle
@@ -1579,13 +1598,13 @@
public:
typedef TargetTransformInfo Result;
- /// \brief Default construct a target IR analysis.
+ /// Default construct a target IR analysis.
///
/// This will use the module's datalayout to construct a baseline
/// conservative TTI result.
TargetIRAnalysis();
- /// \brief Construct an IR analysis pass around a target-provide callback.
+ /// Construct an IR analysis pass around a target-provided callback.
///
/// The callback will be called with a particular function for which the TTI
/// is needed and must return a TTI object for that function.
@@ -1611,7 +1630,7 @@
friend AnalysisInfoMixin<TargetIRAnalysis>;
static AnalysisKey Key;
- /// \brief The callback used to produce a result.
+ /// The callback used to produce a result.
///
/// We use a completely opaque callback so that targets can provide whatever
/// mechanism they desire for constructing the TTI for a given function.
@@ -1623,11 +1642,11 @@
/// the external TargetMachine, and that reference needs to never dangle.
std::function<Result(const Function &)> TTICallback;
- /// \brief Helper function used as the callback in the default constructor.
+ /// Helper function used as the callback in the default constructor.
static Result getDefaultTTI(const Function &F);
};
-/// \brief Wrapper pass for TargetTransformInfo.
+/// Wrapper pass for TargetTransformInfo.
///
/// This pass can be constructed from a TTI object which it stores internally
/// and is queried by passes.
@@ -1640,7 +1659,7 @@
public:
static char ID;
- /// \brief We must provide a default constructor for the pass but it should
+ /// We must provide a default constructor for the pass but it should
/// never be used.
///
/// Use the constructor below or call one of the creation routines.
@@ -1651,7 +1670,7 @@
TargetTransformInfo &getTTI(const Function &F);
};
-/// \brief Create an analysis pass wrapper around a TTI object.
+/// Create an analysis pass wrapper around a TTI object.
///
/// This analysis pass just holds the TTI instance and makes it available to
/// clients.
diff --git a/linux-x64/clang/include/llvm/Analysis/TargetTransformInfoImpl.h b/linux-x64/clang/include/llvm/Analysis/TargetTransformInfoImpl.h
index df4f853..d80ae1d 100644
--- a/linux-x64/clang/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/linux-x64/clang/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -27,7 +27,7 @@
namespace llvm {
-/// \brief Base class for use as a mix-in that aids implementing
+/// Base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
class TargetTransformInfoImplBase {
protected:
@@ -155,6 +155,7 @@
case Intrinsic::sideeffect:
case Intrinsic::dbg_declare:
case Intrinsic::dbg_value:
+ case Intrinsic::dbg_label:
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
case Intrinsic::lifetime_start:
@@ -325,7 +326,7 @@
bool haveFastSqrt(Type *Ty) { return false; }
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) { return true; }
-
+
unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }
int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
@@ -353,6 +354,8 @@
bool shouldMaximizeVectorBandwidth(bool OptSize) const { return false; }
+ unsigned getMinimumVF(unsigned ElemWidth) const { return 0; }
+
bool
shouldConsiderAddressTypePromotion(const Instruction &I,
bool &AllowPromotionWithoutCommonHeader) {
@@ -649,7 +652,7 @@
}
};
-/// \brief CRTP base class for use as a mix-in that aids implementing
+/// CRTP base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
template <typename T>
class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
diff --git a/linux-x64/clang/include/llvm/Analysis/TypeMetadataUtils.h b/linux-x64/clang/include/llvm/Analysis/TypeMetadataUtils.h
index 422e153..6764563 100644
--- a/linux-x64/clang/include/llvm/Analysis/TypeMetadataUtils.h
+++ b/linux-x64/clang/include/llvm/Analysis/TypeMetadataUtils.h
@@ -35,13 +35,13 @@
CallSite CS;
};
-/// Given a call to the intrinsic @llvm.type.test, find all devirtualizable
+/// Given a call to the intrinsic \@llvm.type.test, find all devirtualizable
/// call sites based on the call and return them in DevirtCalls.
void findDevirtualizableCallsForTypeTest(
SmallVectorImpl<DevirtCallSite> &DevirtCalls,
SmallVectorImpl<CallInst *> &Assumes, const CallInst *CI);
-/// Given a call to the intrinsic @llvm.type.checked.load, find all
+/// Given a call to the intrinsic \@llvm.type.checked.load, find all
/// devirtualizable call sites based on the call and return them in DevirtCalls.
void findDevirtualizableCallsForTypeCheckedLoad(
SmallVectorImpl<DevirtCallSite> &DevirtCalls,
diff --git a/linux-x64/clang/include/llvm/Analysis/Utils/Local.h b/linux-x64/clang/include/llvm/Analysis/Utils/Local.h
index 42a654d..b4141bb 100644
--- a/linux-x64/clang/include/llvm/Analysis/Utils/Local.h
+++ b/linux-x64/clang/include/llvm/Analysis/Utils/Local.h
@@ -15,241 +15,11 @@
#ifndef LLVM_ANALYSIS_UTILS_LOCAL_H
#define LLVM_ANALYSIS_UTILS_LOCAL_H
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/TinyPtrVector.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/IR/CallSite.h"
-#include "llvm/IR/Constant.h"
-#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
-#include "llvm/IR/Operator.h"
-#include "llvm/IR/Type.h"
-#include "llvm/IR/User.h"
-#include "llvm/IR/Value.h"
-#include "llvm/Support/Casting.h"
-#include <cstdint>
-#include <limits>
namespace llvm {
-class AllocaInst;
-class AssumptionCache;
-class BasicBlock;
-class BranchInst;
-class CallInst;
-class DbgInfoIntrinsic;
-class DbgValueInst;
-class DIBuilder;
-class Function;
-class Instruction;
-class LazyValueInfo;
-class LoadInst;
-class MDNode;
-class PHINode;
-class StoreInst;
-class TargetLibraryInfo;
-class TargetTransformInfo;
-
-/// A set of parameters used to control the transforms in the SimplifyCFG pass.
-/// Options may change depending on the position in the optimization pipeline.
-/// For example, canonical form that includes switches and branches may later be
-/// replaced by lookup tables and selects.
-struct SimplifyCFGOptions {
- int BonusInstThreshold;
- bool ForwardSwitchCondToPhi;
- bool ConvertSwitchToLookupTable;
- bool NeedCanonicalLoop;
- bool SinkCommonInsts;
- AssumptionCache *AC;
-
- SimplifyCFGOptions(unsigned BonusThreshold = 1,
- bool ForwardSwitchCond = false,
- bool SwitchToLookup = false, bool CanonicalLoops = true,
- bool SinkCommon = false,
- AssumptionCache *AssumpCache = nullptr)
- : BonusInstThreshold(BonusThreshold),
- ForwardSwitchCondToPhi(ForwardSwitchCond),
- ConvertSwitchToLookupTable(SwitchToLookup),
- NeedCanonicalLoop(CanonicalLoops),
- SinkCommonInsts(SinkCommon),
- AC(AssumpCache) {}
-
- // Support 'builder' pattern to set members by name at construction time.
- SimplifyCFGOptions &bonusInstThreshold(int I) {
- BonusInstThreshold = I;
- return *this;
- }
- SimplifyCFGOptions &forwardSwitchCondToPhi(bool B) {
- ForwardSwitchCondToPhi = B;
- return *this;
- }
- SimplifyCFGOptions &convertSwitchToLookupTable(bool B) {
- ConvertSwitchToLookupTable = B;
- return *this;
- }
- SimplifyCFGOptions &needCanonicalLoops(bool B) {
- NeedCanonicalLoop = B;
- return *this;
- }
- SimplifyCFGOptions &sinkCommonInsts(bool B) {
- SinkCommonInsts = B;
- return *this;
- }
- SimplifyCFGOptions &setAssumptionCache(AssumptionCache *Cache) {
- AC = Cache;
- return *this;
- }
-};
-
-//===----------------------------------------------------------------------===//
-// Local constant propagation.
-//
-
-/// If a terminator instruction is predicated on a constant value, convert it
-/// into an unconditional branch to the constant destination.
-/// This is a nontrivial operation because the successors of this basic block
-/// must have their PHI nodes updated.
-/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
-/// conditions and indirectbr addresses this might make dead if
-/// DeleteDeadConditions is true.
-bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
- const TargetLibraryInfo *TLI = nullptr,
- DeferredDominance *DDT = nullptr);
-
-//===----------------------------------------------------------------------===//
-// Local dead code elimination.
-//
-
-/// Return true if the result produced by the instruction is not used, and the
-/// instruction has no side effects.
-bool isInstructionTriviallyDead(Instruction *I,
- const TargetLibraryInfo *TLI = nullptr);
-
-/// Return true if the result produced by the instruction would have no side
-/// effects if it was not used. This is equivalent to checking whether
-/// isInstructionTriviallyDead would be true if the use count was 0.
-bool wouldInstructionBeTriviallyDead(Instruction *I,
- const TargetLibraryInfo *TLI = nullptr);
-
-/// If the specified value is a trivially dead instruction, delete it.
-/// If that makes any of its operands trivially dead, delete them too,
-/// recursively. Return true if any instructions were deleted.
-bool RecursivelyDeleteTriviallyDeadInstructions(Value *V,
- const TargetLibraryInfo *TLI = nullptr);
-
-/// If the specified value is an effectively dead PHI node, due to being a
-/// def-use chain of single-use nodes that either forms a cycle or is terminated
-/// by a trivially dead instruction, delete it. If that makes any of its
-/// operands trivially dead, delete them too, recursively. Return true if a
-/// change was made.
-bool RecursivelyDeleteDeadPHINode(PHINode *PN,
- const TargetLibraryInfo *TLI = nullptr);
-
-/// Scan the specified basic block and try to simplify any instructions in it
-/// and recursively delete dead instructions.
-///
-/// This returns true if it changed the code, note that it can delete
-/// instructions in other blocks as well in this block.
-bool SimplifyInstructionsInBlock(BasicBlock *BB,
- const TargetLibraryInfo *TLI = nullptr);
-
-//===----------------------------------------------------------------------===//
-// Control Flow Graph Restructuring.
-//
-
-/// Like BasicBlock::removePredecessor, this method is called when we're about
-/// to delete Pred as a predecessor of BB. If BB contains any PHI nodes, this
-/// drops the entries in the PHI nodes for Pred.
-///
-/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
-/// nodes that collapse into identity values. For example, if we have:
-/// x = phi(1, 0, 0, 0)
-/// y = and x, z
-///
-/// .. and delete the predecessor corresponding to the '1', this will attempt to
-/// recursively fold the 'and' to 0.
-void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
- DeferredDominance *DDT = nullptr);
-
-/// BB is a block with one predecessor and its predecessor is known to have one
-/// successor (BB!). Eliminate the edge between them, moving the instructions in
-/// the predecessor into BB. This deletes the predecessor block.
-void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DominatorTree *DT = nullptr,
- DeferredDominance *DDT = nullptr);
-
-/// BB is known to contain an unconditional branch, and contains no instructions
-/// other than PHI nodes, potential debug intrinsics and the branch. If
-/// possible, eliminate BB by rewriting all the predecessors to branch to the
-/// successor block and return true. If we can't transform, return false.
-bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
- DeferredDominance *DDT = nullptr);
-
-/// Check for and eliminate duplicate PHI nodes in this block. This doesn't try
-/// to be clever about PHI nodes which differ only in the order of the incoming
-/// values, but instcombine orders them so it usually won't matter.
-bool EliminateDuplicatePHINodes(BasicBlock *BB);
-
-/// This function is used to do simplification of a CFG. For example, it
-/// adjusts branches to branches to eliminate the extra hop, it eliminates
-/// unreachable basic blocks, and does other peephole optimization of the CFG.
-/// It returns true if a modification was made, possibly deleting the basic
-/// block that was pointed to. LoopHeaders is an optional input parameter
-/// providing the set of loop headers that SimplifyCFG should not eliminate.
-bool simplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
- const SimplifyCFGOptions &Options = {},
- SmallPtrSetImpl<BasicBlock *> *LoopHeaders = nullptr);
-
-/// This function is used to flatten a CFG. For example, it uses parallel-and
-/// and parallel-or mode to collapse if-conditions and merge if-regions with
-/// identical statements.
-bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = nullptr);
-
-/// If this basic block is ONLY a setcc and a branch, and if a predecessor
-/// branches to us and one of our successors, fold the setcc into the
-/// predecessor and use logical operations to pick the right destination.
-bool FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold = 1);
-
-/// This function takes a virtual register computed by an Instruction and
-/// replaces it with a slot in the stack frame, allocated via alloca.
-/// This allows the CFG to be changed around without fear of invalidating the
-/// SSA information for the value. It returns the pointer to the alloca inserted
-/// to create a stack slot for X.
-AllocaInst *DemoteRegToStack(Instruction &X,
- bool VolatileLoads = false,
- Instruction *AllocaPoint = nullptr);
-
-/// This function takes a virtual register computed by a phi node and replaces
-/// it with a slot in the stack frame, allocated via alloca. The phi node is
-/// deleted and it returns the pointer to the alloca inserted.
-AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
-
-/// Try to ensure that the alignment of \p V is at least \p PrefAlign bytes. If
-/// the owning object can be modified and has an alignment less than \p
-/// PrefAlign, it will be increased and \p PrefAlign returned. If the alignment
-/// cannot be increased, the known alignment of the value is returned.
-///
-/// It is not always possible to modify the alignment of the underlying object,
-/// so if alignment is important, a more reliable approach is to simply align
-/// all global variables and allocation instructions to their preferred
-/// alignment from the beginning.
-unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
- const DataLayout &DL,
- const Instruction *CxtI = nullptr,
- AssumptionCache *AC = nullptr,
- const DominatorTree *DT = nullptr);
-
-/// Try to infer an alignment for the specified pointer.
-inline unsigned getKnownAlignment(Value *V, const DataLayout &DL,
- const Instruction *CxtI = nullptr,
- AssumptionCache *AC = nullptr,
- const DominatorTree *DT = nullptr) {
- return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
-}
-
/// Given a getelementptr instruction/constantexpr, emit the code necessary to
/// compute the offset from the base pointer (without adding in the base
/// pointer). Return the result as a signed integer of intptr size.
@@ -316,192 +86,6 @@
return Result;
}
-///===---------------------------------------------------------------------===//
-/// Dbg Intrinsic utilities
-///
-
-/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
-/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
-void ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
- StoreInst *SI, DIBuilder &Builder);
-
-/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
-/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
-void ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
- LoadInst *LI, DIBuilder &Builder);
-
-/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
-/// llvm.dbg.declare or llvm.dbg.addr intrinsic.
-void ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
- PHINode *LI, DIBuilder &Builder);
-
-/// Lowers llvm.dbg.declare intrinsics into appropriate set of
-/// llvm.dbg.value intrinsics.
-bool LowerDbgDeclare(Function &F);
-
-/// Propagate dbg.value intrinsics through the newly inserted PHIs.
-void insertDebugValuesForPHIs(BasicBlock *BB,
- SmallVectorImpl<PHINode *> &InsertedPHIs);
-
-/// Finds all intrinsics declaring local variables as living in the memory that
-/// 'V' points to. This may include a mix of dbg.declare and
-/// dbg.addr intrinsics.
-TinyPtrVector<DbgInfoIntrinsic *> FindDbgAddrUses(Value *V);
-
-/// Finds the llvm.dbg.value intrinsics describing a value.
-void findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V);
-
-/// Finds the debug info intrinsics describing a value.
-void findDbgUsers(SmallVectorImpl<DbgInfoIntrinsic *> &DbgInsts, Value *V);
-
-/// Replaces llvm.dbg.declare instruction when the address it
-/// describes is replaced with a new value. If Deref is true, an
-/// additional DW_OP_deref is prepended to the expression. If Offset
-/// is non-zero, a constant displacement is added to the expression
-/// (between the optional Deref operations). Offset can be negative.
-bool replaceDbgDeclare(Value *Address, Value *NewAddress,
- Instruction *InsertBefore, DIBuilder &Builder,
- bool DerefBefore, int Offset, bool DerefAfter);
-
-/// Replaces llvm.dbg.declare instruction when the alloca it describes
-/// is replaced with a new value. If Deref is true, an additional
-/// DW_OP_deref is prepended to the expression. If Offset is non-zero,
-/// a constant displacement is added to the expression (between the
-/// optional Deref operations). Offset can be negative. The new
-/// llvm.dbg.declare is inserted immediately before AI.
-bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
- DIBuilder &Builder, bool DerefBefore,
- int Offset, bool DerefAfter);
-
-/// Replaces multiple llvm.dbg.value instructions when the alloca it describes
-/// is replaced with a new value. If Offset is non-zero, a constant displacement
-/// is added to the expression (after the mandatory Deref). Offset can be
-/// negative. New llvm.dbg.value instructions are inserted at the locations of
-/// the instructions they replace.
-void replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
- DIBuilder &Builder, int Offset = 0);
-
-/// Assuming the instruction \p I is going to be deleted, attempt to salvage any
-/// dbg.value intrinsics referring to \p I by rewriting its effect into a
-/// DIExpression.
-void salvageDebugInfo(Instruction &I);
-
-/// Remove all instructions from a basic block other than it's terminator
-/// and any present EH pad instructions.
-unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB);
-
-/// Insert an unreachable instruction before the specified
-/// instruction, making it and the rest of the code in the block dead.
-unsigned changeToUnreachable(Instruction *I, bool UseLLVMTrap,
- bool PreserveLCSSA = false,
- DeferredDominance *DDT = nullptr);
-
-/// Convert the CallInst to InvokeInst with the specified unwind edge basic
-/// block. This also splits the basic block where CI is located, because
-/// InvokeInst is a terminator instruction. Returns the newly split basic
-/// block.
-BasicBlock *changeToInvokeAndSplitBasicBlock(CallInst *CI,
- BasicBlock *UnwindEdge);
-
-/// Replace 'BB's terminator with one that does not have an unwind successor
-/// block. Rewrites `invoke` to `call`, etc. Updates any PHIs in unwind
-/// successor.
-///
-/// \param BB Block whose terminator will be replaced. Its terminator must
-/// have an unwind successor.
-void removeUnwindEdge(BasicBlock *BB, DeferredDominance *DDT = nullptr);
-
-/// Remove all blocks that can not be reached from the function's entry.
-///
-/// Returns true if any basic block was removed.
-bool removeUnreachableBlocks(Function &F, LazyValueInfo *LVI = nullptr,
- DeferredDominance *DDT = nullptr);
-
-/// Combine the metadata of two instructions so that K can replace J
-///
-/// Metadata not listed as known via KnownIDs is removed
-void combineMetadata(Instruction *K, const Instruction *J, ArrayRef<unsigned> KnownIDs);
-
-/// Combine the metadata of two instructions so that K can replace J. This
-/// specifically handles the case of CSE-like transformations.
-///
-/// Unknown metadata is removed.
-void combineMetadataForCSE(Instruction *K, const Instruction *J);
-
-// Replace each use of 'From' with 'To', if that use does not belong to basic
-// block where 'From' is defined. Returns the number of replacements made.
-unsigned replaceNonLocalUsesWith(Instruction *From, Value *To);
-
-/// Replace each use of 'From' with 'To' if that use is dominated by
-/// the given edge. Returns the number of replacements made.
-unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
- const BasicBlockEdge &Edge);
-/// Replace each use of 'From' with 'To' if that use is dominated by
-/// the end of the given BasicBlock. Returns the number of replacements made.
-unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
- const BasicBlock *BB);
-
-/// Return true if the CallSite CS calls a gc leaf function.
-///
-/// A leaf function is a function that does not safepoint the thread during its
-/// execution. During a call or invoke to such a function, the callers stack
-/// does not have to be made parseable.
-///
-/// Most passes can and should ignore this information, and it is only used
-/// during lowering by the GC infrastructure.
-bool callsGCLeafFunction(ImmutableCallSite CS, const TargetLibraryInfo &TLI);
-
-/// Copy a nonnull metadata node to a new load instruction.
-///
-/// This handles mapping it to range metadata if the new load is an integer
-/// load instead of a pointer load.
-void copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, LoadInst &NewLI);
-
-/// Copy a range metadata node to a new load instruction.
-///
-/// This handles mapping it to nonnull metadata if the new load is a pointer
-/// load instead of an integer load and the range doesn't cover null.
-void copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI, MDNode *N,
- LoadInst &NewLI);
-
-//===----------------------------------------------------------------------===//
-// Intrinsic pattern matching
-//
-
-/// Try to match a bswap or bitreverse idiom.
-///
-/// If an idiom is matched, an intrinsic call is inserted before \c I. Any added
-/// instructions are returned in \c InsertedInsts. They will all have been added
-/// to a basic block.
-///
-/// A bitreverse idiom normally requires around 2*BW nodes to be searched (where
-/// BW is the bitwidth of the integer type). A bswap idiom requires anywhere up
-/// to BW / 4 nodes to be searched, so is significantly faster.
-///
-/// This function returns true on a successful match or false otherwise.
-bool recognizeBSwapOrBitReverseIdiom(
- Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
- SmallVectorImpl<Instruction *> &InsertedInsts);
-
-//===----------------------------------------------------------------------===//
-// Sanitizer utilities
-//
-
-/// Given a CallInst, check if it calls a string function known to CodeGen,
-/// and mark it with NoBuiltin if so. To be used by sanitizers that intend
-/// to intercept string functions and want to avoid converting them to target
-/// specific instructions.
-void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI,
- const TargetLibraryInfo *TLI);
-
-//===----------------------------------------------------------------------===//
-// Transform predicates
-//
-
-/// Given an instruction, is it legal to set operand OpIdx to a non-constant
-/// value?
-bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx);
-
-} // end namespace llvm
+}
#endif // LLVM_TRANSFORMS_UTILS_LOCAL_H
diff --git a/linux-x64/clang/include/llvm/Analysis/ValueLattice.h b/linux-x64/clang/include/llvm/Analysis/ValueLattice.h
index c943fd1..0744ca6 100644
--- a/linux-x64/clang/include/llvm/Analysis/ValueLattice.h
+++ b/linux-x64/clang/include/llvm/Analysis/ValueLattice.h
@@ -275,6 +275,8 @@
ConstantRange NewR = getConstantRange().unionWith(RHS.getConstantRange());
if (NewR.isFullSet())
markOverdefined();
+ else if (NewR == getConstantRange())
+ return false;
else
markConstantRange(std::move(NewR));
return true;
diff --git a/linux-x64/clang/include/llvm/Analysis/ValueTracking.h b/linux-x64/clang/include/llvm/Analysis/ValueTracking.h
index ced95df..99b47fb 100644
--- a/linux-x64/clang/include/llvm/Analysis/ValueTracking.h
+++ b/linux-x64/clang/include/llvm/Analysis/ValueTracking.h
@@ -101,6 +101,12 @@
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
+ /// Return true if the two given values are negation.
+ /// Currently can recognize Value pair:
+ /// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
+ /// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
+ bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false);
+
/// Returns true if the give value is known to be non-negative.
bool isKnownNonNegative(const Value *V, const DataLayout &DL,
unsigned Depth = 0,
@@ -188,7 +194,8 @@
/// Return true if the floating-point scalar value is not a NaN or if the
/// floating-point vector value has no NaN elements. Return false if a value
/// could ever be NaN.
- bool isKnownNeverNaN(const Value *V);
+ bool isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
+ unsigned Depth = 0);
/// Return true if we can prove that the specified FP value's sign bit is 0.
///
@@ -276,6 +283,22 @@
/// pointer, return 'len+1'. If we can't, return 0.
uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);
+ /// This function returns call pointer argument that is considered the same by
+ /// aliasing rules. You CAN'T use it to replace one value with another.
+ const Value *getArgumentAliasingToReturnedPointer(ImmutableCallSite CS);
+ inline Value *getArgumentAliasingToReturnedPointer(CallSite CS) {
+ return const_cast<Value *>(
+ getArgumentAliasingToReturnedPointer(ImmutableCallSite(CS)));
+ }
+
+ // {launder,strip}.invariant.group returns pointer that aliases its argument,
+ // and it only captures pointer by returning it.
+ // These intrinsics are not marked as nocapture, because returning is
+ // considered as capture. The arguments are not marked as returned either,
+ // because it would make it useless.
+ bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
+ ImmutableCallSite CS);
+
/// This method strips off any GEP address adjustments and pointer casts from
/// the specified value, returning the original object being addressed. Note
/// that the returned value has pointer type if the specified value does. If
@@ -288,7 +311,7 @@
return GetUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup);
}
- /// \brief This method is similar to GetUnderlyingObject except that it can
+ /// This method is similar to GetUnderlyingObject except that it can
/// look through phi and select instructions and return multiple objects.
///
/// If LoopInfo is passed, loop phis are further analyzed. If a pointer
@@ -384,6 +407,11 @@
AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT);
+ OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
+ const DataLayout &DL,
+ AssumptionCache *AC,
+ const Instruction *CxtI,
+ const DominatorTree *DT);
OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
const Value *RHS,
const DataLayout &DL,
@@ -401,6 +429,16 @@
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
+ OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS,
+ const DataLayout &DL,
+ AssumptionCache *AC,
+ const Instruction *CxtI,
+ const DominatorTree *DT);
+ OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
+ const DataLayout &DL,
+ AssumptionCache *AC,
+ const Instruction *CxtI,
+ const DominatorTree *DT);
/// Returns true if the arithmetic part of the \p II 's result is
/// used only along the paths control dependent on the computation
@@ -427,7 +465,7 @@
/// This is equivelent to saying that all instructions within the basic block
/// are guaranteed to transfer execution to their successor within the basic
/// block. This has the same assumptions w.r.t. undefined behavior as the
- /// instruction variant of this function.
+ /// instruction variant of this function.
bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);
/// Return true if this function can prove that the instruction I
@@ -461,7 +499,7 @@
/// the parent of I.
bool programUndefinedIfFullPoison(const Instruction *PoisonI);
- /// \brief Specific patterns of select instructions we can match.
+ /// Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
SPF_UNKNOWN = 0,
SPF_SMIN, /// Signed minimum
@@ -474,7 +512,7 @@
SPF_NABS /// Negated absolute value
};
- /// \brief Behavior when a floating point min/max is given one NaN and one
+ /// Behavior when a floating point min/max is given one NaN and one
/// non-NaN as input.
enum SelectPatternNaNBehavior {
SPNB_NA = 0, /// NaN behavior not applicable.
@@ -502,6 +540,9 @@
/// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
/// and providing the out parameter results if we successfully match.
///
+ /// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
+ /// the negation instruction from the idiom.
+ ///
/// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
/// not match that of the original select. If this is the case, the cast
/// operation (one of Trunc,SExt,Zext) that must be done to transform the
diff --git a/linux-x64/clang/include/llvm/Analysis/VectorUtils.h b/linux-x64/clang/include/llvm/Analysis/VectorUtils.h
index 6315e84..9fde36d 100644
--- a/linux-x64/clang/include/llvm/Analysis/VectorUtils.h
+++ b/linux-x64/clang/include/llvm/Analysis/VectorUtils.h
@@ -33,50 +33,50 @@
enum ID : unsigned;
}
-/// \brief Identify if the intrinsic is trivially vectorizable.
+/// Identify if the intrinsic is trivially vectorizable.
/// This method returns true if the intrinsic's argument types are all
/// scalars for the scalar form of the intrinsic and all vectors for
/// the vector form of the intrinsic.
bool isTriviallyVectorizable(Intrinsic::ID ID);
-/// \brief Identifies if the intrinsic has a scalar operand. It checks for
+/// Identifies if the intrinsic has a scalar operand. It checks for
/// ctlz,cttz and powi special intrinsics whose argument is scalar.
bool hasVectorInstrinsicScalarOpd(Intrinsic::ID ID, unsigned ScalarOpdIdx);
-/// \brief Returns intrinsic ID for call.
+/// Returns intrinsic ID for call.
/// For the input call instruction it finds mapping intrinsic and returns
/// its intrinsic ID, in case it does not found it return not_intrinsic.
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI,
const TargetLibraryInfo *TLI);
-/// \brief Find the operand of the GEP that should be checked for consecutive
+/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
unsigned getGEPInductionOperand(const GetElementPtrInst *Gep);
-/// \brief If the argument is a GEP, then returns the operand identified by
+/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp);
-/// \brief If a value has only one user that is a CastInst, return it.
+/// If a value has only one user that is a CastInst, return it.
Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty);
-/// \brief Get the stride of a pointer access in a loop. Looks for symbolic
+/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
Value *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp);
-/// \brief Given a vector and an element number, see if the scalar value is
+/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
Value *findScalarElement(Value *V, unsigned EltNo);
-/// \brief Get splat value if the input is a splat vector or return nullptr.
+/// Get splat value if the input is a splat vector or return nullptr.
/// The value may be extracted from a splat constants vector or from
/// a sequence of instructions that broadcast a single value into a vector.
const Value *getSplatValue(const Value *V);
-/// \brief Compute a map of integer instructions to their minimum legal type
+/// Compute a map of integer instructions to their minimum legal type
/// size.
///
/// C semantics force sub-int-sized values (e.g. i8, i16) to be promoted to int
@@ -124,7 +124,7 @@
/// This function always sets a (possibly null) value for each K in Kinds.
Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL);
-/// \brief Create an interleave shuffle mask.
+/// Create an interleave shuffle mask.
///
/// This function creates a shuffle mask for interleaving \p NumVecs vectors of
/// vectorization factor \p VF into a single wide vector. The mask is of the
@@ -138,7 +138,7 @@
Constant *createInterleaveMask(IRBuilder<> &Builder, unsigned VF,
unsigned NumVecs);
-/// \brief Create a stride shuffle mask.
+/// Create a stride shuffle mask.
///
/// This function creates a shuffle mask whose elements begin at \p Start and
/// are incremented by \p Stride. The mask can be used to deinterleave an
@@ -153,7 +153,7 @@
Constant *createStrideMask(IRBuilder<> &Builder, unsigned Start,
unsigned Stride, unsigned VF);
-/// \brief Create a sequential shuffle mask.
+/// Create a sequential shuffle mask.
///
/// This function creates shuffle mask whose elements are sequential and begin
/// at \p Start. The mask contains \p NumInts integers and is padded with \p
@@ -167,7 +167,7 @@
Constant *createSequentialMask(IRBuilder<> &Builder, unsigned Start,
unsigned NumInts, unsigned NumUndefs);
-/// \brief Concatenate a list of vectors.
+/// Concatenate a list of vectors.
///
/// This function generates code that concatenate the vectors in \p Vecs into a
/// single large vector. The number of vectors should be greater than one, and