//===- CodeGen/Analysis.h - CodeGen LLVM IR Analysis Utilities --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_ANALYSIS_H
#define LLVM_CODEGEN_ANALYSIS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/CodeGen.h"

namespace llvm {
class GlobalValue;
class MachineBasicBlock;
class MachineFunction;
class TargetLoweringBase;
class TargetLowering;
class TargetMachine;
class SDNode;
class SDValue;
class SelectionDAG;
struct EVT;

/// \brief Compute the linearized index of a member in a nested
/// aggregate/struct/array.
///
/// Given an LLVM IR aggregate type and a sequence of insertvalue or
/// extractvalue indices that identify a member, return the linearized index of
/// the start of the member, i.e. the number of elements in memory that come
/// before the sought one. Note that this is an element count, not a byte
/// offset.
///
/// \param Ty is the type indexed by \p Indices.
/// \param Indices is an optional pointer into the indices list, pointing at
/// the current index.
/// \param IndicesEnd is the end of the indices list.
/// \param CurIndex is the current index in the recursion.
///
/// \returns \p CurIndex plus the linear index in \p Ty of the member
/// identified by the indices list.
unsigned ComputeLinearIndex(Type *Ty,
                            const unsigned *Indices,
                            const unsigned *IndicesEnd,
                            unsigned CurIndex = 0);

inline unsigned ComputeLinearIndex(Type *Ty,
                                   ArrayRef<unsigned> Indices,
                                   unsigned CurIndex = 0) {
  return ComputeLinearIndex(Ty, Indices.begin(), Indices.end(), CurIndex);
}
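
// Usage sketch (illustrative only, not part of this interface): the types
// below are built ad hoc for the example and assume an LLVMContext `Ctx` is
// in scope.
//
//   // %T = type { i32, { float, [2 x i8] }, i64 }
//   Type *Inner = StructType::get(
//       Ctx, {Type::getFloatTy(Ctx),
//             ArrayType::get(Type::getInt8Ty(Ctx), 2)});
//   Type *T = StructType::get(
//       Ctx, {Type::getInt32Ty(Ctx), Inner, Type::getInt64Ty(Ctx)});
//   // The member at indices {1, 1} is the [2 x i8]; the i32 and the float
//   // precede it, so its linearized index is 2.
//   unsigned Idx = ComputeLinearIndex(T, {1, 1});  // Idx == 2
//   // The trailing i64 (indices {2}) follows i32, float, i8, i8 -> index 4.
//   unsigned Last = ComputeLinearIndex(T, {2});    // Last == 4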

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<uint64_t> *Offsets = nullptr,
                     uint64_t StartingOffset = 0);
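
// Usage sketch (illustrative only): splitting an aggregate into its scalar
// parts. Assumes `TLI`, `DL`, and `Ctx` are in scope; the resulting EVTs and
// offsets depend on the target's DataLayout.
//
//   // %T = type { i32, [2 x float] }
//   Type *T = StructType::get(
//       Ctx, {Type::getInt32Ty(Ctx),
//             ArrayType::get(Type::getFloatTy(Ctx), 2)});
//   SmallVector<EVT, 4> ValueVTs;
//   SmallVector<uint64_t, 4> Offsets;
//   ComputeValueVTs(TLI, DL, T, ValueVTs, &Offsets);
//   // Typically: ValueVTs == {MVT::i32, MVT::f32, MVT::f32}
//   //            Offsets  == {0, 4, 8}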

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *ExtractTypeInfo(Value *V);

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                               const TargetLowering &TLI);

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred);

/// getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats,
/// return the equivalent code if we're allowed to assume that NaNs won't
/// occur.
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC);

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);
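
// Usage sketch (illustrative only): mapping IR predicates to ISD condition
// codes during instruction selection.
//
//   ISD::CondCode ICC = getICmpCondCode(ICmpInst::ICMP_SLT);   // ISD::SETLT
//   ISD::CondCode FCC = getFCmpCondCode(FCmpInst::FCMP_OLT);   // ISD::SETOLT
//   // If NaNs can be ignored, the ordered compare relaxes to the plain form.
//   ISD::CondCode NN  = getFCmpCodeWithoutNaN(FCC);            // ISD::SETLT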

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM);

/// Test, given that the input instruction is in the tail call position,
/// whether an attribute mismatch between the caller and the callee will
/// inhibit tail call optimization.
///
/// \p AllowDifferingSizes is an output parameter which, if forming a tail call
/// is permitted, indicates whether the caller's and callee's return types are
/// allowed to differ in size.
bool attributesPermitTailCall(const Function *F, const Instruction *I,
                              const ReturnInst *Ret,
                              const TargetLoweringBase &TLI,
                              bool *AllowDifferingSizes = nullptr);

/// Test, given that the input instruction is in the tail call position,
/// whether the return type or any attributes of the function will inhibit
/// tail call optimization.
bool returnTypeIsEligibleForTailCall(const Function *F, const Instruction *I,
                                     const ReturnInst *Ret,
                                     const TargetLoweringBase &TLI);
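
// Usage sketch (illustrative only): how call lowering code commonly guards a
// tail call. Assumes `CI` is the CallInst being lowered and `TM` is the
// current TargetMachine; isInTailCallPosition performs the position check
// and, via the helpers above, the return-type and attribute checks.
//
//   bool IsTailCall = CI.isTailCall();
//   if (IsTailCall && !isInTailCallPosition(ImmutableCallSite(&CI), TM))
//     IsTailCall = false;  // lower as an ordinary call instead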

/// Compute a mapping from each MachineBasicBlock in \p MF to the EH funclet
/// it belongs to.
DenseMap<const MachineBasicBlock *, int>
getFuncletMembership(const MachineFunction &MF);

} // End llvm namespace

#endif