//===- CodeGen/Analysis.h - CodeGen LLVM IR Analysis Utilities --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_ANALYSIS_H
#define LLVM_CODEGEN_ANALYSIS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/CodeGen.h"

namespace llvm {
class GlobalValue;
class LLT;
class MachineBasicBlock;
class MachineFunction;
class TargetLoweringBase;
class TargetLowering;
class TargetMachine;
struct EVT;

/// Compute the linearized index of a member in a nested
/// aggregate/struct/array.
///
/// Given an LLVM IR aggregate type and a sequence of insertvalue or
/// extractvalue indices that identify a member, return the linearized index of
/// the start of the member, i.e. the number of elements in memory that precede
/// the sought one. This is a count of elements, not bytes.
///
/// \param Ty is the type indexed by \p Indices.
/// \param Indices is an optional pointer in the indices list to the current
/// index.
/// \param IndicesEnd is the end of the indices list.
/// \param CurIndex is the current index in the recursion.
///
/// \returns \p CurIndex plus the linear index in \p Ty of the member named by
/// the indices list.
unsigned ComputeLinearIndex(Type *Ty,
                            const unsigned *Indices,
                            const unsigned *IndicesEnd,
                            unsigned CurIndex = 0);

inline unsigned ComputeLinearIndex(Type *Ty,
                                   ArrayRef<unsigned> Indices,
                                   unsigned CurIndex = 0) {
  return ComputeLinearIndex(Ty, Indices.begin(), Indices.end(), CurIndex);
}
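
// Illustrative sketch (not part of this header's API): for the aggregate
// { i32, { float, float }, i64 }, the insertvalue/extractvalue path {1, 1}
// names the second float, and two scalars (the i32 and the first float)
// precede it in flattened order, so the linear index is 2. Assuming `Ctx` is
// an existing LLVMContext:
//
//   Type *FloatTy = Type::getFloatTy(Ctx);
//   StructType *Inner = StructType::get(FloatTy, FloatTy);
//   StructType *Outer = StructType::get(Type::getInt32Ty(Ctx), Inner,
//                                       Type::getInt64Ty(Ctx));
//   unsigned Path[] = {1, 1};
//   unsigned Linear = ComputeLinearIndex(Outer, Path); // == 2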

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<uint64_t> *Offsets = nullptr,
                     uint64_t StartingOffset = 0);
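
// Illustrative sketch (assumes a .cpp where EVT is a complete type and where
// `TLI`, `DL`, and `StructTy` name the target's TargetLowering, the module's
// DataLayout, and the type { i32, [2 x float] }): flattening that struct
// typically yields ValueVTs = {i32, f32, f32} and byte Offsets = {0, 4, 8}.
//
//   SmallVector<EVT, 4> ValueVTs;
//   SmallVector<uint64_t, 4> Offsets;
//   ComputeValueVTs(TLI, DL, StructTy, ValueVTs, &Offsets);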

/// Variant of ComputeValueVTs that also produces the memory VTs.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<EVT> *MemVTs,
                     SmallVectorImpl<uint64_t> *Offsets = nullptr,
                     uint64_t StartingOffset = 0);

/// computeValueLLTs - Given an LLVM IR type, compute a sequence of
/// LLTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void computeValueLLTs(const DataLayout &DL, Type &Ty,
                      SmallVectorImpl<LLT> &ValueTys,
                      SmallVectorImpl<uint64_t> *Offsets = nullptr,
                      uint64_t StartingOffset = 0);
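
// Illustrative sketch (assumes a .cpp where LLT is a complete type): this is
// the GlobalISel analogue of ComputeValueVTs. For { i64, i32 }, with an
// 8-byte-aligned i64, this typically produces SplitTys = {s64, s32} and
// Offsets = {0, 8}.
//
//   SmallVector<LLT, 4> SplitTys;
//   SmallVector<uint64_t, 4> Offsets;
//   computeValueLLTs(DL, *StructTy, SplitTys, &Offsets);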

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *ExtractTypeInfo(Value *V);

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred);

/// getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats,
/// return the equivalent code if we're allowed to assume that NaNs won't
/// occur.
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC);

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);
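
// Illustrative sketch: mapping IR predicates to ISD condition codes.
//
//   ISD::CondCode CC = getFCmpCondCode(FCmpInst::FCMP_OLT); // ISD::SETOLT
//   // With no-NaNs fast-math semantics the ordered test can be relaxed:
//   CC = getFCmpCodeWithoutNaN(CC);                          // ISD::SETLT
//   ISD::CondCode ICC = getICmpCondCode(ICmpInst::ICMP_ULT); // ISD::SETULT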

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM);
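
// Illustrative sketch (assumes `CB` is a CallBase in the function being
// lowered and `TM` is the current TargetMachine): a call marked `tail` must
// still pass this target-independent position check before it may be lowered
// as a tail call.
//
//   if (isInTailCallPosition(CB, TM))
//     ; // nothing between the call and the return blocks tail-call lowering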

/// Test whether, given that the input instruction is in the tail call
/// position, an attribute mismatch between the caller and the callee would
/// inhibit tail call optimization.
/// \p AllowDifferingSizes is an output parameter which, if forming a tail call
/// is permitted, indicates whether the caller's and callee's return types are
/// allowed to differ in size.
bool attributesPermitTailCall(const Function *F, const Instruction *I,
                              const ReturnInst *Ret,
                              const TargetLoweringBase &TLI,
                              bool *AllowDifferingSizes = nullptr);
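
// Illustrative sketch (assumes `F` is the caller, `Call` the call instruction
// as a `const CallBase *`, `Ret` the following ReturnInst, and `TLI` the
// target's lowering info):
//
//   bool AllowDifferingSizes;
//   if (attributesPermitTailCall(F, Call, Ret, TLI, &AllowDifferingSizes))
//     ; // OK; if AllowDifferingSizes is false, return-type sizes must match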

/// Test whether, given that the input instruction is in the tail call
/// position, the return type or any attributes of the function would inhibit
/// tail call optimization.
bool returnTypeIsEligibleForTailCall(const Function *F, const Instruction *I,
                                     const ReturnInst *Ret,
                                     const TargetLoweringBase &TLI);
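
// Illustrative sketch: together with isInTailCallPosition this forms the
// target-independent part of a tail-call legality check. Assuming the same
// `F`, `Call`, `Ret`, and `TLI` as above, plus the TargetMachine `TM`:
//
//   bool OK = isInTailCallPosition(*Call, TM) &&
//             returnTypeIsEligibleForTailCall(F, Call, Ret, TLI);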

DenseMap<const MachineBasicBlock *, int>
getEHScopeMembership(const MachineFunction &MF);

} // End llvm namespace

#endif