//===- CodeGen/Analysis.h - CodeGen LLVM IR Analysis Utilities --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_ANALYSIS_H
#define LLVM_CODEGEN_ANALYSIS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/CodeGen.h"

namespace llvm {
class GlobalValue;
class LLT;
class MachineBasicBlock;
class MachineFunction;
class TargetLoweringBase;
class TargetLowering;
class TargetMachine;
class SDNode;
class SDValue;
class SelectionDAG;
struct EVT;

/// Compute the linearized index of a member in a nested
/// aggregate/struct/array.
///
/// Given an LLVM IR aggregate type and a sequence of insertvalue or
/// extractvalue indices that identify a member, return the linearized index of
/// the start of the member, i.e. the number of non-aggregate elements that
/// precede it in memory. This is an element count, not a byte offset.
///
/// \param Ty is the type indexed by \p Indices.
/// \param Indices is an optional pointer into the indices list, pointing at
/// the current index.
/// \param IndicesEnd is the end of the indices list.
/// \param CurIndex is the current index in the recursion.
///
/// \returns \p CurIndex plus the linear index in \p Ty of the member selected
/// by the indices list.
unsigned ComputeLinearIndex(Type *Ty,
                            const unsigned *Indices,
                            const unsigned *IndicesEnd,
                            unsigned CurIndex = 0);

inline unsigned ComputeLinearIndex(Type *Ty,
                                   ArrayRef<unsigned> Indices,
                                   unsigned CurIndex = 0) {
  return ComputeLinearIndex(Ty, Indices.begin(), Indices.end(), CurIndex);
}
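
// Illustrative sketch (not part of the original header): assuming \c AggTy is
// the llvm::Type for { i32, { float, double }, i16 }, the indices {1, 1}
// select the double member, and ComputeLinearIndex returns 2 because two
// non-aggregate elements (the i32 and the float) precede it. The result is an
// element count, not a byte offset.
//
//   unsigned Idx[] = {1, 1};
//   unsigned Linear = ComputeLinearIndex(AggTy, std::begin(Idx),
//                                        std::end(Idx)); // Linear == 2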

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<uint64_t> *Offsets = nullptr,
                     uint64_t StartingOffset = 0);

/// Variant of ComputeValueVTs that also produces the memory VTs.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<EVT> *MemVTs,
                     SmallVectorImpl<uint64_t> *Offsets = nullptr,
                     uint64_t StartingOffset = 0);
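
// Illustrative sketch (not part of the original header): assuming \c StructTy
// is the llvm::Type for { i32, [2 x float] }, a DataLayout where i32 and float
// are both 4 bytes, and \c TLI / \c DL in scope, the decomposition yields one
// EVT per leaf value together with its in-memory offset.
//
//   SmallVector<EVT, 4> ValueVTs;
//   SmallVector<uint64_t, 4> Offsets;
//   ComputeValueVTs(TLI, DL, StructTy, ValueVTs, &Offsets);
//   // ValueVTs ~ { MVT::i32, MVT::f32, MVT::f32 }, Offsets ~ { 0, 4, 8 }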

/// computeValueLLTs - Given an LLVM IR type, compute a sequence of
/// LLTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void computeValueLLTs(const DataLayout &DL, Type &Ty,
                      SmallVectorImpl<LLT> &ValueTys,
                      SmallVectorImpl<uint64_t> *Offsets = nullptr,
                      uint64_t StartingOffset = 0);
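
// Illustrative sketch for the GlobalISel analogue (same assumptions as the
// ComputeValueVTs example above; note that LLT does not distinguish floating
// point from integer scalars):
//
//   SmallVector<LLT, 4> ValueLLTs;
//   SmallVector<uint64_t, 4> LLTOffsets;
//   computeValueLLTs(DL, *StructTy, ValueLLTs, &LLTOffsets);
//   // ValueLLTs ~ { s32, s32, s32 }, LLTOffsets ~ { 0, 4, 8 }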

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *ExtractTypeInfo(Value *V);

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                               const TargetLowering &TLI);

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred);

/// getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats,
/// return the equivalent code if we're allowed to assume that NaNs won't occur.
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC);
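
// Illustrative mapping (a sketch, not exhaustive): an ordered less-than FP
// comparison corresponds to ISD::SETOLT, and dropping the NaN-ordering
// requirement relaxes it to ISD::SETLT.
//
//   ISD::CondCode CC = getFCmpCondCode(FCmpInst::FCMP_OLT); // ISD::SETOLT
//   ISD::CondCode Relaxed = getFCmpCodeWithoutNaN(CC);      // ISD::SETLT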

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM);

/// Test whether, given that the input instruction is in the tail call
/// position, an attribute mismatch between the caller and the callee will
/// inhibit tail call optimizations.
/// \p AllowDifferingSizes is an output parameter which, if forming a tail call
/// is permitted, determines whether it's permitted only if the size of the
/// caller's and callee's return types match exactly.
bool attributesPermitTailCall(const Function *F, const Instruction *I,
                              const ReturnInst *Ret,
                              const TargetLoweringBase &TLI,
                              bool *AllowDifferingSizes = nullptr);

/// Test whether, given that the input instruction is in the tail call
/// position, the return type or any attributes of the function will inhibit
/// tail call optimization.
bool returnTypeIsEligibleForTailCall(const Function *F, const Instruction *I,
                                     const ReturnInst *Ret,
                                     const TargetLoweringBase &TLI);

DenseMap<const MachineBasicBlock *, int>
getEHScopeMembership(const MachineFunction &MF);

} // End llvm namespace

#endif