//===- llvm/CodeGen/TargetSchedule.h - Sched Machine Model ------*- C++ -*-===//
| 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
| 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
| 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | // |
| 10 | // This file defines a wrapper around MCSchedModel that allows the interface to |
| 11 | // benefit from information currently only available in TargetInstrInfo. |
| 12 | // Ideally, the scheduling interface would be fully defined in the MC layer. |
| 13 | // |
| 14 | //===----------------------------------------------------------------------===// |
| 15 | |
| 16 | #ifndef LLVM_CODEGEN_TARGETSCHEDULE_H |
| 17 | #define LLVM_CODEGEN_TARGETSCHEDULE_H |
| 18 | |
| 19 | #include "llvm/ADT/Optional.h" |
| 20 | #include "llvm/ADT/SmallVector.h" |
| 21 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 22 | #include "llvm/Config/llvm-config.h" |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 23 | #include "llvm/MC/MCInstrItineraries.h" |
| 24 | #include "llvm/MC/MCSchedule.h" |
| 25 | |
| 26 | namespace llvm { |
| 27 | |
| 28 | class MachineInstr; |
| 29 | class TargetInstrInfo; |
| 30 | |
| 31 | /// Provide an instruction scheduling machine model to CodeGen passes. |
| 32 | class TargetSchedModel { |
| 33 | // For efficiency, hold a copy of the statically defined MCSchedModel for this |
| 34 | // processor. |
| 35 | MCSchedModel SchedModel; |
| 36 | InstrItineraryData InstrItins; |
| 37 | const TargetSubtargetInfo *STI = nullptr; |
| 38 | const TargetInstrInfo *TII = nullptr; |
| 39 | |
| 40 | SmallVector<unsigned, 16> ResourceFactors; |
| 41 | unsigned MicroOpFactor; // Multiply to normalize microops to resource units. |
| 42 | unsigned ResourceLCM; // Resource units per cycle. Latency normalization factor. |
| 43 | |
| 44 | unsigned computeInstrLatency(const MCSchedClassDesc &SCDesc) const; |
| 45 | |
| 46 | public: |
| 47 | TargetSchedModel() : SchedModel(MCSchedModel::GetDefaultSchedModel()) {} |
| 48 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 49 | /// Initialize the machine model for instruction scheduling. |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 50 | /// |
| 51 | /// The machine model API keeps a copy of the top-level MCSchedModel table |
| 52 | /// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve |
| 53 | /// dynamic properties. |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 54 | void init(const TargetSubtargetInfo *TSInfo); |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 55 | |
| 56 | /// Return the MCSchedClassDesc for this instruction. |
| 57 | const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const; |
| 58 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 59 | /// TargetSubtargetInfo getter. |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 60 | const TargetSubtargetInfo *getSubtargetInfo() const { return STI; } |
| 61 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 62 | /// TargetInstrInfo getter. |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 63 | const TargetInstrInfo *getInstrInfo() const { return TII; } |
| 64 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 65 | /// Return true if this machine model includes an instruction-level |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 66 | /// scheduling model. |
| 67 | /// |
| 68 | /// This is more detailed than the course grain IssueWidth and default |
| 69 | /// latency properties, but separate from the per-cycle itinerary data. |
| 70 | bool hasInstrSchedModel() const; |
| 71 | |
| 72 | const MCSchedModel *getMCSchedModel() const { return &SchedModel; } |
| 73 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 74 | /// Return true if this machine model includes cycle-to-cycle itinerary |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 75 | /// data. |
| 76 | /// |
| 77 | /// This models scheduling at each stage in the processor pipeline. |
| 78 | bool hasInstrItineraries() const; |
| 79 | |
| 80 | const InstrItineraryData *getInstrItineraries() const { |
| 81 | if (hasInstrItineraries()) |
| 82 | return &InstrItins; |
| 83 | return nullptr; |
| 84 | } |
| 85 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 86 | /// Return true if this machine model includes an instruction-level |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 87 | /// scheduling model or cycle-to-cycle itinerary data. |
| 88 | bool hasInstrSchedModelOrItineraries() const { |
| 89 | return hasInstrSchedModel() || hasInstrItineraries(); |
| 90 | } |
| 91 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 92 | /// Identify the processor corresponding to the current subtarget. |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 93 | unsigned getProcessorID() const { return SchedModel.getProcessorID(); } |
| 94 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 95 | /// Maximum number of micro-ops that may be scheduled per cycle. |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 96 | unsigned getIssueWidth() const { return SchedModel.IssueWidth; } |
| 97 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 98 | /// Return true if new group must begin. |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 99 | bool mustBeginGroup(const MachineInstr *MI, |
| 100 | const MCSchedClassDesc *SC = nullptr) const; |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 101 | /// Return true if current group must end. |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 102 | bool mustEndGroup(const MachineInstr *MI, |
| 103 | const MCSchedClassDesc *SC = nullptr) const; |
| 104 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 105 | /// Return the number of issue slots required for this MI. |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 106 | unsigned getNumMicroOps(const MachineInstr *MI, |
| 107 | const MCSchedClassDesc *SC = nullptr) const; |
| 108 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 109 | /// Get the number of kinds of resources for this target. |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 110 | unsigned getNumProcResourceKinds() const { |
| 111 | return SchedModel.getNumProcResourceKinds(); |
| 112 | } |
| 113 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 114 | /// Get a processor resource by ID for convenience. |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 115 | const MCProcResourceDesc *getProcResource(unsigned PIdx) const { |
| 116 | return SchedModel.getProcResource(PIdx); |
| 117 | } |
| 118 | |
| 119 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) |
| 120 | const char *getResourceName(unsigned PIdx) const { |
| 121 | if (!PIdx) |
| 122 | return "MOps"; |
| 123 | return SchedModel.getProcResource(PIdx)->Name; |
| 124 | } |
| 125 | #endif |
| 126 | |
| 127 | using ProcResIter = const MCWriteProcResEntry *; |
| 128 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 129 | // Get an iterator into the processor resources consumed by this |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 130 | // scheduling class. |
| 131 | ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const { |
| 132 | // The subtarget holds a single resource table for all processors. |
| 133 | return STI->getWriteProcResBegin(SC); |
| 134 | } |
| 135 | ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const { |
| 136 | return STI->getWriteProcResEnd(SC); |
| 137 | } |
| 138 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 139 | /// Multiply the number of units consumed for a resource by this factor |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 140 | /// to normalize it relative to other resources. |
| 141 | unsigned getResourceFactor(unsigned ResIdx) const { |
| 142 | return ResourceFactors[ResIdx]; |
| 143 | } |
| 144 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 145 | /// Multiply number of micro-ops by this factor to normalize it |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 146 | /// relative to other resources. |
| 147 | unsigned getMicroOpFactor() const { |
| 148 | return MicroOpFactor; |
| 149 | } |
| 150 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 151 | /// Multiply cycle count by this factor to normalize it relative to |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 152 | /// other resources. This is the number of resource units per cycle. |
| 153 | unsigned getLatencyFactor() const { |
| 154 | return ResourceLCM; |
| 155 | } |
| 156 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 157 | /// Number of micro-ops that may be buffered for OOO execution. |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 158 | unsigned getMicroOpBufferSize() const { return SchedModel.MicroOpBufferSize; } |
| 159 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 160 | /// Number of resource units that may be buffered for OOO execution. |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 161 | /// \return The buffer size in resource units or -1 for unlimited. |
| 162 | int getResourceBufferSize(unsigned PIdx) const { |
| 163 | return SchedModel.getProcResource(PIdx)->BufferSize; |
| 164 | } |
| 165 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 166 | /// Compute operand latency based on the available machine model. |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 167 | /// |
| 168 | /// Compute and return the latency of the given data dependent def and use |
| 169 | /// when the operand indices are already known. UseMI may be NULL for an |
| 170 | /// unknown user. |
| 171 | unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, |
| 172 | const MachineInstr *UseMI, unsigned UseOperIdx) |
| 173 | const; |
| 174 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 175 | /// Compute the instruction latency based on the available machine |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 176 | /// model. |
| 177 | /// |
| 178 | /// Compute and return the expected latency of this instruction independent of |
| 179 | /// a particular use. computeOperandLatency is the preferred API, but this is |
| 180 | /// occasionally useful to help estimate instruction cost. |
| 181 | /// |
| 182 | /// If UseDefaultDefLatency is false and no new machine sched model is |
| 183 | /// present this method falls back to TII->getInstrLatency with an empty |
| 184 | /// instruction itinerary (this is so we preserve the previous behavior of the |
| 185 | /// if converter after moving it to TargetSchedModel). |
| 186 | unsigned computeInstrLatency(const MachineInstr *MI, |
| 187 | bool UseDefaultDefLatency = true) const; |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 188 | unsigned computeInstrLatency(const MCInst &Inst) const; |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 189 | unsigned computeInstrLatency(unsigned Opcode) const; |
| 190 | |
| 191 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 192 | /// Output dependency latency of a pair of defs of the same register. |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 193 | /// |
| 194 | /// This is typically one cycle. |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 195 | unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx, |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 196 | const MachineInstr *DepMI) const; |
| 197 | |
Andrew Scull | cdfcccc | 2018-10-05 20:58:37 +0100 | [diff] [blame^] | 198 | /// Compute the reciprocal throughput of the given instruction. |
| 199 | double computeReciprocalThroughput(const MachineInstr *MI) const; |
| 200 | double computeReciprocalThroughput(const MCInst &MI) const; |
| 201 | double computeReciprocalThroughput(unsigned Opcode) const; |
Andrew Scull | 5e1ddfa | 2018-08-14 10:06:54 +0100 | [diff] [blame] | 202 | }; |
| 203 | |
| 204 | } // end namespace llvm |
| 205 | |
| 206 | #endif // LLVM_CODEGEN_TARGETSCHEDULE_H |