//===- llvm/CodeGen/ScheduleDAG.h - Common Base Class -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Implements the ScheduleDAG class, which is used as the common base
/// class for instruction schedulers. This encapsulates the scheduling DAG,
/// which is shared between SelectionDAG and MachineInstr scheduling.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SCHEDULEDAG_H
#define LLVM_CODEGEN_SCHEDULEDAG_H

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
#include <vector>

namespace llvm {

template<class Graph> class GraphWriter;
class LLVMTargetMachine;
class MachineFunction;
class MachineRegisterInfo;
class MCInstrDesc;
struct MCSchedClassDesc;
class SDNode;
class SUnit;
class ScheduleDAG;
class TargetInstrInfo;
class TargetRegisterClass;
class TargetRegisterInfo;

/// Scheduling dependency. This represents one direction of an edge in the
/// scheduling DAG.
class SDep {
public:
  /// These are the different kinds of scheduling dependencies.
  enum Kind {
    Data,   ///< Regular data dependence (aka true-dependence).
    Anti,   ///< A register anti-dependence (aka WAR).
    Output, ///< A register output-dependence (aka WAW).
    Order   ///< Any other ordering dependency.
  };

  // Strong dependencies must be respected by the scheduler. Artificial
  // dependencies may be removed only if they are redundant with another
  // strong dependence.
  //
  // Weak dependencies may be violated by the scheduling strategy, but only if
  // the strategy can prove it is correct to do so.
  //
  // Strong OrderKinds must occur before "Weak".
  // Weak OrderKinds must occur after "Weak".
  enum OrderKind {
    Barrier,      ///< An unknown scheduling barrier.
    MayAliasMem,  ///< Nonvolatile load/store instructions that may alias.
    MustAliasMem, ///< Nonvolatile load/store instructions that must alias.
    Artificial,   ///< Arbitrary strong DAG edge (no real dependence).
    Weak,         ///< Arbitrary weak DAG edge.
    Cluster       ///< Weak DAG edge linking a chain of clustered instrs.
  };

private:
  /// A pointer to the depending/depended-on SUnit, and an enum
  /// indicating the kind of the dependency.
  PointerIntPair<SUnit *, 2, Kind> Dep;

  /// A union discriminated by the dependence kind.
  union {
    /// For Data, Anti, and Output dependencies, the associated register. For
    /// Data dependencies that don't currently have a register assigned, this
    /// is set to zero.
    unsigned Reg;

    /// Additional information about Order dependencies.
    unsigned OrdKind; // enum OrderKind
  } Contents;

  /// The time associated with this edge. Often this is just the value of the
  /// Latency field of the predecessor, however advanced models may provide
  /// additional information about specific edges.
  unsigned Latency;

public:
  /// Constructs a null SDep. This is only for use by container classes which
  /// require default constructors. SUnits may not have null SDep edges.
  SDep() : Dep(nullptr, Data) {}

  /// Constructs an SDep with the specified values.
  SDep(SUnit *S, Kind kind, unsigned Reg)
    : Dep(S, kind), Contents() {
    switch (kind) {
    default:
      llvm_unreachable("Reg given for non-register dependence!");
    case Anti:
    case Output:
      assert(Reg != 0 &&
             "SDep::Anti and SDep::Output must use a non-zero Reg!");
      Contents.Reg = Reg;
      Latency = 0;
      break;
    case Data:
      Contents.Reg = Reg;
      Latency = 1;
      break;
    }
  }

  SDep(SUnit *S, OrderKind kind)
    : Dep(S, Order), Contents(), Latency(0) {
    Contents.OrdKind = kind;
  }
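
  // A minimal illustrative sketch (not part of the API): constructing the two
  // kinds of non-null edges. `DefSU` is a hypothetical predecessor SUnit.
  //
  //   SDep RegDep(DefSU, SDep::Data, /*Reg=*/0); // data edge, latency 1
  //   SDep MemDep(DefSU, SDep::MayAliasMem);     // order edge, latency 0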

  /// Returns true if the specified SDep is equivalent except for latency.
  bool overlaps(const SDep &Other) const;

  bool operator==(const SDep &Other) const {
    return overlaps(Other) && Latency == Other.Latency;
  }

  bool operator!=(const SDep &Other) const {
    return !operator==(Other);
  }

  /// Returns the latency value for this edge, which roughly means the
  /// minimum number of cycles that must elapse between the predecessor and
  /// the successor, given that they have this edge between them.
  unsigned getLatency() const {
    return Latency;
  }

  /// Sets the latency for this edge.
  void setLatency(unsigned Lat) {
    Latency = Lat;
  }

  /// Returns the SUnit to which this edge points.
  SUnit *getSUnit() const;

  /// Assigns the SUnit to which this edge points.
  void setSUnit(SUnit *SU);

  /// Returns an enum value representing the kind of the dependence.
  Kind getKind() const;

  /// Shorthand for getKind() != SDep::Data.
  bool isCtrl() const {
    return getKind() != Data;
  }

  /// Tests if this is an Order dependence between two memory accesses
  /// where both sides of the dependence access memory in non-volatile and
  /// fully modeled ways.
  bool isNormalMemory() const {
    return getKind() == Order && (Contents.OrdKind == MayAliasMem
                                  || Contents.OrdKind == MustAliasMem);
  }

  /// Tests if this is an Order dependence that is marked as a barrier.
  bool isBarrier() const {
    return getKind() == Order && Contents.OrdKind == Barrier;
  }

  /// Tests if this could be any kind of memory dependence.
  bool isNormalMemoryOrBarrier() const {
    return (isNormalMemory() || isBarrier());
  }

  /// Tests if this is an Order dependence that is marked as
  /// "must alias", meaning that the SUnits at either end of the edge have a
  /// memory dependence on a known memory location.
  bool isMustAlias() const {
    return getKind() == Order && Contents.OrdKind == MustAliasMem;
  }

  /// Tests if this is a weak dependence. Weak dependencies are considered
  /// DAG edges for height computation and other heuristics, but do not force
  /// ordering. Breaking a weak edge may require the scheduler to compensate,
  /// for example by inserting a copy.
  bool isWeak() const {
    return getKind() == Order && Contents.OrdKind >= Weak;
  }

  /// Tests if this is an Order dependence that is marked as
  /// "artificial", meaning it isn't necessary for correctness.
  bool isArtificial() const {
    return getKind() == Order && Contents.OrdKind == Artificial;
  }

  /// Tests if this is an Order dependence that is marked as "cluster",
  /// meaning it is artificial and wants to be adjacent.
  bool isCluster() const {
    return getKind() == Order && Contents.OrdKind == Cluster;
  }

  /// Tests if this is a Data dependence that is associated with a register.
  bool isAssignedRegDep() const {
    return getKind() == Data && Contents.Reg != 0;
  }

  /// Returns the register associated with this edge. This is only valid on
  /// Data, Anti, and Output edges. On Data edges, this value may be zero,
  /// meaning there is no associated register.
  unsigned getReg() const {
    assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
           "getReg called on non-register dependence edge!");
    return Contents.Reg;
  }

  /// Assigns the associated register for this edge. This is only valid on
  /// Data, Anti, and Output edges. On Anti and Output edges, this value must
  /// not be zero. On Data edges, the value may be zero, which would mean that
  /// no specific register is associated with this edge.
  void setReg(unsigned Reg) {
    assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
           "setReg called on non-register dependence edge!");
    assert((getKind() != Anti || Reg != 0) &&
           "SDep::Anti edge cannot use the zero register!");
    assert((getKind() != Output || Reg != 0) &&
           "SDep::Output edge cannot use the zero register!");
    Contents.Reg = Reg;
  }

  void dump(const TargetRegisterInfo *TRI = nullptr) const;
};
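
// Note on the semantics above (illustrative): two edges with the same target,
// kind, and register "overlap", and compare equal only if their latencies
// also match. `SU` is a hypothetical SUnit pointer:
//
//   SDep A(SU, SDep::Data, /*Reg=*/0), B = A; // A has the default latency 1
//   B.setLatency(3);
//   assert(A.overlaps(B) && A != B);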

/// Scheduling unit. This is a node in the scheduling DAG.
class SUnit {
private:
  enum : unsigned { BoundaryID = ~0u };

  SDNode *Node = nullptr;        ///< Representative node.
  MachineInstr *Instr = nullptr; ///< Alternatively, a MachineInstr.

public:
  SUnit *OrigNode = nullptr; ///< If not this, the node from which this node
                             /// was cloned. (SD scheduling only)

  const MCSchedClassDesc *SchedClass =
      nullptr; ///< nullptr or resolved SchedClass.

  SmallVector<SDep, 4> Preds; ///< All SUnit predecessors.
  SmallVector<SDep, 4> Succs; ///< All SUnit successors.

  typedef SmallVectorImpl<SDep>::iterator pred_iterator;
  typedef SmallVectorImpl<SDep>::iterator succ_iterator;
  typedef SmallVectorImpl<SDep>::const_iterator const_pred_iterator;
  typedef SmallVectorImpl<SDep>::const_iterator const_succ_iterator;

  unsigned NodeNum = BoundaryID;     ///< Entry # of node in the node vector.
  unsigned NodeQueueId = 0;          ///< Queue id of node.
  unsigned NumPreds = 0;             ///< # of SDep::Data preds.
  unsigned NumSuccs = 0;             ///< # of SDep::Data succs.
  unsigned NumPredsLeft = 0;         ///< # of preds not scheduled.
  unsigned NumSuccsLeft = 0;         ///< # of succs not scheduled.
  unsigned WeakPredsLeft = 0;        ///< # of weak preds not scheduled.
  unsigned WeakSuccsLeft = 0;        ///< # of weak succs not scheduled.
  unsigned short NumRegDefsLeft = 0; ///< # of reg defs with no scheduled use.
  unsigned short Latency = 0;        ///< Node latency.
  bool isVRegCycle : 1;              ///< May use and def the same vreg.
  bool isCall : 1;                   ///< Is a function call.
  bool isCallOp : 1;                 ///< Is a function call operand.
  bool isTwoAddress : 1;             ///< Is a two-address instruction.
  bool isCommutable : 1;             ///< Is a commutable instruction.
  bool hasPhysRegUses : 1;           ///< Has physreg uses.
  bool hasPhysRegDefs : 1;           ///< Has physreg defs that are being used.
  bool hasPhysRegClobbers : 1;       ///< Has any physreg defs, used or not.
  bool isPending : 1;                ///< True once pending.
  bool isAvailable : 1;              ///< True once available.
  bool isScheduled : 1;              ///< True once scheduled.
  bool isScheduleHigh : 1;           ///< True if preferable to schedule high.
  bool isScheduleLow : 1;            ///< True if preferable to schedule low.
  bool isCloned : 1;                 ///< True if this node has been cloned.
  bool isUnbuffered : 1;             ///< Uses an unbuffered resource.
  bool hasReservedResource : 1;      ///< Uses a reserved resource.
  Sched::Preference SchedulingPref = Sched::None; ///< Scheduling preference.

private:
  bool isDepthCurrent : 1;  ///< True if Depth is current.
  bool isHeightCurrent : 1; ///< True if Height is current.
  unsigned Depth = 0;       ///< Node depth.
  unsigned Height = 0;      ///< Node height.

public:
  unsigned TopReadyCycle = 0; ///< Cycle relative to start when node is ready.
  unsigned BotReadyCycle = 0; ///< Cycle relative to end when node is ready.

  const TargetRegisterClass *CopyDstRC =
      nullptr; ///< Is a special copy node if != nullptr.
  const TargetRegisterClass *CopySrcRC = nullptr;

  /// Constructs an SUnit for pre-regalloc scheduling to represent an
  /// SDNode and any nodes flagged to it.
  SUnit(SDNode *node, unsigned nodenum)
      : Node(node), NodeNum(nodenum), isVRegCycle(false), isCall(false),
        isCallOp(false), isTwoAddress(false), isCommutable(false),
        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
        isPending(false), isAvailable(false), isScheduled(false),
        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
        isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
        isHeightCurrent(false) {}

  /// Constructs an SUnit for post-regalloc scheduling to represent a
  /// MachineInstr.
  SUnit(MachineInstr *instr, unsigned nodenum)
      : Instr(instr), NodeNum(nodenum), isVRegCycle(false), isCall(false),
        isCallOp(false), isTwoAddress(false), isCommutable(false),
        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
        isPending(false), isAvailable(false), isScheduled(false),
        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
        isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
        isHeightCurrent(false) {}

  /// Constructs a placeholder SUnit.
  SUnit()
      : isVRegCycle(false), isCall(false), isCallOp(false), isTwoAddress(false),
        isCommutable(false), hasPhysRegUses(false), hasPhysRegDefs(false),
        hasPhysRegClobbers(false), isPending(false), isAvailable(false),
        isScheduled(false), isScheduleHigh(false), isScheduleLow(false),
        isCloned(false), isUnbuffered(false), hasReservedResource(false),
        isDepthCurrent(false), isHeightCurrent(false) {}

  /// Boundary nodes are placeholders for the boundary of the
  /// scheduling region.
  ///
  /// BoundaryNodes can have DAG edges, including Data edges, but they do not
  /// correspond to schedulable entities (e.g. instructions) and do not have a
  /// valid ID. Consequently, always check for boundary nodes before accessing
  /// an associative data structure keyed on node ID.
  bool isBoundaryNode() const { return NodeNum == BoundaryID; }
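
  // Sketch of the guard described above (illustrative; `NodeIdMap` is a
  // hypothetical container keyed on NodeNum, not part of this header):
  //
  //   if (!SU->isBoundaryNode())
  //     NodeIdMap[SU->NodeNum] = ...;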

  /// Assigns the representative SDNode for this SUnit. This may be used
  /// during pre-regalloc scheduling.
  void setNode(SDNode *N) {
    assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
    Node = N;
  }

  /// Returns the representative SDNode for this SUnit. This may be used
  /// during pre-regalloc scheduling.
  SDNode *getNode() const {
    assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
    return Node;
  }

  /// Returns true if this SUnit refers to a machine instruction as
  /// opposed to an SDNode.
  bool isInstr() const { return Instr; }

  /// Assigns the instruction for the SUnit. This may be used during
  /// post-regalloc scheduling.
  void setInstr(MachineInstr *MI) {
    assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
    Instr = MI;
  }

  /// Returns the representative MachineInstr for this SUnit. This may be used
  /// during post-regalloc scheduling.
  MachineInstr *getInstr() const {
    assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
    return Instr;
  }

  /// Adds the specified edge as a pred of the current node if not already.
  /// It also adds the current node as a successor of the specified node.
  bool addPred(const SDep &D, bool Required = true);

  /// Adds a barrier edge to SU by calling addPred(), with latency 0
  /// generally or latency 1 for a store followed by a load.
  bool addPredBarrier(SUnit *SU) {
    SDep Dep(SU, SDep::Barrier);
    unsigned TrueMemOrderLatency =
        ((SU->getInstr()->mayStore() && this->getInstr()->mayLoad()) ? 1 : 0);
    Dep.setLatency(TrueMemOrderLatency);
    return addPred(Dep);
  }

  /// Removes the specified edge as a pred of the current node if it exists.
  /// It also removes the current node as a successor of the specified node.
  void removePred(const SDep &D);

  /// Returns the depth of this node, which is the length of the maximum path
  /// up to any node which has no predecessors.
  unsigned getDepth() const {
    if (!isDepthCurrent)
      const_cast<SUnit *>(this)->ComputeDepth();
    return Depth;
  }

  /// Returns the height of this node, which is the length of the
  /// maximum path down to any node which has no successors.
  unsigned getHeight() const {
    if (!isHeightCurrent)
      const_cast<SUnit *>(this)->ComputeHeight();
    return Height;
  }
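
  // Taken together, depth and height give the longest latency-weighted path
  // through a node. A sketch of a critical-path membership test, assuming a
  // hypothetical `CritPathLen` computed elsewhere:
  //
  //   bool OnCritPath = SU->getDepth() + SU->getHeight() == CritPathLen;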

  /// If NewDepth is greater than this node's depth value, sets it to
  /// be the new depth value. This also recursively marks successor nodes
  /// dirty.
  void setDepthToAtLeast(unsigned NewDepth);

  /// If NewHeight is greater than this node's height value, sets it to be
  /// the new height value. This also recursively marks predecessor nodes
  /// dirty.
  void setHeightToAtLeast(unsigned NewHeight);

  /// Sets a flag in this node to indicate that its stored Depth value
  /// will require recomputation the next time getDepth() is called.
  void setDepthDirty();

  /// Sets a flag in this node to indicate that its stored Height value
  /// will require recomputation the next time getHeight() is called.
  void setHeightDirty();

  /// Tests if node N is a predecessor of this node.
  bool isPred(const SUnit *N) const {
    for (const SDep &Pred : Preds)
      if (Pred.getSUnit() == N)
        return true;
    return false;
  }

  /// Tests if node N is a successor of this node.
  bool isSucc(const SUnit *N) const {
    for (const SDep &Succ : Succs)
      if (Succ.getSUnit() == N)
        return true;
    return false;
  }

  bool isTopReady() const {
    return NumPredsLeft == 0;
  }
  bool isBottomReady() const {
    return NumSuccsLeft == 0;
  }

  /// Orders this node's predecessor edges such that the critical path
  /// edge occurs first.
  void biasCriticalPath();

  void dumpAttributes() const;

private:
  void ComputeDepth();
  void ComputeHeight();
};

/// Returns true if the specified SDep is equivalent except for latency.
inline bool SDep::overlaps(const SDep &Other) const {
  if (Dep != Other.Dep)
    return false;
  switch (Dep.getInt()) {
  case Data:
  case Anti:
  case Output:
    return Contents.Reg == Other.Contents.Reg;
  case Order:
    return Contents.OrdKind == Other.Contents.OrdKind;
  }
  llvm_unreachable("Invalid dependency kind!");
}

/// Returns the SUnit to which this edge points.
inline SUnit *SDep::getSUnit() const { return Dep.getPointer(); }

/// Assigns the SUnit to which this edge points.
inline void SDep::setSUnit(SUnit *SU) { Dep.setPointer(SU); }

/// Returns an enum value representing the kind of the dependence.
inline SDep::Kind SDep::getKind() const { return Dep.getInt(); }

//===--------------------------------------------------------------------===//

/// This interface is used to plug different priority computation
/// algorithms into the list scheduler. It implements the interface of a
/// standard priority queue, where nodes are inserted in arbitrary order and
/// returned in priority order. The computation of the priority and the
/// representation of the queue are totally up to the implementation to
/// decide.
class SchedulingPriorityQueue {
  virtual void anchor();

  unsigned CurCycle = 0;
  bool HasReadyFilter;

public:
  SchedulingPriorityQueue(bool rf = false) : HasReadyFilter(rf) {}

  virtual ~SchedulingPriorityQueue() = default;

  virtual bool isBottomUp() const = 0;

  virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
  virtual void addNode(const SUnit *SU) = 0;
  virtual void updateNode(const SUnit *SU) = 0;
  virtual void releaseState() = 0;

  virtual bool empty() const = 0;

  bool hasReadyFilter() const { return HasReadyFilter; }

  virtual bool tracksRegPressure() const { return false; }

  virtual bool isReady(SUnit *) const {
    assert(!HasReadyFilter && "The ready filter must override isReady()");
    return true;
  }

  virtual void push(SUnit *U) = 0;

  void push_all(const std::vector<SUnit *> &Nodes) {
    for (SUnit *SU : Nodes)
      push(SU);
  }

  virtual SUnit *pop() = 0;

  virtual void remove(SUnit *SU) = 0;

  virtual void dump(ScheduleDAG *) const {}

  /// As each node is scheduled, this method is invoked. This allows the
  /// priority function to adjust the priority of related unscheduled nodes,
  /// for example.
  virtual void scheduledNode(SUnit *) {}

  virtual void unscheduledNode(SUnit *) {}

  void setCurCycle(unsigned Cycle) {
    CurCycle = Cycle;
  }

  unsigned getCurCycle() const {
    return CurCycle;
  }
};
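
// A minimal sketch of a concrete SchedulingPriorityQueue, shown only to
// illustrate the contract above (the real implementations live in the list
// schedulers). It returns nodes in plain FIFO order and tracks no state:
//
//   struct FIFOQueue : SchedulingPriorityQueue {
//     std::vector<SUnit *> Q;
//     bool isBottomUp() const override { return true; }
//     void initNodes(std::vector<SUnit> &) override {}
//     void addNode(const SUnit *) override {}
//     void updateNode(const SUnit *) override {}
//     void releaseState() override {}
//     bool empty() const override { return Q.empty(); }
//     void push(SUnit *U) override { Q.push_back(U); }
//     SUnit *pop() override {
//       SUnit *SU = Q.front();
//       Q.erase(Q.begin());
//       return SU;
//     }
//     void remove(SUnit *SU) override {
//       Q.erase(std::find(Q.begin(), Q.end(), SU));
//     }
//   };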

class ScheduleDAG {
public:
  const LLVMTargetMachine &TM;   ///< Target processor
  const TargetInstrInfo *TII;    ///< Target instruction information
  const TargetRegisterInfo *TRI; ///< Target processor register info
  MachineFunction &MF;           ///< Machine function
  MachineRegisterInfo &MRI;      ///< Virtual/real register map
  std::vector<SUnit> SUnits;     ///< The scheduling units.
  SUnit EntrySU;                 ///< Special node for the region entry.
  SUnit ExitSU;                  ///< Special node for the region exit.

#ifdef NDEBUG
  static const bool StressSched = false;
#else
  bool StressSched;
#endif

  explicit ScheduleDAG(MachineFunction &mf);

  virtual ~ScheduleDAG();

  /// Clears the DAG state (between regions).
  void clearDAG();

  /// Returns the MCInstrDesc of this SUnit.
  /// Returns NULL for SDNodes without a machine opcode.
  const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
    if (SU->isInstr()) return &SU->getInstr()->getDesc();
    return getNodeDesc(SU->getNode());
  }
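
  // Illustrative only: the common interface lets clients query instruction
  // properties without caring whether the unit wraps an SDNode or a
  // MachineInstr. `DAG` and `SU` are hypothetical names here:
  //
  //   if (const MCInstrDesc *Desc = DAG.getInstrDesc(SU))
  //     if (Desc->isCall())
  //       ...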

  /// Pops up a GraphViz/gv window with the ScheduleDAG rendered using 'dot'.
  virtual void viewGraph(const Twine &Name, const Twine &Title);
  virtual void viewGraph();

  virtual void dumpNode(const SUnit &SU) const = 0;
  virtual void dump() const = 0;
  void dumpNodeName(const SUnit &SU) const;

  /// Returns a label for an SUnit node in a visualization of the ScheduleDAG.
  virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;

  /// Returns a label for the region of code covered by the DAG.
  virtual std::string getDAGName() const = 0;

  /// Adds custom features for a visualization of the ScheduleDAG.
  virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}

#ifndef NDEBUG
  /// Verifies that all SUnits were scheduled and that their state is
  /// consistent. Returns the number of scheduled SUnits.
  unsigned VerifyScheduledDAG(bool isBottomUp);
#endif

protected:
  void dumpNodeAll(const SUnit &SU) const;

private:
  /// Returns the MCInstrDesc of this SDNode or NULL.
  const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
};

class SUnitIterator : public std::iterator<std::forward_iterator_tag,
                                           SUnit, ptrdiff_t> {
  SUnit *Node;
  unsigned Operand;

  SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}

public:
  bool operator==(const SUnitIterator& x) const {
    return Operand == x.Operand;
  }
  bool operator!=(const SUnitIterator& x) const { return !operator==(x); }

  pointer operator*() const {
    return Node->Preds[Operand].getSUnit();
  }
  pointer operator->() const { return operator*(); }

  SUnitIterator& operator++() {                 // Preincrement
    ++Operand;
    return *this;
  }
  SUnitIterator operator++(int) {               // Postincrement
    SUnitIterator tmp = *this; ++*this; return tmp;
  }

  static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
  static SUnitIterator end  (SUnit *N) {
    return SUnitIterator(N, (unsigned)N->Preds.size());
  }

  unsigned getOperand() const { return Operand; }
  const SUnit *getNode() const { return Node; }

  /// Tests if this is not an SDep::Data dependence.
  bool isCtrlDep() const {
    return getSDep().isCtrl();
  }
  bool isArtificialDep() const {
    return getSDep().isArtificial();
  }
  const SDep &getSDep() const {
    return Node->Preds[Operand];
  }
};

template <> struct GraphTraits<SUnit*> {
  typedef SUnit *NodeRef;
  typedef SUnitIterator ChildIteratorType;
  static NodeRef getEntryNode(SUnit *N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) {
    return SUnitIterator::begin(N);
  }
  static ChildIteratorType child_end(NodeRef N) {
    return SUnitIterator::end(N);
  }
};

template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
  typedef pointer_iterator<std::vector<SUnit>::iterator> nodes_iterator;
  static nodes_iterator nodes_begin(ScheduleDAG *G) {
    return nodes_iterator(G->SUnits.begin());
  }
  static nodes_iterator nodes_end(ScheduleDAG *G) {
    return nodes_iterator(G->SUnits.end());
  }
};
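
// With these GraphTraits specializations, LLVM's generic graph algorithms
// apply directly to the scheduling DAG. An illustrative sketch (depth_first
// comes from llvm/ADT/DepthFirstIterator.h and dbgs() from
// llvm/Support/Debug.h, neither of which this header includes); note that
// the child edges of an SUnit point at its predecessors:
//
//   for (SUnit *N : depth_first(SU))
//     dbgs() << "visiting SU(" << N->NodeNum << ")\n";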

/// This class can compute a topological ordering for SUnits and provides
/// methods for dynamically updating the ordering as new edges are added.
///
/// This allows a very fast implementation of IsReachable, for example.
class ScheduleDAGTopologicalSort {
  /// A reference to the ScheduleDAG's SUnits.
  std::vector<SUnit> &SUnits;
  SUnit *ExitSU;

  /// Have any new nodes been added?
  bool Dirty = false;

  /// Outstanding added edges that have not been applied to the ordering.
  SmallVector<std::pair<SUnit *, SUnit *>, 16> Updates;

  /// Maps topological index to the node number.
  std::vector<int> Index2Node;
  /// Maps the node number to its topological index.
  std::vector<int> Node2Index;
  /// A set of nodes visited during a DFS traversal.
  BitVector Visited;

  /// Makes a DFS traversal and marks all nodes affected by the edge
  /// insertion. These nodes will later get new topological indexes by means
  /// of the Shift method.
  void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);

  /// Reassigns topological indexes for the nodes in the DAG to
  /// preserve the topological ordering.
  void Shift(BitVector& Visited, int LowerBound, int UpperBound);

  /// Assigns the topological index to the node n.
  void Allocate(int n, int index);

  /// Fixes the ordering, by either recomputing it from scratch or by
  /// applying any outstanding updates. Uses a heuristic to estimate which
  /// will be cheaper.
  void FixOrder();

public:
  ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);

  /// Adds an SUnit without predecessors to the end of the topological order.
  /// It must also be the first new node added to the DAG.
  void AddSUnitWithoutPredecessors(const SUnit *SU);

  /// Creates the initial topological ordering from the DAG to be scheduled.
  void InitDAGTopologicalSorting();

  /// Returns an array of SUs that are both in the successor
  /// subtree of StartSU and in the predecessor subtree of TargetSU.
  /// StartSU and TargetSU are not in the array.
  /// Success is false if TargetSU is not in the successor subtree of
  /// StartSU, else it is true.
  std::vector<int> GetSubGraph(const SUnit &StartSU, const SUnit &TargetSU,
                               bool &Success);

  /// Checks if \p SU is reachable from \p TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU);

  /// Returns true if addPred(TargetSU, SU) creates a cycle.
  bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);

  /// Updates the topological ordering to accommodate an edge to be
  /// added from SUnit \p X to SUnit \p Y.
  void AddPred(SUnit *Y, SUnit *X);

  /// Queues an update to the topological ordering to accommodate an edge to
  /// be added from SUnit \p X to SUnit \p Y.
  void AddPredQueued(SUnit *Y, SUnit *X);

  /// Updates the topological ordering to accommodate an edge to be
  /// removed from the specified node \p N from the predecessors of the
  /// current node \p M.
  void RemovePred(SUnit *M, SUnit *N);

  /// Marks the ordering as temporarily broken, after a new node has been
  /// added.
  void MarkDirty() { Dirty = true; }

  typedef std::vector<int>::iterator iterator;
  typedef std::vector<int>::const_iterator const_iterator;
  iterator begin() { return Index2Node.begin(); }
  const_iterator begin() const { return Index2Node.begin(); }
  iterator end() { return Index2Node.end(); }
  const_iterator end() const { return Index2Node.end(); }

  typedef std::vector<int>::reverse_iterator reverse_iterator;
  typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
  reverse_iterator rbegin() { return Index2Node.rbegin(); }
  const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
  reverse_iterator rend() { return Index2Node.rend(); }
  const_reverse_iterator rend() const { return Index2Node.rend(); }
};
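
// An illustrative usage sketch (`DAG`, `X`, and `Y` are hypothetical names;
// `DAG` is an already-built ScheduleDAG): queue edge insertions and let
// FixOrder() reconcile the ordering lazily.
//
//   ScheduleDAGTopologicalSort Topo(DAG.SUnits, &DAG.ExitSU);
//   Topo.InitDAGTopologicalSorting();
//   if (!Topo.WillCreateCycle(Y, X))
//     Topo.AddPredQueued(Y, X); // new edge X -> Y, applied lazily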

} // end namespace llvm

#endif // LLVM_CODEGEN_SCHEDULEDAG_H