//===- RegAllocPBQP.h -------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PBQP graph, node metadata, and solver types used to
// express register allocation problems as Partitioned Boolean Quadratic
// Problem (PBQP) instances, and the interface of the PBQP register allocator.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGALLOCPBQP_H
#define LLVM_CODEGEN_REGALLOCPBQP_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/CodeGen/PBQP/CostAllocator.h"
#include "llvm/CodeGen/PBQP/Graph.h"
#include "llvm/CodeGen/PBQP/Math.h"
#include "llvm/CodeGen/PBQP/ReductionRules.h"
#include "llvm/CodeGen/PBQP/Solution.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <limits>
#include <memory>
#include <set>
#include <vector>

namespace llvm {

class FunctionPass;
class LiveIntervals;
class MachineBlockFrequencyInfo;
class MachineFunction;
class raw_ostream;

namespace PBQP {
namespace RegAlloc {

/// Spill option index.
inline unsigned getSpillOptionIdx() { return 0; }
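
// By convention, option 0 of every node cost vector (and row/column 0 of
// every edge cost matrix) is the spill option, which is why the loops and
// sizes below skip index 0 when examining register options.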

/// Metadata to speed allocatability test.
///
/// Keeps track of the number of infinities in each row and column.
class MatrixMetadata {
public:
  MatrixMetadata(const Matrix& M)
      : UnsafeRows(new bool[M.getRows() - 1]()),
        UnsafeCols(new bool[M.getCols() - 1]()) {
    unsigned* ColCounts = new unsigned[M.getCols() - 1]();

    for (unsigned i = 1; i < M.getRows(); ++i) {
      unsigned RowCount = 0;
      for (unsigned j = 1; j < M.getCols(); ++j) {
        if (M[i][j] == std::numeric_limits<PBQPNum>::infinity()) {
          ++RowCount;
          ++ColCounts[j - 1];
          UnsafeRows[i - 1] = true;
          UnsafeCols[j - 1] = true;
        }
      }
      WorstRow = std::max(WorstRow, RowCount);
    }
    unsigned WorstColCountForCurRow =
        *std::max_element(ColCounts, ColCounts + M.getCols() - 1);
    WorstCol = std::max(WorstCol, WorstColCountForCurRow);
    delete[] ColCounts;
  }

  MatrixMetadata(const MatrixMetadata &) = delete;
  MatrixMetadata &operator=(const MatrixMetadata &) = delete;

  unsigned getWorstRow() const { return WorstRow; }
  unsigned getWorstCol() const { return WorstCol; }
  const bool* getUnsafeRows() const { return UnsafeRows.get(); }
  const bool* getUnsafeCols() const { return UnsafeCols.get(); }

private:
  unsigned WorstRow = 0;
  unsigned WorstCol = 0;
  std::unique_ptr<bool[]> UnsafeRows;
  std::unique_ptr<bool[]> UnsafeCols;
};
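
// These counts are consumed by NodeMetadata::handleAddEdge/handleRemoveEdge
// below: getWorstRow()/getWorstCol() bound how many of an endpoint node's
// options a single edge can deny, and the unsafe-row/column flags record
// which options carry an infinite cost somewhere on this edge.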

/// Holds a vector of the allowed physical regs for a vreg.
class AllowedRegVector {
  friend hash_code hash_value(const AllowedRegVector &);

public:
  AllowedRegVector() = default;
  AllowedRegVector(AllowedRegVector &&) = default;

  AllowedRegVector(const std::vector<unsigned> &OptVec)
      : NumOpts(OptVec.size()), Opts(new unsigned[NumOpts]) {
    std::copy(OptVec.begin(), OptVec.end(), Opts.get());
  }

  unsigned size() const { return NumOpts; }
  unsigned operator[](size_t I) const { return Opts[I]; }

  bool operator==(const AllowedRegVector &Other) const {
    if (NumOpts != Other.NumOpts)
      return false;
    return std::equal(Opts.get(), Opts.get() + NumOpts, Other.Opts.get());
  }

  bool operator!=(const AllowedRegVector &Other) const {
    return !(*this == Other);
  }

private:
  unsigned NumOpts = 0;
  std::unique_ptr<unsigned[]> Opts;
};

inline hash_code hash_value(const AllowedRegVector &OptRegs) {
  unsigned *OStart = OptRegs.Opts.get();
  unsigned *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
  return hash_combine(OptRegs.NumOpts,
                      hash_combine_range(OStart, OEnd));
}
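
// Uniquing support: hash_value and operator== allow the ValuePool in
// GraphMetadata below to share a single immutable AllowedRegVector between
// all vregs that have the same set of allowed physregs.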
129
Andrew Scullcdfcccc2018-10-05 20:58:37 +0100130/// Holds graph-level metadata relevant to PBQP RA problems.
Andrew Scull5e1ddfa2018-08-14 10:06:54 +0100131class GraphMetadata {
132private:
133 using AllowedRegVecPool = ValuePool<AllowedRegVector>;
134
135public:
136 using AllowedRegVecRef = AllowedRegVecPool::PoolRef;
137
138 GraphMetadata(MachineFunction &MF,
139 LiveIntervals &LIS,
140 MachineBlockFrequencyInfo &MBFI)
141 : MF(MF), LIS(LIS), MBFI(MBFI) {}
142
143 MachineFunction &MF;
144 LiveIntervals &LIS;
145 MachineBlockFrequencyInfo &MBFI;
146
147 void setNodeIdForVReg(unsigned VReg, GraphBase::NodeId NId) {
148 VRegToNodeId[VReg] = NId;
149 }
150
151 GraphBase::NodeId getNodeIdForVReg(unsigned VReg) const {
152 auto VRegItr = VRegToNodeId.find(VReg);
153 if (VRegItr == VRegToNodeId.end())
154 return GraphBase::invalidNodeId();
155 return VRegItr->second;
156 }
157
158 AllowedRegVecRef getAllowedRegs(AllowedRegVector Allowed) {
159 return AllowedRegVecs.getValue(std::move(Allowed));
160 }
161
162private:
163 DenseMap<unsigned, GraphBase::NodeId> VRegToNodeId;
164 AllowedRegVecPool AllowedRegVecs;
165};
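
// A sketch of how a builder might wire a vreg into the graph (Costs, VReg
// and Allowed are assumed to have been computed by the caller; the real
// builder lives in lib/CodeGen/RegAllocPBQP.cpp):
//
// \code
//   PBQPRAGraph::NodeId NId = G.addNode(std::move(Costs));
//   G.getNodeMetadata(NId).setVReg(VReg);
//   G.getNodeMetadata(NId).setAllowedRegs(
//       G.getMetadata().getAllowedRegs(std::move(Allowed)));
//   G.getMetadata().setNodeIdForVReg(VReg, NId);
// \endcode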

/// Holds solver state and other metadata relevant to each PBQP RA node.
class NodeMetadata {
public:
  using AllowedRegVector = RegAlloc::AllowedRegVector;

  // The node's reduction state. The order in this enum is important,
  // as it is assumed nodes can only progress up (i.e. towards being
  // optimally reducible) when reducing the graph.
  using ReductionState = enum {
    Unprocessed,
    NotProvablyAllocatable,
    ConservativelyAllocatable,
    OptimallyReducible
  };
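
  // OptimallyReducible nodes have degree at most two and can be removed by
  // the R0/R1/R2 rules without sacrificing solution quality;
  // ConservativelyAllocatable nodes are guaranteed to receive some register
  // regardless of the options their neighbors choose.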

  NodeMetadata() = default;

  NodeMetadata(const NodeMetadata &Other)
      : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
        OptUnsafeEdges(new unsigned[NumOpts]), VReg(Other.VReg),
        AllowedRegs(Other.AllowedRegs)
#ifndef NDEBUG
        , everConservativelyAllocatable(Other.everConservativelyAllocatable)
#endif
  {
    if (NumOpts > 0) {
      std::copy(&Other.OptUnsafeEdges[0], &Other.OptUnsafeEdges[NumOpts],
                &OptUnsafeEdges[0]);
    }
  }

  NodeMetadata(NodeMetadata &&) = default;
  NodeMetadata& operator=(NodeMetadata &&) = default;

  void setVReg(unsigned VReg) { this->VReg = VReg; }
  unsigned getVReg() const { return VReg; }

  void setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs) {
    this->AllowedRegs = std::move(AllowedRegs);
  }
  const AllowedRegVector& getAllowedRegs() const { return *AllowedRegs; }
  void setup(const Vector& Costs) {
    NumOpts = Costs.getLength() - 1;
    OptUnsafeEdges = std::unique_ptr<unsigned[]>(new unsigned[NumOpts]());
  }

  ReductionState getReductionState() const { return RS; }
  void setReductionState(ReductionState RS) {
    assert(RS >= this->RS && "A node's reduction state cannot be downgraded");
    this->RS = RS;

#ifndef NDEBUG
    // Remember this state to assert later that a non-infinite register
    // option was available.
    if (RS == ConservativelyAllocatable)
      everConservativelyAllocatable = true;
#endif
  }

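  // Transpose is true when this node is the edge's second node, i.e. when
  // its options correspond to the columns (rather than the rows) of the
  // edge's cost matrix.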
  void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
    DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
    const bool* UnsafeOpts =
        Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
    for (unsigned i = 0; i < NumOpts; ++i)
      OptUnsafeEdges[i] += UnsafeOpts[i];
  }

  void handleRemoveEdge(const MatrixMetadata& MD, bool Transpose) {
    DeniedOpts -= Transpose ? MD.getWorstRow() : MD.getWorstCol();
    const bool* UnsafeOpts =
        Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
    for (unsigned i = 0; i < NumOpts; ++i)
      OptUnsafeEdges[i] -= UnsafeOpts[i];
  }

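  // A node is conservatively allocatable if even the worst case leaves it an
  // option: either its edges cannot deny all NumOpts register options
  // (DeniedOpts < NumOpts), or some option appears on no unsafe
  // (infinite-cost) edge at all and so can never be denied.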
  bool isConservativelyAllocatable() const {
    return (DeniedOpts < NumOpts) ||
           (std::find(&OptUnsafeEdges[0], &OptUnsafeEdges[NumOpts], 0) !=
            &OptUnsafeEdges[NumOpts]);
  }

#ifndef NDEBUG
  bool wasConservativelyAllocatable() const {
    return everConservativelyAllocatable;
  }
#endif

private:
  ReductionState RS = Unprocessed;
  unsigned NumOpts = 0;
  unsigned DeniedOpts = 0;
  std::unique_ptr<unsigned[]> OptUnsafeEdges;
  unsigned VReg = 0;
  GraphMetadata::AllowedRegVecRef AllowedRegs;

#ifndef NDEBUG
  bool everConservativelyAllocatable = false;
#endif
};

class RegAllocSolverImpl {
private:
  using RAMatrix = MDMatrix<MatrixMetadata>;

public:
  using RawVector = PBQP::Vector;
  using RawMatrix = PBQP::Matrix;
  using Vector = PBQP::Vector;
  using Matrix = RAMatrix;
  using CostAllocator = PBQP::PoolCostAllocator<Vector, Matrix>;

  using NodeId = GraphBase::NodeId;
  using EdgeId = GraphBase::EdgeId;

  using NodeMetadata = RegAlloc::NodeMetadata;
  struct EdgeMetadata {};
  using GraphMetadata = RegAlloc::GraphMetadata;

  using Graph = PBQP::Graph<RegAllocSolverImpl>;

  RegAllocSolverImpl(Graph &G) : G(G) {}

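  // Solve this graph's PBQP problem: build the initial worklists (setup),
  // reduce the graph to an ordered stack of nodes, then back-propagate
  // (see ReductionRules.h) in reverse reduction order to choose an option
  // for every node.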
  Solution solve() {
    G.setSolver(*this);
    Solution S;
    setup();
    S = backpropagate(G, reduce());
    G.unsetSolver();
    return S;
  }

  void handleAddNode(NodeId NId) {
    assert(G.getNodeCosts(NId).getLength() > 1 &&
           "PBQP Graph should not contain single or zero-option nodes");
    G.getNodeMetadata(NId).setup(G.getNodeCosts(NId));
  }

  void handleRemoveNode(NodeId NId) {}
  void handleSetNodeCosts(NodeId NId, const Vector& newCosts) {}

  void handleAddEdge(EdgeId EId) {
    handleReconnectEdge(EId, G.getEdgeNode1Id(EId));
    handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
  }

  void handleDisconnectEdge(EdgeId EId, NodeId NId) {
    NodeMetadata& NMd = G.getNodeMetadata(NId);
    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
    NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
    promote(NId, NMd);
  }

  void handleReconnectEdge(EdgeId EId, NodeId NId) {
    NodeMetadata& NMd = G.getNodeMetadata(NId);
    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
    NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
  }

  void handleUpdateCosts(EdgeId EId, const Matrix& NewCosts) {
    NodeId N1Id = G.getEdgeNode1Id(EId);
    NodeId N2Id = G.getEdgeNode2Id(EId);
    NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
    NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
    bool Transpose = N1Id != G.getEdgeNode1Id(EId);

    // Metadata are computed incrementally. First, update them
    // by removing the old cost.
    const MatrixMetadata& OldMMd = G.getEdgeCosts(EId).getMetadata();
    N1Md.handleRemoveEdge(OldMMd, Transpose);
    N2Md.handleRemoveEdge(OldMMd, !Transpose);

    // And update now the metadata with the new cost.
    const MatrixMetadata& MMd = NewCosts.getMetadata();
    N1Md.handleAddEdge(MMd, Transpose);
    N2Md.handleAddEdge(MMd, !Transpose);

    // As the metadata may have changed with the update, the nodes may have
    // become ConservativelyAllocatable or OptimallyReducible.
    promote(N1Id, N1Md);
    promote(N2Id, N2Md);
  }

private:
  void promote(NodeId NId, NodeMetadata& NMd) {
    if (G.getNodeDegree(NId) == 3) {
      // This node is becoming optimally reducible.
      moveToOptimallyReducibleNodes(NId);
    } else if (NMd.getReductionState() ==
                   NodeMetadata::NotProvablyAllocatable &&
               NMd.isConservativelyAllocatable()) {
      // This node just became conservatively allocatable.
      moveToConservativelyAllocatableNodes(NId);
    }
  }

  void removeFromCurrentSet(NodeId NId) {
    switch (G.getNodeMetadata(NId).getReductionState()) {
    case NodeMetadata::Unprocessed: break;
    case NodeMetadata::OptimallyReducible:
      assert(OptimallyReducibleNodes.find(NId) !=
                 OptimallyReducibleNodes.end() &&
             "Node not in optimally reducible set.");
      OptimallyReducibleNodes.erase(NId);
      break;
    case NodeMetadata::ConservativelyAllocatable:
      assert(ConservativelyAllocatableNodes.find(NId) !=
                 ConservativelyAllocatableNodes.end() &&
             "Node not in conservatively allocatable set.");
      ConservativelyAllocatableNodes.erase(NId);
      break;
    case NodeMetadata::NotProvablyAllocatable:
      assert(NotProvablyAllocatableNodes.find(NId) !=
                 NotProvablyAllocatableNodes.end() &&
             "Node not in not-provably-allocatable set.");
      NotProvablyAllocatableNodes.erase(NId);
      break;
    }
  }

  void moveToOptimallyReducibleNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    OptimallyReducibleNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
        NodeMetadata::OptimallyReducible);
  }

  void moveToConservativelyAllocatableNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    ConservativelyAllocatableNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
        NodeMetadata::ConservativelyAllocatable);
  }

  void moveToNotProvablyAllocatableNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    NotProvablyAllocatableNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
        NodeMetadata::NotProvablyAllocatable);
  }

  void setup() {
    // Set up worklists.
    for (auto NId : G.nodeIds()) {
      if (G.getNodeDegree(NId) < 3)
        moveToOptimallyReducibleNodes(NId);
      else if (G.getNodeMetadata(NId).isConservativelyAllocatable())
        moveToConservativelyAllocatableNodes(NId);
      else
        moveToNotProvablyAllocatableNodes(NId);
    }
  }

  // Compute a reduction order for the graph by iteratively applying PBQP
  // reduction rules. Locally optimal rules are applied whenever possible (R0,
  // R1, R2). If no locally-optimal rule applies then any conservatively
  // allocatable node is reduced. Finally, if no conservatively allocatable
  // node exists then the node with the lowest spill cost (ties broken by
  // lowest degree, see SpillCostComparator below) is selected.
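  //
  // The rules are implemented in ReductionRules.h: R1 (applyR1) folds a
  // degree-1 node's edge costs into its neighbor's cost vector; R2 (applyR2)
  // replaces a degree-2 node with a single edge between its two neighbors;
  // degree-0 nodes need no rule and are simply pushed. Each rule preserves
  // the optimal solution cost of the remaining graph.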
  std::vector<GraphBase::NodeId> reduce() {
    assert(!G.empty() && "Cannot reduce empty graph.");

    using NodeId = GraphBase::NodeId;
    std::vector<NodeId> NodeStack;

    // Consume worklists.
    while (true) {
      if (!OptimallyReducibleNodes.empty()) {
        NodeSet::iterator NItr = OptimallyReducibleNodes.begin();
        NodeId NId = *NItr;
        OptimallyReducibleNodes.erase(NItr);
        NodeStack.push_back(NId);
        switch (G.getNodeDegree(NId)) {
        case 0:
          break;
        case 1:
          applyR1(G, NId);
          break;
        case 2:
          applyR2(G, NId);
          break;
        default: llvm_unreachable("Not an optimally reducible node.");
        }
      } else if (!ConservativelyAllocatableNodes.empty()) {
        // Conservatively allocatable nodes will never spill. For now just
        // take the first node in the set and push it on the stack. When we
        // start optimizing more heavily for register preferencing, it may
        // be better to push nodes with lower 'expected' or worst-case
        // register costs first (since early nodes are the most
        // constrained).
        NodeSet::iterator NItr = ConservativelyAllocatableNodes.begin();
        NodeId NId = *NItr;
        ConservativelyAllocatableNodes.erase(NItr);
        NodeStack.push_back(NId);
        G.disconnectAllNeighborsFromNode(NId);
      } else if (!NotProvablyAllocatableNodes.empty()) {
        NodeSet::iterator NItr =
            std::min_element(NotProvablyAllocatableNodes.begin(),
                             NotProvablyAllocatableNodes.end(),
                             SpillCostComparator(G));
        NodeId NId = *NItr;
        NotProvablyAllocatableNodes.erase(NItr);
        NodeStack.push_back(NId);
        G.disconnectAllNeighborsFromNode(NId);
      } else
        break;
    }

    return NodeStack;
  }

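  // Orders nodes by spill cost (cost option 0), breaking ties in favor of
  // the node with the smaller degree.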
  class SpillCostComparator {
  public:
    SpillCostComparator(const Graph& G) : G(G) {}

    bool operator()(NodeId N1Id, NodeId N2Id) {
      PBQPNum N1SC = G.getNodeCosts(N1Id)[0];
      PBQPNum N2SC = G.getNodeCosts(N2Id)[0];
      if (N1SC == N2SC)
        return G.getNodeDegree(N1Id) < G.getNodeDegree(N2Id);
      return N1SC < N2SC;
    }

  private:
    const Graph& G;
  };

  Graph& G;
  using NodeSet = std::set<NodeId>;
  NodeSet OptimallyReducibleNodes;
  NodeSet ConservativelyAllocatableNodes;
  NodeSet NotProvablyAllocatableNodes;
};

class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
private:
  using BaseT = PBQP::Graph<RegAllocSolverImpl>;

public:
  PBQPRAGraph(GraphMetadata Metadata) : BaseT(std::move(Metadata)) {}

  /// Dump this graph to dbgs().
  void dump() const;

  /// Dump this graph to an output stream.
  /// @param OS Output stream to print on.
  void dump(raw_ostream &OS) const;

  /// Print a representation of this graph in DOT format.
  /// @param OS Output stream to print on.
  void printDot(raw_ostream &OS) const;
};

inline Solution solve(PBQPRAGraph& G) {
  if (G.empty())
    return Solution();
  RegAllocSolverImpl RegAllocSolver(G);
  return RegAllocSolver.solve();
}
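
// Interpreting a solution, as a sketch (the real mapping back to vregs lives
// in lib/CodeGen/RegAllocPBQP.cpp): option 0 means spill; any other option K
// selects the (K-1)'th entry of the node's allowed-register vector.
//
// \code
//   Solution S = PBQP::RegAlloc::solve(G);
//   unsigned Option = S.getSelection(NId);
//   if (Option != getSpillOptionIdx()) {
//     unsigned PReg = G.getNodeMetadata(NId).getAllowedRegs()[Option - 1];
//     // ... assign PReg to the vreg associated with NId ...
//   }
// \endcode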

} // end namespace RegAlloc
} // end namespace PBQP

/// Create a PBQP register allocator instance.
FunctionPass *
createPBQPRegisterAllocator(char *customPassID = nullptr);

} // end namespace llvm

#endif // LLVM_CODEGEN_REGALLOCPBQP_H