1//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10/// \file
11/// This file exposes an interface to building/using memory SSA to
12/// walk memory instructions using a use/def graph.
13///
14/// The MemorySSA class builds an SSA form that links together memory access
15/// instructions such as loads, stores, atomics, and calls. Additionally, it
16/// does a trivial form of "heap versioning": every time the memory state
17/// changes in the program, we generate a new heap version. It generates
18/// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
19///
20/// As a trivial example,
21/// define i32 @main() #0 {
22/// entry:
23/// %call = call noalias i8* @_Znwm(i64 4) #2
24/// %0 = bitcast i8* %call to i32*
25/// %call1 = call noalias i8* @_Znwm(i64 4) #2
26/// %1 = bitcast i8* %call1 to i32*
27/// store i32 5, i32* %0, align 4
28/// store i32 7, i32* %1, align 4
29/// %2 = load i32* %0, align 4
30/// %3 = load i32* %1, align 4
31/// %add = add nsw i32 %2, %3
32/// ret i32 %add
33/// }
34///
35/// Will become
36/// define i32 @main() #0 {
37/// entry:
38/// ; 1 = MemoryDef(0)
39/// %call = call noalias i8* @_Znwm(i64 4) #3
40/// %2 = bitcast i8* %call to i32*
41/// ; 2 = MemoryDef(1)
42/// %call1 = call noalias i8* @_Znwm(i64 4) #3
43/// %4 = bitcast i8* %call1 to i32*
44/// ; 3 = MemoryDef(2)
45/// store i32 5, i32* %2, align 4
46/// ; 4 = MemoryDef(3)
47/// store i32 7, i32* %4, align 4
48/// ; MemoryUse(3)
49/// %7 = load i32* %2, align 4
50/// ; MemoryUse(4)
51/// %8 = load i32* %4, align 4
52/// %add = add nsw i32 %7, %8
53/// ret i32 %add
54/// }
55///
56/// Given this form, all the stores that could ever affect the load at %8 can be
57/// found by using the MemoryUse associated with it, and walking from use to
58/// def until you hit the top of the function.
59///
60/// Each def also has a list of users associated with it, so you can walk from
61/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
62/// but not the RHS of MemoryDefs. You can see this above at %7, which would
63/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
64/// store, all the MemoryUses on its use lists are may-aliases of that store
65/// (but the MemoryDefs on its use list may not be).
66///
67/// MemoryDefs are not disambiguated because it would require multiple reaching
68/// definitions, which would require multiple phis, and multiple MemoryAccesses
69/// per instruction.
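///
/// As a rough sketch of how a client walks this form (assuming MSSA is an
/// already-built MemorySSA instance and Load is the load instruction of
/// interest; both names are illustrative, not part of this header):
/// \code
///   MemoryUseOrDef *Use = MSSA.getMemoryAccess(Load);
///   MemoryAccess *MA = Use->getDefiningAccess();
///   // Follow def links until we reach the state live on entry to the
///   // function; every def visited is a potential clobber of the load.
///   while (!MSSA.isLiveOnEntryDef(MA)) {
///     MA->print(errs());
///     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
///       MA = MUD->getDefiningAccess();
///     else
///       break; // Hit a MemoryPhi; a full walk would visit its operands.
///   }
/// \endcode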
70//
71//===----------------------------------------------------------------------===//
72
73#ifndef LLVM_ANALYSIS_MEMORYSSA_H
74#define LLVM_ANALYSIS_MEMORYSSA_H
75
76#include "llvm/ADT/DenseMap.h"
77#include "llvm/ADT/GraphTraits.h"
78#include "llvm/ADT/SmallPtrSet.h"
79#include "llvm/ADT/SmallVector.h"
80#include "llvm/ADT/ilist.h"
81#include "llvm/ADT/ilist_node.h"
82#include "llvm/ADT/iterator.h"
83#include "llvm/ADT/iterator_range.h"
84#include "llvm/ADT/simple_ilist.h"
85#include "llvm/Analysis/AliasAnalysis.h"
86#include "llvm/Analysis/MemoryLocation.h"
87#include "llvm/Analysis/PHITransAddr.h"
88#include "llvm/IR/BasicBlock.h"
89#include "llvm/IR/DerivedUser.h"
90#include "llvm/IR/Dominators.h"
91#include "llvm/IR/Module.h"
92#include "llvm/IR/Type.h"
93#include "llvm/IR/Use.h"
94#include "llvm/IR/User.h"
95#include "llvm/IR/Value.h"
96#include "llvm/IR/ValueHandle.h"
97#include "llvm/Pass.h"
98#include "llvm/Support/Casting.h"
99#include <algorithm>
100#include <cassert>
101#include <cstddef>
102#include <iterator>
103#include <memory>
104#include <utility>
105
106namespace llvm {
107
108class Function;
109class Instruction;
110class MemoryAccess;
111class MemorySSAWalker;
112class LLVMContext;
113class raw_ostream;
114
115namespace MSSAHelpers {
116
117struct AllAccessTag {};
118struct DefsOnlyTag {};
119
120} // end namespace MSSAHelpers
121
122enum : unsigned {
123 // Used to signify what the default invalid ID is for MemoryAccess's
124 // getID()
125 INVALID_MEMORYACCESS_ID = -1U
126};
127
128template <class T> class memoryaccess_def_iterator_base;
129using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
130using const_memoryaccess_def_iterator =
131 memoryaccess_def_iterator_base<const MemoryAccess>;
132
133// The base for all memory accesses. All memory accesses in a block are
134// linked together using an intrusive list.
135class MemoryAccess
136 : public DerivedUser,
137 public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
138 public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
139public:
140 using AllAccessType =
141 ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
142 using DefsOnlyType =
143 ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
144
145 MemoryAccess(const MemoryAccess &) = delete;
146 MemoryAccess &operator=(const MemoryAccess &) = delete;
147
148 void *operator new(size_t) = delete;
149
150 // Methods for support type inquiry through isa, cast, and
151 // dyn_cast
152 static bool classof(const Value *V) {
153 unsigned ID = V->getValueID();
154 return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
155 }
156
157 BasicBlock *getBlock() const { return Block; }
158
159 void print(raw_ostream &OS) const;
160 void dump() const;
161
162 /// The user iterators for a memory access
163 using iterator = user_iterator;
164 using const_iterator = const_user_iterator;
165
166 /// This iterator walks over all of the defs in a given
167 /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
168 /// MemoryUse/MemoryDef, this walks the defining access.
169 memoryaccess_def_iterator defs_begin();
170 const_memoryaccess_def_iterator defs_begin() const;
171 memoryaccess_def_iterator defs_end();
172 const_memoryaccess_def_iterator defs_end() const;
173
174 /// Get the iterators for the all access list and the defs only list.
175 /// We default to the all access list.
176 AllAccessType::self_iterator getIterator() {
177 return this->AllAccessType::getIterator();
178 }
179 AllAccessType::const_self_iterator getIterator() const {
180 return this->AllAccessType::getIterator();
181 }
182 AllAccessType::reverse_self_iterator getReverseIterator() {
183 return this->AllAccessType::getReverseIterator();
184 }
185 AllAccessType::const_reverse_self_iterator getReverseIterator() const {
186 return this->AllAccessType::getReverseIterator();
187 }
188 DefsOnlyType::self_iterator getDefsIterator() {
189 return this->DefsOnlyType::getIterator();
190 }
191 DefsOnlyType::const_self_iterator getDefsIterator() const {
192 return this->DefsOnlyType::getIterator();
193 }
194 DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
195 return this->DefsOnlyType::getReverseIterator();
196 }
197 DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
198 return this->DefsOnlyType::getReverseIterator();
199 }
200
201protected:
202 friend class MemoryDef;
203 friend class MemoryPhi;
204 friend class MemorySSA;
205 friend class MemoryUse;
206 friend class MemoryUseOrDef;
207
208 /// Used by MemorySSA to change the block of a MemoryAccess when it is
209 /// moved.
210 void setBlock(BasicBlock *BB) { Block = BB; }
211
212 /// Used for debugging and tracking things about MemoryAccesses.
213 /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
214 inline unsigned getID() const;
215
216 MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
217 BasicBlock *BB, unsigned NumOperands)
218 : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
219 Block(BB) {}
220
221 // Use deleteValue() to delete a generic MemoryAccess.
222 ~MemoryAccess() = default;
223
224private:
225 BasicBlock *Block;
226};
227
228template <>
229struct ilist_alloc_traits<MemoryAccess> {
230 static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
231};
232
233inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
234 MA.print(OS);
235 return OS;
236}
237
238/// Class that has the common methods + fields of memory uses/defs. It's
239/// a little awkward to have, but there are many cases where we want either a
240/// use or def, and there are many cases where uses are needed (defs aren't
241/// acceptable), and vice-versa.
242///
243/// This class should never be instantiated directly; make a MemoryUse or
244/// MemoryDef instead.
245class MemoryUseOrDef : public MemoryAccess {
246public:
247 void *operator new(size_t) = delete;
248
249 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
250
251 /// Get the instruction that this MemoryUseOrDef represents.
252 Instruction *getMemoryInst() const { return MemoryInstruction; }
253
254 /// Get the access that produces the memory state used by this Use.
255 MemoryAccess *getDefiningAccess() const { return getOperand(0); }
256
257 static bool classof(const Value *MA) {
258 return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
259 }
260
261 // Sadly, these have to be public because they are needed in some of the
262 // iterators.
263 inline bool isOptimized() const;
264 inline MemoryAccess *getOptimized() const;
265 inline void setOptimized(MemoryAccess *);
266
267 // Retrieve AliasResult type of the optimized access. Ideally this would be
268 // returned by the caching walker and may go away in the future.
269 Optional<AliasResult> getOptimizedAccessType() const {
270 return OptimizedAccessAlias;
271 }
272
273 /// Reset the ID of what this MemoryUse was optimized to, causing it to
274 /// be rewalked by the walker if necessary.
275 /// This really should only be called by tests.
276 inline void resetOptimized();
277
278protected:
279 friend class MemorySSA;
280 friend class MemorySSAUpdater;
281
282 MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
283 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
284 unsigned NumOperands)
285 : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands),
286 MemoryInstruction(MI), OptimizedAccessAlias(MayAlias) {
287 setDefiningAccess(DMA);
288 }
289
290 // Use deleteValue() to delete a generic MemoryUseOrDef.
291 ~MemoryUseOrDef() = default;
292
293 void setOptimizedAccessType(Optional<AliasResult> AR) {
294 OptimizedAccessAlias = AR;
295 }
296
297 void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false,
298 Optional<AliasResult> AR = MayAlias) {
299 if (!Optimized) {
300 setOperand(0, DMA);
301 return;
302 }
303 setOptimized(DMA);
304 setOptimizedAccessType(AR);
305 }
306
307private:
308 Instruction *MemoryInstruction;
309 Optional<AliasResult> OptimizedAccessAlias;
310};
311
312/// Represents read-only accesses to memory
313///
314/// In particular, the set of Instructions that will be represented by
315/// MemoryUse's is exactly the set of Instructions for which
316/// AliasAnalysis::getModRefInfo returns "Ref".
317class MemoryUse final : public MemoryUseOrDef {
318public:
319 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
320
321 MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
322 : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB,
323 /*NumOperands=*/1) {}
324
325 // allocate space for exactly one operand
326 void *operator new(size_t s) { return User::operator new(s, 1); }
327
328 static bool classof(const Value *MA) {
329 return MA->getValueID() == MemoryUseVal;
330 }
331
332 void print(raw_ostream &OS) const;
333
334 void setOptimized(MemoryAccess *DMA) {
335 OptimizedID = DMA->getID();
336 setOperand(0, DMA);
337 }
338
339 bool isOptimized() const {
340 return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
341 }
342
343 MemoryAccess *getOptimized() const {
344 return getDefiningAccess();
345 }
346
347 void resetOptimized() {
348 OptimizedID = INVALID_MEMORYACCESS_ID;
349 }
350
351protected:
352 friend class MemorySSA;
353
354private:
355 static void deleteMe(DerivedUser *Self);
356
357 unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
358};
359
360template <>
361struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
362DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
363
364/// Represents a read-write access to memory, whether it is a must-alias,
365/// or a may-alias.
366///
367/// In particular, the set of Instructions that will be represented by
368/// MemoryDef's is exactly the set of Instructions for which
369/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
370/// Note that, in order to provide def-def chains, all defs also have a use
371/// associated with them. This use points to the nearest reaching
372/// MemoryDef/MemoryPhi.
373class MemoryDef final : public MemoryUseOrDef {
374public:
375 friend class MemorySSA;
376
377 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
378
379 MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
380 unsigned Ver)
381 : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB,
382 /*NumOperands=*/2),
383 ID(Ver) {}
384
385 // allocate space for exactly two operands
386 void *operator new(size_t s) { return User::operator new(s, 2); }
387
388 static bool classof(const Value *MA) {
389 return MA->getValueID() == MemoryDefVal;
390 }
391
392 void setOptimized(MemoryAccess *MA) {
393 setOperand(1, MA);
394 OptimizedID = MA->getID();
395 }
396
397 MemoryAccess *getOptimized() const {
398 return cast_or_null<MemoryAccess>(getOperand(1));
399 }
400
401 bool isOptimized() const {
402 return getOptimized() && OptimizedID == getOptimized()->getID();
403 }
404
405 void resetOptimized() {
406 OptimizedID = INVALID_MEMORYACCESS_ID;
407 }
408
409 void print(raw_ostream &OS) const;
410
411 unsigned getID() const { return ID; }
412
413private:
414 static void deleteMe(DerivedUser *Self);
415
416 const unsigned ID;
417 unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
418};
419
420template <>
421struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
422DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
423
424template <>
425struct OperandTraits<MemoryUseOrDef> {
426 static Use *op_begin(MemoryUseOrDef *MUD) {
427 if (auto *MU = dyn_cast<MemoryUse>(MUD))
428 return OperandTraits<MemoryUse>::op_begin(MU);
429 return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD));
430 }
431
432 static Use *op_end(MemoryUseOrDef *MUD) {
433 if (auto *MU = dyn_cast<MemoryUse>(MUD))
434 return OperandTraits<MemoryUse>::op_end(MU);
435 return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD));
436 }
437
438 static unsigned operands(const MemoryUseOrDef *MUD) {
439 if (const auto *MU = dyn_cast<MemoryUse>(MUD))
440 return OperandTraits<MemoryUse>::operands(MU);
441 return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD));
442 }
443};
444DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
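
// Illustrative note, not part of the API: a MemoryAccess is always one of
// the three concrete kinds, distinguished with the usual LLVM casting
// utilities. A minimal sketch, assuming MA is some MemoryAccess *:
//   if (auto *MU = dyn_cast<MemoryUse>(MA))       // read-only access
//     (void)MU->getDefiningAccess();
//   else if (auto *MD = dyn_cast<MemoryDef>(MA))  // may-write access
//     (void)MD->getID();
//   else                                          // otherwise a MemoryPhi
//     (void)cast<MemoryPhi>(MA)->getNumIncomingValues();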
445
446/// Represents phi nodes for memory accesses.
447///
448/// These have the same semantic as regular phi nodes, with the exception that
449/// only one phi will ever exist in a given basic block.
450/// Guaranteeing one phi per block means guaranteeing there is only ever one
451/// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
452/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
453/// a MemoryPhi's operands.
454/// That is, given
455/// if (a) {
456/// store %a
457/// store %b
458/// }
459/// it *must* be transformed into
460/// if (a) {
461/// 1 = MemoryDef(liveOnEntry)
462/// store %a
463/// 2 = MemoryDef(1)
464/// store %b
465/// }
466/// and *not*
467/// if (a) {
468/// 1 = MemoryDef(liveOnEntry)
469/// store %a
470/// 2 = MemoryDef(liveOnEntry)
471/// store %b
472/// }
473/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
474/// end of the branch, and if there are not two phi nodes, one will be
475/// disconnected completely from the SSA graph below that point.
476/// Because MemoryUse's do not generate new definitions, they do not have this
477/// issue.
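///
/// A minimal sketch of inspecting a MemoryPhi (assuming Phi is a MemoryPhi *
/// obtained elsewhere; purely illustrative):
/// \code
///   for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
///     BasicBlock *Pred = Phi->getIncomingBlock(I);
///     MemoryAccess *Incoming = Phi->getIncomingValue(I);
///     // Incoming is the memory state that reaches this block along Pred.
///   }
/// \endcode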
478class MemoryPhi final : public MemoryAccess {
479 // allocate space for exactly zero operands
480 void *operator new(size_t s) { return User::operator new(s); }
481
482public:
483 /// Provide fast operand accessors
484 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
485
486 MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
487 : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
488 ReservedSpace(NumPreds) {
489 allocHungoffUses(ReservedSpace);
490 }
491
492 // Block iterator interface. This provides access to the list of incoming
493 // basic blocks, which parallels the list of incoming values.
494 using block_iterator = BasicBlock **;
495 using const_block_iterator = BasicBlock *const *;
496
497 block_iterator block_begin() {
498 auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
499 return reinterpret_cast<block_iterator>(Ref + 1);
500 }
501
502 const_block_iterator block_begin() const {
503 const auto *Ref =
504 reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
505 return reinterpret_cast<const_block_iterator>(Ref + 1);
506 }
507
508 block_iterator block_end() { return block_begin() + getNumOperands(); }
509
510 const_block_iterator block_end() const {
511 return block_begin() + getNumOperands();
512 }
513
514 iterator_range<block_iterator> blocks() {
515 return make_range(block_begin(), block_end());
516 }
517
518 iterator_range<const_block_iterator> blocks() const {
519 return make_range(block_begin(), block_end());
520 }
521
522 op_range incoming_values() { return operands(); }
523
524 const_op_range incoming_values() const { return operands(); }
525
526 /// Return the number of incoming edges
527 unsigned getNumIncomingValues() const { return getNumOperands(); }
528
529 /// Return incoming value number \p I.
530 MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
531 void setIncomingValue(unsigned I, MemoryAccess *V) {
532 assert(V && "PHI node got a null value!");
533 setOperand(I, V);
534 }
535
536 static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
537 static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
538
539 /// Return incoming basic block number \p I.
540 BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
541
542 /// Return incoming basic block corresponding
543 /// to an operand of the PHI.
544 BasicBlock *getIncomingBlock(const Use &U) const {
545 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
546 return getIncomingBlock(unsigned(&U - op_begin()));
547 }
548
549 /// Return incoming basic block corresponding
550 /// to the given value use iterator.
551 BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
552 return getIncomingBlock(I.getUse());
553 }
554
555 void setIncomingBlock(unsigned I, BasicBlock *BB) {
556 assert(BB && "PHI node got a null basic block!");
557 block_begin()[I] = BB;
558 }
559
560 /// Add an incoming value to the end of the PHI list
561 void addIncoming(MemoryAccess *V, BasicBlock *BB) {
562 if (getNumOperands() == ReservedSpace)
563 growOperands(); // Get more space!
564 // Initialize some new operands.
565 setNumHungOffUseOperands(getNumOperands() + 1);
566 setIncomingValue(getNumOperands() - 1, V);
567 setIncomingBlock(getNumOperands() - 1, BB);
568 }
569
570 /// Return the first index of the specified basic
571 /// block in the value list for this PHI. Returns -1 if no instance.
572 int getBasicBlockIndex(const BasicBlock *BB) const {
573 for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
574 if (block_begin()[I] == BB)
575 return I;
576 return -1;
577 }
578
579 MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
580 int Idx = getBasicBlockIndex(BB);
581 assert(Idx >= 0 && "Invalid basic block argument!");
582 return getIncomingValue(Idx);
583 }
584
585 // After deleting incoming position I, the order of the incoming values may be changed.
586 void unorderedDeleteIncoming(unsigned I) {
587 unsigned E = getNumOperands();
588 assert(I < E && "Cannot remove out of bounds Phi entry.");
589 // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
590 // itself should be deleted.
591 assert(E >= 2 && "Can only remove incoming values in MemoryPhis with "
592 "at least 2 values.");
593 setIncomingValue(I, getIncomingValue(E - 1));
594 setIncomingBlock(I, block_begin()[E - 1]);
595 setOperand(E - 1, nullptr);
596 block_begin()[E - 1] = nullptr;
597 setNumHungOffUseOperands(getNumOperands() - 1);
598 }
599
600 // After deleting entries that satisfy Pred, remaining entries may have
601 // changed order.
602 template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
603 for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
604 if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
605 unorderedDeleteIncoming(I);
606 E = getNumOperands();
607 --I;
608 }
609 assert(getNumOperands() >= 1 &&
610 "Cannot remove all incoming blocks in a MemoryPhi.");
611 }
612
613 // After deleting incoming block BB, the incoming blocks order may be changed.
614 void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
615 unorderedDeleteIncomingIf(
616 [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
617 }
618
619 // After deleting incoming memory access MA, the incoming accesses order may
620 // be changed.
621 void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
622 unorderedDeleteIncomingIf(
623 [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
624 }
625
626 static bool classof(const Value *V) {
627 return V->getValueID() == MemoryPhiVal;
628 }
629
630 void print(raw_ostream &OS) const;
631
632 unsigned getID() const { return ID; }
633
634protected:
635 friend class MemorySSA;
636
637 /// This is more complicated than the generic
638 /// User::allocHungoffUses, because we have to allocate Uses for the incoming
639 /// values and pointers to the incoming blocks, all in one allocation.
640 void allocHungoffUses(unsigned N) {
641 User::allocHungoffUses(N, /* IsPhi */ true);
642 }
643
644private:
645 // For debugging only
646 const unsigned ID;
647 unsigned ReservedSpace;
648
649 /// This grows the operand list in response to a push_back style of
650 /// operation. This grows the number of ops by 1.5 times.
651 void growOperands() {
652 unsigned E = getNumOperands();
653 // 2 op PHI nodes are VERY common, so reserve at least enough for that.
654 ReservedSpace = std::max(E + E / 2, 2u);
655 growHungoffUses(ReservedSpace, /* IsPhi */ true);
656 }
657
658 static void deleteMe(DerivedUser *Self);
659};
660
661inline unsigned MemoryAccess::getID() const {
662 assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
663 "only memory defs and phis have ids");
664 if (const auto *MD = dyn_cast<MemoryDef>(this))
665 return MD->getID();
666 return cast<MemoryPhi>(this)->getID();
667}
668
669inline bool MemoryUseOrDef::isOptimized() const {
670 if (const auto *MD = dyn_cast<MemoryDef>(this))
671 return MD->isOptimized();
672 return cast<MemoryUse>(this)->isOptimized();
673}
674
675inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
676 if (const auto *MD = dyn_cast<MemoryDef>(this))
677 return MD->getOptimized();
678 return cast<MemoryUse>(this)->getOptimized();
679}
680
681inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
682 if (auto *MD = dyn_cast<MemoryDef>(this))
683 MD->setOptimized(MA);
684 else
685 cast<MemoryUse>(this)->setOptimized(MA);
686}
687
688inline void MemoryUseOrDef::resetOptimized() {
689 if (auto *MD = dyn_cast<MemoryDef>(this))
690 MD->resetOptimized();
691 else
692 cast<MemoryUse>(this)->resetOptimized();
693}
694
695template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
696DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
697
698/// Encapsulates MemorySSA, including all data associated with memory
699/// accesses.
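///
/// A rough usage sketch (assuming F, AA, DT and an instruction I are already
/// available; the names are illustrative only):
/// \code
///   MemorySSA MSSA(F, &AA, &DT);
///   if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I))
///     MA->print(errs());                     // e.g. "MemoryUse(1)"
///   if (MemoryPhi *Phi = MSSA.getMemoryAccess(I.getParent()))
///     Phi->print(errs());                    // e.g. "2 = MemoryPhi(...)"
/// \endcode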
700class MemorySSA {
701public:
702 MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
703 ~MemorySSA();
704
705 MemorySSAWalker *getWalker();
706
707 /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
708 /// access associated with it. If passed a basic block, this gets the
709 /// MemoryPhi node that exists for that block, if there is one. Otherwise,
710 /// this will get a MemoryUseOrDef.
711 MemoryUseOrDef *getMemoryAccess(const Instruction *I) const {
712 return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
713 }
714
715 MemoryPhi *getMemoryAccess(const BasicBlock *BB) const {
716 return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
717 }
718
719 void dump() const;
720 void print(raw_ostream &) const;
721
722 /// Return true if \p MA represents the live on entry value
723 ///
724 /// Loads and stores from pointer arguments and other global values may be
725 /// defined by memory operations that do not occur in the current function, so
726 /// they may be live on entry to the function. MemorySSA represents such
727 /// memory state by the live on entry definition, which is guaranteed to occur
728 /// before any other memory access in the function.
729 inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
730 return MA == LiveOnEntryDef.get();
731 }
732
733 inline MemoryAccess *getLiveOnEntryDef() const {
734 return LiveOnEntryDef.get();
735 }
736
737 // Sadly, iplists, by default, own and delete pointers added to the
738 // list. It's not currently possible to have two iplists for the same type,
739 // where one owns the pointers, and one does not. This is because the traits
740 // are per-type, not per-tag. If this ever changes, we should make the
741 // DefList an iplist.
742 using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
743 using DefsList =
744 simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
745
746 /// Return the list of MemoryAccess's for a given basic block.
747 ///
748 /// This list is not modifiable by the user.
749 const AccessList *getBlockAccesses(const BasicBlock *BB) const {
750 return getWritableBlockAccesses(BB);
751 }
752
753 /// Return the list of MemoryDef's and MemoryPhi's for a given basic
754 /// block.
755 ///
756 /// This list is not modifiable by the user.
757 const DefsList *getBlockDefs(const BasicBlock *BB) const {
758 return getWritableBlockDefs(BB);
759 }
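  // Illustrative sketch: visiting every access in a block (BB is assumed to
  // be a BasicBlock *; the returned list is null if the block has no
  // accesses):
  //   if (const AccessList *Accesses = getBlockAccesses(BB))
  //     for (const MemoryAccess &MA : *Accesses)
  //       MA.print(errs());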
760
761 /// Given two memory accesses in the same basic block, determine
762 /// whether MemoryAccess \p A dominates MemoryAccess \p B.
763 bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
764
765 /// Given two memory accesses in potentially different blocks,
766 /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
767 bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
768
769 /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
770 /// dominates Use \p B.
771 bool dominates(const MemoryAccess *A, const Use &B) const;
772
773 /// Verify that MemorySSA is self consistent (i.e., definitions dominate
774 /// all uses, uses appear in the right places). This is used by unit tests.
775 void verifyMemorySSA() const;
776
777 /// Check clobber sanity for an access.
778 void checkClobberSanityAccess(const MemoryAccess *MA) const;
779
780 /// Used in various insertion functions to specify whether we are talking
781 /// about the beginning or end of a block.
782 enum InsertionPlace { Beginning, End };
783
784protected:
785 // Used by the Memory SSA annotator, dumpers, and wrapper pass
786 friend class MemorySSAAnnotatedWriter;
787 friend class MemorySSAPrinterLegacyPass;
788 friend class MemorySSAUpdater;
789
790 void verifyDefUses(Function &F) const;
791 void verifyDomination(Function &F) const;
792 void verifyOrdering(Function &F) const;
793 void verifyDominationNumbers(const Function &F) const;
794 void verifyClobberSanity(const Function &F) const;
795
796 // This is used by the use optimizer and updater.
797 AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
798 auto It = PerBlockAccesses.find(BB);
799 return It == PerBlockAccesses.end() ? nullptr : It->second.get();
800 }
801
802 // This is used by the use optimizer and updater.
803 DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
804 auto It = PerBlockDefs.find(BB);
805 return It == PerBlockDefs.end() ? nullptr : It->second.get();
806 }
807
808 // These are used by the updater to perform various internal MemorySSA
809 // machinations. They do not always leave the IR in a correct state, and
810 // rely on the updater to fix up what they break, so they are not public.
811
812 void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
813 void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);
814
815 // Rename the dominator tree branch rooted at BB.
816 void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
817 SmallPtrSetImpl<BasicBlock *> &Visited) {
818 renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
819 }
820
821 void removeFromLookups(MemoryAccess *);
822 void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
823 void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
824 InsertionPlace);
825 void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
826 AccessList::iterator);
827 MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *,
828 const MemoryUseOrDef *Template = nullptr);
829
830private:
831 class CachingWalker;
832 class OptimizeUses;
833
834 CachingWalker *getWalkerImpl();
835 void buildMemorySSA();
836 void optimizeUses();
837
838 void prepareForMoveTo(MemoryAccess *, BasicBlock *);
839 void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
840
841 using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
842 using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;
843
844 void
845 determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
846 void markUnreachableAsLiveOnEntry(BasicBlock *BB);
847 bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
848 MemoryPhi *createMemoryPhi(BasicBlock *BB);
849 MemoryUseOrDef *createNewAccess(Instruction *,
850 const MemoryUseOrDef *Template = nullptr);
851 MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
852 void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
853 MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
854 void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
855 void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
856 SmallPtrSetImpl<BasicBlock *> &Visited,
857 bool SkipVisited = false, bool RenameAllUses = false);
858 AccessList *getOrCreateAccessList(const BasicBlock *);
859 DefsList *getOrCreateDefsList(const BasicBlock *);
860 void renumberBlock(const BasicBlock *) const;
861 AliasAnalysis *AA;
862 DominatorTree *DT;
863 Function &F;
864
865 // Memory SSA mappings
866 DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
867
868 // These two mappings contain the main block to access/def mappings for
869 // MemorySSA. The list contained in PerBlockAccesses really owns all the
870 // MemoryAccesses.
871 // Both maps maintain the invariant that if a block is found in them, the
872 // corresponding list is not empty, and if a block is not found in them, the
873 // corresponding list is empty.
874 AccessMap PerBlockAccesses;
875 DefsMap PerBlockDefs;
876 std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;
877
878 // Domination mappings
879 // Note that the numbering is local to a block, even though the map is
880 // global.
881 mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
882 mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
883
884 // Memory SSA building info
885 std::unique_ptr<CachingWalker> Walker;
886 unsigned NextID;
887};
888
889// Internal MemorySSA utils, for use by MemorySSA classes and walkers
890class MemorySSAUtil {
891protected:
892 friend class GVNHoist;
893 friend class MemorySSAWalker;
894
895 // This function should not be used by new passes.
896 static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
897 AliasAnalysis &AA);
898};
899
900// This pass does eager building and then printing of MemorySSA. It is used by
901// the tests to be able to build, dump, and verify Memory SSA.
902class MemorySSAPrinterLegacyPass : public FunctionPass {
903public:
904 MemorySSAPrinterLegacyPass();
905
906 bool runOnFunction(Function &) override;
907 void getAnalysisUsage(AnalysisUsage &AU) const override;
908
909 static char ID;
910};
911
912/// An analysis that produces \c MemorySSA for a function.
913///
914class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
915 friend AnalysisInfoMixin<MemorySSAAnalysis>;
916
917 static AnalysisKey Key;
918
919public:
920 // Wrap MemorySSA result to ensure address stability of internal MemorySSA
921 // pointers after construction. Use a wrapper class instead of plain
922 // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
923 struct Result {
924 Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
925
926 MemorySSA &getMSSA() { return *MSSA.get(); }
927
928 std::unique_ptr<MemorySSA> MSSA;
929 };
930
931 Result run(Function &F, FunctionAnalysisManager &AM);
932};
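
// A short usage sketch under the new pass manager (assuming this runs inside
// a function pass with a FunctionAnalysisManager AM; illustrative only):
//   MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//   MSSA.verifyMemorySSA();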
933
934/// Printer pass for \c MemorySSA.
935class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
936 raw_ostream &OS;
937
938public:
939 explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
940
941 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
942};
943
944/// Verifier pass for \c MemorySSA.
945struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
946 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
947};
948
949/// Legacy analysis pass which computes \c MemorySSA.
950class MemorySSAWrapperPass : public FunctionPass {
951public:
952 MemorySSAWrapperPass();
953
954 static char ID;
955
956 bool runOnFunction(Function &) override;
957 void releaseMemory() override;
958 MemorySSA &getMSSA() { return *MSSA; }
959 const MemorySSA &getMSSA() const { return *MSSA; }
960
961 void getAnalysisUsage(AnalysisUsage &AU) const override;
962
963 void verifyAnalysis() const override;
964 void print(raw_ostream &OS, const Module *M = nullptr) const override;
965
966private:
967 std::unique_ptr<MemorySSA> MSSA;
968};
969
970/// This is the generic walker interface for walkers of MemorySSA.
971/// Walkers are used to further disambiguate the def-use chains
972/// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
973/// you.
974/// In particular, while the def-use chains provide basic information, and are
975/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
976/// MemoryUse as AliasAnalysis considers it, a user may want better or other
977/// information. In particular, they may want to use SCEV info to further
978/// disambiguate memory accesses, or they may want the nearest dominating
979/// may-aliasing MemoryDef for a call or a store. This API enables a
980/// standardized interface to getting and using that info.
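///
/// A small sketch of typical use (assuming MSSA has been built and SI is a
/// store instruction; illustrative only):
/// \code
///   MemorySSAWalker *Walker = MSSA.getWalker();
///   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(SI);
///   if (MSSA.isLiveOnEntryDef(Clobber)) {
///     // Nothing else in this function clobbers SI's location before it.
///   }
/// \endcode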
981class MemorySSAWalker {
982public:
983 MemorySSAWalker(MemorySSA *);
984 virtual ~MemorySSAWalker() = default;
985
986 using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
987
988 /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
989 /// will give you the nearest dominating MemoryAccess that Mod's the location
990 /// the instruction accesses (by skipping any def which AA can prove does not
991 /// alias the location(s) accessed by the instruction given).
992 ///
993 /// Note that this will return a single access, and it must dominate the
994 /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
995 /// this will return the MemoryPhi, not the operand. This means that
996 /// given:
997 /// if (a) {
998 /// 1 = MemoryDef(liveOnEntry)
999 /// store %a
1000 /// } else {
1001 /// 2 = MemoryDef(liveOnEntry)
1002 /// store %b
1003 /// }
1004 /// 3 = MemoryPhi(2, 1)
1005 /// MemoryUse(3)
1006 /// load %a
1007 ///
1008 /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
1009 /// in the if (a) branch.
1010 MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
1011 MemoryAccess *MA = MSSA->getMemoryAccess(I);
1012 assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
1013 return getClobberingMemoryAccess(MA);
1014 }
1015
1016 /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
1017 /// but takes a MemoryAccess instead of an Instruction.
1018 virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
1019
1020 /// Given a potentially clobbering memory access and a new location,
1021 /// calling this will give you the nearest dominating clobbering MemoryAccess
1022 /// (by skipping non-aliasing def links).
1023 ///
1024 /// This version of the function is mainly used to disambiguate phi translated
1025 /// pointers, where the value of a pointer may have changed from the initial
1026 /// memory access. Note that this expects to be handed either a MemoryUse,
1027 /// or an already potentially clobbering access. Unlike the above API, if
1028 /// given a MemoryDef that clobbers the pointer as the starting access, it
1029 /// will return that MemoryDef, whereas the above would return the clobber
1030 /// starting from the use side of the memory def.
1031 virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
1032 const MemoryLocation &) = 0;
1033
1034 /// Given a memory access, invalidate anything this walker knows about
1035 /// that access.
1036 /// This API is used by walkers that store information to perform basic cache
1037 /// invalidation. This will be called by MemorySSA at appropriate times for
1038 /// the walker it uses or returns.
1039 virtual void invalidateInfo(MemoryAccess *) {}
1040
1041 virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }
1042
1043protected:
1044 friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
1045 // constructor.
1046 MemorySSA *MSSA;
1047};
1048
1049/// A MemorySSAWalker that does no alias queries, or anything else. It
1050/// simply returns the links as they were constructed by the builder.
1051class DoNothingMemorySSAWalker final : public MemorySSAWalker {
1052public:
1053 // Keep the overrides below from hiding the Instruction overload of
1054 // getClobberingMemoryAccess.
1055 using MemorySSAWalker::getClobberingMemoryAccess;
1056
1057 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
1058 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
1059 const MemoryLocation &) override;
1060};
1061
1062using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
1063using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
1064
1065/// Iterator base class used to implement const and non-const iterators
1066/// over the defining accesses of a MemoryAccess.
1067template <class T>
1068class memoryaccess_def_iterator_base
1069 : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
1070 std::forward_iterator_tag, T, ptrdiff_t, T *,
1071 T *> {
1072 using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
1073
1074public:
1075 memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
1076 memoryaccess_def_iterator_base() = default;
1077
1078 bool operator==(const memoryaccess_def_iterator_base &Other) const {
1079 return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
1080 }
1081
1082 // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
1083 // block from the operand in constant time (In a PHINode, the uselist has
1084 // both, so it's just subtraction). We provide it as part of the
1085 // iterator to avoid callers having to linear walk to get the block.
1086 // If the operation becomes constant time on MemoryPHI's, this bit of
1087 // abstraction breaking should be removed.
1088 BasicBlock *getPhiArgBlock() const {
1089 MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
1090 assert(MP && "Tried to get phi arg block when not iterating over a PHI");
1091 return MP->getIncomingBlock(ArgNo);
1092 }
1093
1094 typename BaseT::iterator::pointer operator*() const {
1095 assert(Access && "Tried to access past the end of our iterator");
1096 // Go to the first argument for phis, and the defining access for everything
1097 // else.
1098 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
1099 return MP->getIncomingValue(ArgNo);
1100 return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
1101 }
1102
1103 using BaseT::operator++;
1104 memoryaccess_def_iterator &operator++() {
1105 assert(Access && "Hit end of iterator");
1106 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
1107 if (++ArgNo >= MP->getNumIncomingValues()) {
1108 ArgNo = 0;
1109 Access = nullptr;
1110 }
1111 } else {
1112 Access = nullptr;
1113 }
1114 return *this;
1115 }
1116
1117private:
1118 T *Access = nullptr;
1119 unsigned ArgNo = 0;
1120};
1121
1122inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
1123 return memoryaccess_def_iterator(this);
1124}
1125
1126inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
1127 return const_memoryaccess_def_iterator(this);
1128}
1129
1130inline memoryaccess_def_iterator MemoryAccess::defs_end() {
1131 return memoryaccess_def_iterator();
1132}
1133
1134inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
1135 return const_memoryaccess_def_iterator();
1136}
1137
1138/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
1139/// and uses in the inverse case.
1140template <> struct GraphTraits<MemoryAccess *> {
1141 using NodeRef = MemoryAccess *;
1142 using ChildIteratorType = memoryaccess_def_iterator;
1143
1144 static NodeRef getEntryNode(NodeRef N) { return N; }
1145 static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
1146 static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
1147};
1148
1149template <> struct GraphTraits<Inverse<MemoryAccess *>> {
1150 using NodeRef = MemoryAccess *;
1151 using ChildIteratorType = MemoryAccess::iterator;
1152
1153 static NodeRef getEntryNode(NodeRef N) { return N; }
1154 static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
1155 static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
1156};
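
// Because of these specializations, generic graph utilities can traverse the
// memory def graph. A hedged sketch (depth_first comes from
// llvm/ADT/DepthFirstIterator.h, which is not included by this header; Start
// is assumed to be a MemoryAccess *):
//   for (MemoryAccess *MA : depth_first(Start))
//     MA->print(errs());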
1157
1158/// Provide an iterator that walks defs, giving both the memory access,
1159/// and the current pointer location, updating the pointer location as it
1160/// changes due to phi node translation.
1161///
1162/// This iterator, while somewhat specialized, is what most clients actually
1163/// want when walking upwards through MemorySSA def chains. It takes a pair of
1164/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
1165/// memory location through phi nodes for the user.
1166class upward_defs_iterator
1167 : public iterator_facade_base<upward_defs_iterator,
1168 std::forward_iterator_tag,
1169 const MemoryAccessPair> {
1170 using BaseT = upward_defs_iterator::iterator_facade_base;
1171
1172public:
1173 upward_defs_iterator(const MemoryAccessPair &Info)
1174 : DefIterator(Info.first), Location(Info.second),
1175 OriginalAccess(Info.first) {
1176 CurrentPair.first = nullptr;
1177
1178 WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
1179 fillInCurrentPair();
1180 }
1181
1182 upward_defs_iterator() { CurrentPair.first = nullptr; }
1183
1184 bool operator==(const upward_defs_iterator &Other) const {
1185 return DefIterator == Other.DefIterator;
1186 }
1187
1188 BaseT::iterator::reference operator*() const {
1189 assert(DefIterator != OriginalAccess->defs_end() &&
1190 "Tried to access past the end of our iterator");
1191 return CurrentPair;
1192 }
1193
1194 using BaseT::operator++;
1195 upward_defs_iterator &operator++() {
1196 assert(DefIterator != OriginalAccess->defs_end() &&
1197 "Tried to access past the end of the iterator");
1198 ++DefIterator;
1199 if (DefIterator != OriginalAccess->defs_end())
1200 fillInCurrentPair();
1201 return *this;
1202 }
1203
1204 BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
1205
1206private:
1207 void fillInCurrentPair() {
1208 CurrentPair.first = *DefIterator;
1209 if (WalkingPhi && Location.Ptr) {
1210 PHITransAddr Translator(
1211 const_cast<Value *>(Location.Ptr),
1212 OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
1213 if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
1214 DefIterator.getPhiArgBlock(), nullptr,
1215 false))
1216 if (Translator.getAddr() != Location.Ptr) {
1217 CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
1218 return;
1219 }
1220 }
1221 CurrentPair.second = Location;
1222 }
1223
1224 MemoryAccessPair CurrentPair;
1225 memoryaccess_def_iterator DefIterator;
1226 MemoryLocation Location;
1227 MemoryAccess *OriginalAccess = nullptr;
1228 bool WalkingPhi = false;
1229};
1230
1231inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
1232 return upward_defs_iterator(Pair);
1233}
1234
1235inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
1236
1237inline iterator_range<upward_defs_iterator>
1238upward_defs(const MemoryAccessPair &Pair) {
1239 return make_range(upward_defs_begin(Pair), upward_defs_end());
1240}
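
// A hedged example of walking upward defs (MA is assumed to be a
// MemoryAccess * and Loc a MemoryLocation for the pointer being tracked):
//   for (const MemoryAccessPair &Pair : upward_defs({MA, Loc})) {
//     MemoryAccess *Def = Pair.first;        // the next defining access
//     const MemoryLocation &L = Pair.second; // location, possibly translated
//   }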
1241
1242/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
1243/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
1244/// comparing against a null def_chain_iterator, this will compare equal only
1245/// after walking said Phi/liveOnEntry.
1246///
1247/// The UseOptimizedChain flag specifies whether to walk the clobbering
1248/// access chain, or all the accesses.
1249///
1250/// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
1251/// a MemoryDef will walk all MemoryDefs above it in the program until it hits
1252/// a phi node. The optimized chain walks the clobbering access of a store.
1253/// So if you are just trying to find, given a store, what the next
1254/// thing that would clobber the same memory is, you want the optimized chain.
1255template <class T, bool UseOptimizedChain = false>
1256struct def_chain_iterator
1257 : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
1258 std::forward_iterator_tag, MemoryAccess *> {
1259 def_chain_iterator() : MA(nullptr) {}
1260 def_chain_iterator(T MA) : MA(MA) {}
1261
1262 T operator*() const { return MA; }
1263
1264 def_chain_iterator &operator++() {
1265 // N.B. liveOnEntry has a null defining access.
1266 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1267 if (UseOptimizedChain && MUD->isOptimized())
1268 MA = MUD->getOptimized();
1269 else
1270 MA = MUD->getDefiningAccess();
1271 } else {
1272 MA = nullptr;
1273 }
1274
1275 return *this;
1276 }
1277
1278 bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
1279
1280private:
1281 T MA;
1282};
1283
1284template <class T>
1285inline iterator_range<def_chain_iterator<T>>
1286def_chain(T MA, MemoryAccess *UpTo = nullptr) {
1287#ifdef EXPENSIVE_CHECKS
1288 assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
1289 "UpTo isn't in the def chain!");
1290#endif
1291 return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
1292}
1293
1294template <class T>
1295inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
1296 return make_range(def_chain_iterator<T, true>(MA),
1297 def_chain_iterator<T, true>(nullptr));
1298}
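
// A hedged example: walking the chain of defs above a given access, ending
// at the nearest MemoryPhi or liveOnEntry (Start is assumed to be a
// MemoryAccess *):
//   for (MemoryAccess *MA : def_chain(Start))
//     if (auto *MD = dyn_cast<MemoryDef>(MA))
//       MD->print(errs());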
1299
1300} // end namespace llvm
1301
1302#endif // LLVM_ANALYSIS_MEMORYSSA_H