//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file exposes an interface to building/using memory SSA to
/// walk memory instructions using a use/def graph.
///
/// The MemorySSA class builds an SSA form that links together memory access
/// instructions such as loads, stores, atomics, and calls. Additionally, it
/// does a trivial form of "heap versioning": every time the memory state
/// changes in the program, we generate a new heap version. It generates
/// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
///
/// As a trivial example,
/// define i32 @main() #0 {
/// entry:
///   %call = call noalias i8* @_Znwm(i64 4) #2
///   %0 = bitcast i8* %call to i32*
///   %call1 = call noalias i8* @_Znwm(i64 4) #2
///   %1 = bitcast i8* %call1 to i32*
///   store i32 5, i32* %0, align 4
///   store i32 7, i32* %1, align 4
///   %2 = load i32* %0, align 4
///   %3 = load i32* %1, align 4
///   %add = add nsw i32 %2, %3
///   ret i32 %add
/// }
///
/// Will become
/// define i32 @main() #0 {
/// entry:
///   ; 1 = MemoryDef(0)
///   %call = call noalias i8* @_Znwm(i64 4) #3
///   %2 = bitcast i8* %call to i32*
///   ; 2 = MemoryDef(1)
///   %call1 = call noalias i8* @_Znwm(i64 4) #3
///   %4 = bitcast i8* %call1 to i32*
///   ; 3 = MemoryDef(2)
///   store i32 5, i32* %2, align 4
///   ; 4 = MemoryDef(3)
///   store i32 7, i32* %4, align 4
///   ; MemoryUse(3)
///   %7 = load i32* %2, align 4
///   ; MemoryUse(4)
///   %8 = load i32* %4, align 4
///   %add = add nsw i32 %7, %8
///   ret i32 %add
/// }
///
/// Given this form, all the stores that could ever affect the load at %8 can
/// be found by taking the MemoryUse associated with it and walking from use to
/// def until you hit the top of the function.
///
/// Each def also has a list of users associated with it, so you can walk from
/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
/// but not the RHS of MemoryDefs. You can see this above at %7, which would
/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
/// store, all the MemoryUses on its use lists are may-aliases of that store
/// (but the MemoryDefs on its use list may not be).
///
/// MemoryDefs are not disambiguated because it would require multiple reaching
/// definitions, which would require multiple phis, and multiple memoryaccesses
/// per instruction.
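///
/// A typical client query, sketched here purely for illustration (the names
/// MSSA, Load, and Walker are placeholders for an already-built MemorySSA
/// analysis, a load instruction of interest, and MemorySSA's walker):
///
/// \code
///   MemorySSAWalker *Walker = MSSA.getWalker();
///   if (MemoryUseOrDef *LoadAccess = MSSA.getMemoryAccess(Load)) {
///     // Nearest dominating access that may clobber the memory read by Load.
///     MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(LoadAccess);
///     if (MSSA.isLiveOnEntryDef(Clobber)) {
///       // No store in this function affects the loaded location.
///     }
///   }
/// \endcode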
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMORYSSA_H
#define LLVM_ANALYSIS_MEMORYSSA_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/simple_ilist.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedUser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <memory>
#include <utility>

namespace llvm {

/// Enables memory ssa as a dependency for loop passes.
extern cl::opt<bool> EnableMSSALoopDependency;

class Function;
class Instruction;
class MemoryAccess;
class MemorySSAWalker;
class LLVMContext;
class raw_ostream;

namespace MSSAHelpers {

struct AllAccessTag {};
struct DefsOnlyTag {};

} // end namespace MSSAHelpers

enum : unsigned {
  // Used to signify what the default invalid ID is for MemoryAccess's
  // getID()
  INVALID_MEMORYACCESS_ID = -1U
};

template <class T> class memoryaccess_def_iterator_base;
using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
using const_memoryaccess_def_iterator =
    memoryaccess_def_iterator_base<const MemoryAccess>;

// The base for all memory accesses. All memory accesses in a block are
// linked together using an intrusive list.
class MemoryAccess
    : public DerivedUser,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
public:
  using AllAccessType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsOnlyType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  MemoryAccess(const MemoryAccess &) = delete;
  MemoryAccess &operator=(const MemoryAccess &) = delete;

  void *operator new(size_t) = delete;

  // Methods for support type inquiry through isa, cast, and
  // dyn_cast
  static bool classof(const Value *V) {
    unsigned ID = V->getValueID();
    return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
  }

  BasicBlock *getBlock() const { return Block; }

  void print(raw_ostream &OS) const;
  void dump() const;

  /// The user iterators for a memory access
  using iterator = user_iterator;
  using const_iterator = const_user_iterator;

  /// This iterator walks over all of the defs in a given
  /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
  /// MemoryUse/MemoryDef, this walks the defining access.
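  ///
  /// A minimal sketch of iterating these defs (illustrative only; MA stands
  /// for whatever MemoryAccess a client happens to be inspecting):
  ///
  /// \code
  ///   for (MemoryAccess *Def :
  ///        make_range(MA->defs_begin(), MA->defs_end())) {
  ///     // For a MemoryPhi this visits each incoming value; for a
  ///     // MemoryUse/MemoryDef it visits the single defining access.
  ///   }
  /// \endcode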
  memoryaccess_def_iterator defs_begin();
  const_memoryaccess_def_iterator defs_begin() const;
  memoryaccess_def_iterator defs_end();
  const_memoryaccess_def_iterator defs_end() const;

  /// Get the iterators for the all-access list and the defs-only list.
  /// We default to the all-access list.
  AllAccessType::self_iterator getIterator() {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::const_self_iterator getIterator() const {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::reverse_self_iterator getReverseIterator() {
    return this->AllAccessType::getReverseIterator();
  }
  AllAccessType::const_reverse_self_iterator getReverseIterator() const {
    return this->AllAccessType::getReverseIterator();
  }
  DefsOnlyType::self_iterator getDefsIterator() {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::const_self_iterator getDefsIterator() const {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
    return this->DefsOnlyType::getReverseIterator();
  }
  DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
    return this->DefsOnlyType::getReverseIterator();
  }

protected:
  friend class MemoryDef;
  friend class MemoryPhi;
  friend class MemorySSA;
  friend class MemoryUse;
  friend class MemoryUseOrDef;

  /// Used by MemorySSA to change the block of a MemoryAccess when it is
  /// moved.
  void setBlock(BasicBlock *BB) { Block = BB; }

  /// Used for debugging and tracking things about MemoryAccesses.
  /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
  inline unsigned getID() const;

  MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
               BasicBlock *BB, unsigned NumOperands)
      : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
        Block(BB) {}

  // Use deleteValue() to delete a generic MemoryAccess.
  ~MemoryAccess() = default;

private:
  BasicBlock *Block;
};

template <>
struct ilist_alloc_traits<MemoryAccess> {
  static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
};

inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
  MA.print(OS);
  return OS;
}

/// Class that has the common methods + fields of memory uses/defs. It's
/// a little awkward to have, but there are many cases where we want either a
/// use or def, and there are many cases where uses are needed (defs aren't
/// acceptable), and vice-versa.
///
/// This class should never be instantiated directly; make a MemoryUse or
/// MemoryDef instead.
class MemoryUseOrDef : public MemoryAccess {
public:
  void *operator new(size_t) = delete;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  /// Get the instruction that this MemoryUse represents.
  Instruction *getMemoryInst() const { return MemoryInstruction; }

  /// Get the access that produces the memory state used by this Use.
258 MemoryAccess *getDefiningAccess() const { return getOperand(0); } 259 260 static bool classof(const Value *MA) { 261 return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal; 262 } 263 264 // Sadly, these have to be public because they are needed in some of the 265 // iterators. 266 inline bool isOptimized() const; 267 inline MemoryAccess *getOptimized() const; 268 inline void setOptimized(MemoryAccess *); 269 270 // Retrieve AliasResult type of the optimized access. Ideally this would be 271 // returned by the caching walker and may go away in the future. 272 Optional<AliasResult> getOptimizedAccessType() const { 273 return OptimizedAccessAlias; 274 } 275 276 /// Reset the ID of what this MemoryUse was optimized to, causing it to 277 /// be rewalked by the walker if necessary. 278 /// This really should only be called by tests. 279 inline void resetOptimized(); 280 281 protected: 282 friend class MemorySSA; 283 friend class MemorySSAUpdater; 284 285 MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty, 286 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB, 287 unsigned NumOperands) 288 : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands), 289 MemoryInstruction(MI), OptimizedAccessAlias(MayAlias) { 290 setDefiningAccess(DMA); 291 } 292 293 // Use deleteValue() to delete a generic MemoryUseOrDef. 294 ~MemoryUseOrDef() = default; 295 296 void setOptimizedAccessType(Optional<AliasResult> AR) { 297 OptimizedAccessAlias = AR; 298 } 299 300 void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false, 301 Optional<AliasResult> AR = MayAlias) { 302 if (!Optimized) { 303 setOperand(0, DMA); 304 return; 305 } 306 setOptimized(DMA); 307 setOptimizedAccessType(AR); 308 } 309 310 private: 311 Instruction *MemoryInstruction; 312 Optional<AliasResult> OptimizedAccessAlias; 313 }; 314 315 /// Represents read-only accesses to memory 316 /// 317 /// In particular, the set of Instructions that will be represented by 318 /// MemoryUse's is exactly the set of Instructions for which 319 /// AliasAnalysis::getModRefInfo returns "Ref". 320 class MemoryUse final : public MemoryUseOrDef { 321 public: 322 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess); 323 324 MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB) 325 : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB, 326 /*NumOperands=*/1) {} 327 328 // allocate space for exactly one operand 329 void *operator new(size_t s) { return User::operator new(s, 1); } 330 331 static bool classof(const Value *MA) { 332 return MA->getValueID() == MemoryUseVal; 333 } 334 335 void print(raw_ostream &OS) const; 336 337 void setOptimized(MemoryAccess *DMA) { 338 OptimizedID = DMA->getID(); 339 setOperand(0, DMA); 340 } 341 342 bool isOptimized() const { 343 return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID(); 344 } 345 346 MemoryAccess *getOptimized() const { 347 return getDefiningAccess(); 348 } 349 350 void resetOptimized() { 351 OptimizedID = INVALID_MEMORYACCESS_ID; 352 } 353 354 protected: 355 friend class MemorySSA; 356 357 private: 358 static void deleteMe(DerivedUser *Self); 359 360 unsigned OptimizedID = INVALID_MEMORYACCESS_ID; 361 }; 362 363 template <> 364 struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {}; 365 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess) 366 367 /// Represents a read-write access to memory, whether it is a must-alias, 368 /// or a may-alias. 
369 /// 370 /// In particular, the set of Instructions that will be represented by 371 /// MemoryDef's is exactly the set of Instructions for which 372 /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef". 373 /// Note that, in order to provide def-def chains, all defs also have a use 374 /// associated with them. This use points to the nearest reaching 375 /// MemoryDef/MemoryPhi. 376 class MemoryDef final : public MemoryUseOrDef { 377 public: 378 friend class MemorySSA; 379 380 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess); 381 382 MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB, 383 unsigned Ver) 384 : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB, 385 /*NumOperands=*/2), 386 ID(Ver) {} 387 388 // allocate space for exactly two operands 389 void *operator new(size_t s) { return User::operator new(s, 2); } 390 391 static bool classof(const Value *MA) { 392 return MA->getValueID() == MemoryDefVal; 393 } 394 395 void setOptimized(MemoryAccess *MA) { 396 setOperand(1, MA); 397 OptimizedID = MA->getID(); 398 } 399 400 MemoryAccess *getOptimized() const { 401 return cast_or_null<MemoryAccess>(getOperand(1)); 402 } 403 404 bool isOptimized() const { 405 return getOptimized() && OptimizedID == getOptimized()->getID(); 406 } 407 408 void resetOptimized() { 409 OptimizedID = INVALID_MEMORYACCESS_ID; 410 setOperand(1, nullptr); 411 } 412 413 void print(raw_ostream &OS) const; 414 415 unsigned getID() const { return ID; } 416 417 private: 418 static void deleteMe(DerivedUser *Self); 419 420 const unsigned ID; 421 unsigned OptimizedID = INVALID_MEMORYACCESS_ID; 422 }; 423 424 template <> 425 struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {}; 426 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess) 427 428 template <> 429 struct OperandTraits<MemoryUseOrDef> { 430 static Use *op_begin(MemoryUseOrDef *MUD) { 431 if (auto *MU = dyn_cast<MemoryUse>(MUD)) 432 return OperandTraits<MemoryUse>::op_begin(MU); 433 return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD)); 434 } 435 436 static Use *op_end(MemoryUseOrDef *MUD) { 437 if (auto *MU = dyn_cast<MemoryUse>(MUD)) 438 return OperandTraits<MemoryUse>::op_end(MU); 439 return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD)); 440 } 441 442 static unsigned operands(const MemoryUseOrDef *MUD) { 443 if (const auto *MU = dyn_cast<MemoryUse>(MUD)) 444 return OperandTraits<MemoryUse>::operands(MU); 445 return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD)); 446 } 447 }; 448 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess) 449 450 /// Represents phi nodes for memory accesses. 451 /// 452 /// These have the same semantic as regular phi nodes, with the exception that 453 /// only one phi will ever exist in a given basic block. 454 /// Guaranteeing one phi per block means guaranteeing there is only ever one 455 /// valid reaching MemoryDef/MemoryPHI along each path to the phi node. 456 /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or 457 /// a MemoryPhi's operands. 458 /// That is, given 459 /// if (a) { 460 /// store %a 461 /// store %b 462 /// } 463 /// it *must* be transformed into 464 /// if (a) { 465 /// 1 = MemoryDef(liveOnEntry) 466 /// store %a 467 /// 2 = MemoryDef(1) 468 /// store %b 469 /// } 470 /// and *not* 471 /// if (a) { 472 /// 1 = MemoryDef(liveOnEntry) 473 /// store %a 474 /// 2 = MemoryDef(liveOnEntry) 475 /// store %b 476 /// } 477 /// even if the two stores do not conflict. 
Otherwise, both 1 and 2 reach the 478 /// end of the branch, and if there are not two phi nodes, one will be 479 /// disconnected completely from the SSA graph below that point. 480 /// Because MemoryUse's do not generate new definitions, they do not have this 481 /// issue. 482 class MemoryPhi final : public MemoryAccess { 483 // allocate space for exactly zero operands 484 void *operator new(size_t s) { return User::operator new(s); } 485 486 public: 487 /// Provide fast operand accessors 488 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess); 489 490 MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0) 491 : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver), 492 ReservedSpace(NumPreds) { 493 allocHungoffUses(ReservedSpace); 494 } 495 496 // Block iterator interface. This provides access to the list of incoming 497 // basic blocks, which parallels the list of incoming values. 498 using block_iterator = BasicBlock **; 499 using const_block_iterator = BasicBlock *const *; 500 501 block_iterator block_begin() { 502 auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace); 503 return reinterpret_cast<block_iterator>(Ref + 1); 504 } 505 506 const_block_iterator block_begin() const { 507 const auto *Ref = 508 reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace); 509 return reinterpret_cast<const_block_iterator>(Ref + 1); 510 } 511 512 block_iterator block_end() { return block_begin() + getNumOperands(); } 513 514 const_block_iterator block_end() const { 515 return block_begin() + getNumOperands(); 516 } 517 518 iterator_range<block_iterator> blocks() { 519 return make_range(block_begin(), block_end()); 520 } 521 522 iterator_range<const_block_iterator> blocks() const { 523 return make_range(block_begin(), block_end()); 524 } 525 526 op_range incoming_values() { return operands(); } 527 528 const_op_range incoming_values() const { return operands(); } 529 530 /// Return the number of incoming edges 531 unsigned getNumIncomingValues() const { return getNumOperands(); } 532 533 /// Return incoming value number x 534 MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); } 535 void setIncomingValue(unsigned I, MemoryAccess *V) { 536 assert(V && "PHI node got a null value!"); 537 setOperand(I, V); 538 } 539 540 static unsigned getOperandNumForIncomingValue(unsigned I) { return I; } 541 static unsigned getIncomingValueNumForOperand(unsigned I) { return I; } 542 543 /// Return incoming basic block number @p i. 544 BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; } 545 546 /// Return incoming basic block corresponding 547 /// to an operand of the PHI. 548 BasicBlock *getIncomingBlock(const Use &U) const { 549 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?"); 550 return getIncomingBlock(unsigned(&U - op_begin())); 551 } 552 553 /// Return incoming basic block corresponding 554 /// to value use iterator. 555 BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const { 556 return getIncomingBlock(I.getUse()); 557 } 558 559 void setIncomingBlock(unsigned I, BasicBlock *BB) { 560 assert(BB && "PHI node got a null basic block!"); 561 block_begin()[I] = BB; 562 } 563 564 /// Add an incoming value to the end of the PHI list 565 void addIncoming(MemoryAccess *V, BasicBlock *BB) { 566 if (getNumOperands() == ReservedSpace) 567 growOperands(); // Get more space! 568 // Initialize some new operands. 
569 setNumHungOffUseOperands(getNumOperands() + 1); 570 setIncomingValue(getNumOperands() - 1, V); 571 setIncomingBlock(getNumOperands() - 1, BB); 572 } 573 574 /// Return the first index of the specified basic 575 /// block in the value list for this PHI. Returns -1 if no instance. 576 int getBasicBlockIndex(const BasicBlock *BB) const { 577 for (unsigned I = 0, E = getNumOperands(); I != E; ++I) 578 if (block_begin()[I] == BB) 579 return I; 580 return -1; 581 } 582 583 MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const { 584 int Idx = getBasicBlockIndex(BB); 585 assert(Idx >= 0 && "Invalid basic block argument!"); 586 return getIncomingValue(Idx); 587 } 588 589 // After deleting incoming position I, the order of incoming may be changed. 590 void unorderedDeleteIncoming(unsigned I) { 591 unsigned E = getNumOperands(); 592 assert(I < E && "Cannot remove out of bounds Phi entry."); 593 // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi 594 // itself should be deleted. 595 assert(E >= 2 && "Cannot only remove incoming values in MemoryPhis with " 596 "at least 2 values."); 597 setIncomingValue(I, getIncomingValue(E - 1)); 598 setIncomingBlock(I, block_begin()[E - 1]); 599 setOperand(E - 1, nullptr); 600 block_begin()[E - 1] = nullptr; 601 setNumHungOffUseOperands(getNumOperands() - 1); 602 } 603 604 // After deleting entries that satisfy Pred, remaining entries may have 605 // changed order. 606 template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) { 607 for (unsigned I = 0, E = getNumOperands(); I != E; ++I) 608 if (Pred(getIncomingValue(I), getIncomingBlock(I))) { 609 unorderedDeleteIncoming(I); 610 E = getNumOperands(); 611 --I; 612 } 613 assert(getNumOperands() >= 1 && 614 "Cannot remove all incoming blocks in a MemoryPhi."); 615 } 616 617 // After deleting incoming block BB, the incoming blocks order may be changed. 618 void unorderedDeleteIncomingBlock(const BasicBlock *BB) { 619 unorderedDeleteIncomingIf( 620 [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; }); 621 } 622 623 // After deleting incoming memory access MA, the incoming accesses order may 624 // be changed. 625 void unorderedDeleteIncomingValue(const MemoryAccess *MA) { 626 unorderedDeleteIncomingIf( 627 [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; }); 628 } 629 630 static bool classof(const Value *V) { 631 return V->getValueID() == MemoryPhiVal; 632 } 633 634 void print(raw_ostream &OS) const; 635 636 unsigned getID() const { return ID; } 637 638 protected: 639 friend class MemorySSA; 640 641 /// this is more complicated than the generic 642 /// User::allocHungoffUses, because we have to allocate Uses for the incoming 643 /// values and pointers to the incoming blocks, all in one allocation. 644 void allocHungoffUses(unsigned N) { 645 User::allocHungoffUses(N, /* IsPhi */ true); 646 } 647 648 private: 649 // For debugging only 650 const unsigned ID; 651 unsigned ReservedSpace; 652 653 /// This grows the operand list in response to a push_back style of 654 /// operation. This grows the number of ops by 1.5 times. 655 void growOperands() { 656 unsigned E = getNumOperands(); 657 // 2 op PHI nodes are VERY common, so reserve at least enough for that. 
658 ReservedSpace = std::max(E + E / 2, 2u); 659 growHungoffUses(ReservedSpace, /* IsPhi */ true); 660 } 661 662 static void deleteMe(DerivedUser *Self); 663 }; 664 665 inline unsigned MemoryAccess::getID() const { 666 assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) && 667 "only memory defs and phis have ids"); 668 if (const auto *MD = dyn_cast<MemoryDef>(this)) 669 return MD->getID(); 670 return cast<MemoryPhi>(this)->getID(); 671 } 672 673 inline bool MemoryUseOrDef::isOptimized() const { 674 if (const auto *MD = dyn_cast<MemoryDef>(this)) 675 return MD->isOptimized(); 676 return cast<MemoryUse>(this)->isOptimized(); 677 } 678 679 inline MemoryAccess *MemoryUseOrDef::getOptimized() const { 680 if (const auto *MD = dyn_cast<MemoryDef>(this)) 681 return MD->getOptimized(); 682 return cast<MemoryUse>(this)->getOptimized(); 683 } 684 685 inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) { 686 if (auto *MD = dyn_cast<MemoryDef>(this)) 687 MD->setOptimized(MA); 688 else 689 cast<MemoryUse>(this)->setOptimized(MA); 690 } 691 692 inline void MemoryUseOrDef::resetOptimized() { 693 if (auto *MD = dyn_cast<MemoryDef>(this)) 694 MD->resetOptimized(); 695 else 696 cast<MemoryUse>(this)->resetOptimized(); 697 } 698 699 template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {}; 700 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess) 701 702 /// Encapsulates MemorySSA, including all data associated with memory 703 /// accesses. 704 class MemorySSA { 705 public: 706 MemorySSA(Function &, AliasAnalysis *, DominatorTree *); 707 708 // MemorySSA must remain where it's constructed; Walkers it creates store 709 // pointers to it. 710 MemorySSA(MemorySSA &&) = delete; 711 712 ~MemorySSA(); 713 714 MemorySSAWalker *getWalker(); 715 MemorySSAWalker *getSkipSelfWalker(); 716 717 /// Given a memory Mod/Ref'ing instruction, get the MemorySSA 718 /// access associated with it. If passed a basic block gets the memory phi 719 /// node that exists for that block, if there is one. Otherwise, this will get 720 /// a MemoryUseOrDef. 721 MemoryUseOrDef *getMemoryAccess(const Instruction *I) const { 722 return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I)); 723 } 724 725 MemoryPhi *getMemoryAccess(const BasicBlock *BB) const { 726 return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB))); 727 } 728 729 void dump() const; 730 void print(raw_ostream &) const; 731 732 /// Return true if \p MA represents the live on entry value 733 /// 734 /// Loads and stores from pointer arguments and other global values may be 735 /// defined by memory operations that do not occur in the current function, so 736 /// they may be live on entry to the function. MemorySSA represents such 737 /// memory state by the live on entry definition, which is guaranteed to occur 738 /// before any other memory access in the function. 739 inline bool isLiveOnEntryDef(const MemoryAccess *MA) const { 740 return MA == LiveOnEntryDef.get(); 741 } 742 743 inline MemoryAccess *getLiveOnEntryDef() const { 744 return LiveOnEntryDef.get(); 745 } 746 747 // Sadly, iplists, by default, owns and deletes pointers added to the 748 // list. It's not currently possible to have two iplists for the same type, 749 // where one owns the pointers, and one does not. This is because the traits 750 // are per-type, not per-tag. If this ever changes, we should make the 751 // DefList an iplist. 
  using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsList =
      simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  /// Return the list of MemoryAccess's for a given basic block.
  ///
  /// This list is not modifiable by the user.
  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
    return getWritableBlockAccesses(BB);
  }

  /// Return the list of MemoryDef's and MemoryPhi's for a given basic
  /// block.
  ///
  /// This list is not modifiable by the user.
  const DefsList *getBlockDefs(const BasicBlock *BB) const {
    return getWritableBlockDefs(BB);
  }

  /// Given two memory accesses in the same basic block, determine
  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given two memory accesses in potentially different blocks,
  /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
  /// dominates Use \p B.
  bool dominates(const MemoryAccess *A, const Use &B) const;

  /// Verify that MemorySSA is self-consistent (i.e., definitions dominate
  /// all uses, and uses appear in the right places). This is used by unit
  /// tests.
  void verifyMemorySSA() const;

  /// Used in various insertion functions to specify whether we are talking
  /// about the beginning or end of a block.
  enum InsertionPlace { Beginning, End, BeforeTerminator };

protected:
  // Used by the MemorySSA annotator, dumpers, and wrapper pass.
  friend class MemorySSAAnnotatedWriter;
  friend class MemorySSAPrinterLegacyPass;
  friend class MemorySSAUpdater;

  void verifyOrderingDominationAndDefUses(Function &F) const;
  void verifyDominationNumbers(const Function &F) const;
  void verifyPrevDefInPhis(Function &F) const;

  // This is used by the use optimizer and updater.
  AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
    auto It = PerBlockAccesses.find(BB);
    return It == PerBlockAccesses.end() ? nullptr : It->second.get();
  }

  // This is used by the use optimizer and updater.
  DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
    auto It = PerBlockDefs.find(BB);
    return It == PerBlockDefs.end() ? nullptr : It->second.get();
  }

  // These are used by the updater to perform various internal MemorySSA
  // machinations. They do not always leave the IR in a correct state and rely
  // on the updater to fix up what they break, so they are not public.

  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
  void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);

  // Rename the dominator tree branch rooted at BB.
821 void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, 822 SmallPtrSetImpl<BasicBlock *> &Visited) { 823 renamePass(DT->getNode(BB), IncomingVal, Visited, true, true); 824 } 825 826 void removeFromLookups(MemoryAccess *); 827 void removeFromLists(MemoryAccess *, bool ShouldDelete = true); 828 void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *, 829 InsertionPlace); 830 void insertIntoListsBefore(MemoryAccess *, const BasicBlock *, 831 AccessList::iterator); 832 MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *, 833 const MemoryUseOrDef *Template = nullptr, 834 bool CreationMustSucceed = true); 835 836 private: 837 template <class AliasAnalysisType> class ClobberWalkerBase; 838 template <class AliasAnalysisType> class CachingWalker; 839 template <class AliasAnalysisType> class SkipSelfWalker; 840 class OptimizeUses; 841 842 CachingWalker<AliasAnalysis> *getWalkerImpl(); 843 void buildMemorySSA(BatchAAResults &BAA); 844 void optimizeUses(); 845 846 void prepareForMoveTo(MemoryAccess *, BasicBlock *); 847 void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const; 848 849 using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>; 850 using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>; 851 852 void 853 determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks); 854 void markUnreachableAsLiveOnEntry(BasicBlock *BB); 855 bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const; 856 MemoryPhi *createMemoryPhi(BasicBlock *BB); 857 template <typename AliasAnalysisType> 858 MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *, 859 const MemoryUseOrDef *Template = nullptr); 860 MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace); 861 void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &); 862 MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool); 863 void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool); 864 void renamePass(DomTreeNode *, MemoryAccess *IncomingVal, 865 SmallPtrSetImpl<BasicBlock *> &Visited, 866 bool SkipVisited = false, bool RenameAllUses = false); 867 AccessList *getOrCreateAccessList(const BasicBlock *); 868 DefsList *getOrCreateDefsList(const BasicBlock *); 869 void renumberBlock(const BasicBlock *) const; 870 AliasAnalysis *AA; 871 DominatorTree *DT; 872 Function &F; 873 874 // Memory SSA mappings 875 DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess; 876 877 // These two mappings contain the main block to access/def mappings for 878 // MemorySSA. The list contained in PerBlockAccesses really owns all the 879 // MemoryAccesses. 880 // Both maps maintain the invariant that if a block is found in them, the 881 // corresponding list is not empty, and if a block is not found in them, the 882 // corresponding list is empty. 883 AccessMap PerBlockAccesses; 884 DefsMap PerBlockDefs; 885 std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef; 886 887 // Domination mappings 888 // Note that the numbering is local to a block, even though the map is 889 // global. 
890 mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid; 891 mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering; 892 893 // Memory SSA building info 894 std::unique_ptr<ClobberWalkerBase<AliasAnalysis>> WalkerBase; 895 std::unique_ptr<CachingWalker<AliasAnalysis>> Walker; 896 std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker; 897 unsigned NextID; 898 }; 899 900 // Internal MemorySSA utils, for use by MemorySSA classes and walkers 901 class MemorySSAUtil { 902 protected: 903 friend class GVNHoist; 904 friend class MemorySSAWalker; 905 906 // This function should not be used by new passes. 907 static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU, 908 AliasAnalysis &AA); 909 }; 910 911 // This pass does eager building and then printing of MemorySSA. It is used by 912 // the tests to be able to build, dump, and verify Memory SSA. 913 class MemorySSAPrinterLegacyPass : public FunctionPass { 914 public: 915 MemorySSAPrinterLegacyPass(); 916 917 bool runOnFunction(Function &) override; 918 void getAnalysisUsage(AnalysisUsage &AU) const override; 919 920 static char ID; 921 }; 922 923 /// An analysis that produces \c MemorySSA for a function. 924 /// 925 class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> { 926 friend AnalysisInfoMixin<MemorySSAAnalysis>; 927 928 static AnalysisKey Key; 929 930 public: 931 // Wrap MemorySSA result to ensure address stability of internal MemorySSA 932 // pointers after construction. Use a wrapper class instead of plain 933 // unique_ptr<MemorySSA> to avoid build breakage on MSVC. 934 struct Result { 935 Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {} 936 937 MemorySSA &getMSSA() { return *MSSA.get(); } 938 939 std::unique_ptr<MemorySSA> MSSA; 940 941 bool invalidate(Function &F, const PreservedAnalyses &PA, 942 FunctionAnalysisManager::Invalidator &Inv); 943 }; 944 945 Result run(Function &F, FunctionAnalysisManager &AM); 946 }; 947 948 /// Printer pass for \c MemorySSA. 949 class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> { 950 raw_ostream &OS; 951 952 public: 953 explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {} 954 955 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); 956 }; 957 958 /// Verifier pass for \c MemorySSA. 959 struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> { 960 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); 961 }; 962 963 /// Legacy analysis pass which computes \c MemorySSA. 964 class MemorySSAWrapperPass : public FunctionPass { 965 public: 966 MemorySSAWrapperPass(); 967 968 static char ID; 969 970 bool runOnFunction(Function &) override; 971 void releaseMemory() override; 972 MemorySSA &getMSSA() { return *MSSA; } 973 const MemorySSA &getMSSA() const { return *MSSA; } 974 975 void getAnalysisUsage(AnalysisUsage &AU) const override; 976 977 void verifyAnalysis() const override; 978 void print(raw_ostream &OS, const Module *M = nullptr) const override; 979 980 private: 981 std::unique_ptr<MemorySSA> MSSA; 982 }; 983 984 /// This is the generic walker interface for walkers of MemorySSA. 985 /// Walkers are used to be able to further disambiguate the def-use chains 986 /// MemorySSA gives you, or otherwise produce better info than MemorySSA gives 987 /// you. 
/// In particular, while the def-use chains provide basic information, and are
/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
/// MemoryUse as AliasAnalysis considers it, a user may want better or other
/// information. In particular, they may want to use SCEV info to further
/// disambiguate memory accesses, or they may want the nearest dominating
/// may-aliasing MemoryDef for a call or a store. This API provides a
/// standardized interface for getting and using that info.
class MemorySSAWalker {
public:
  MemorySSAWalker(MemorySSA *);
  virtual ~MemorySSAWalker() = default;

  using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;

  /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
  /// will give you the nearest dominating MemoryAccess that Mod's the location
  /// the instruction accesses (by skipping any def which AA can prove does not
  /// alias the location(s) accessed by the instruction given).
  ///
  /// Note that this will return a single access, and it must dominate the
  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
  /// this will return the MemoryPhi, not the operand. This means that
  /// given:
  /// if (a) {
  ///   1 = MemoryDef(liveOnEntry)
  ///   store %a
  /// } else {
  ///   2 = MemoryDef(liveOnEntry)
  ///   store %b
  /// }
  /// 3 = MemoryPhi(2, 1)
  /// MemoryUse(3)
  /// load %a
  ///
  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
  /// in the if (a) branch.
  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
    MemoryAccess *MA = MSSA->getMemoryAccess(I);
    assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
    return getClobberingMemoryAccess(MA);
  }

  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
  /// but takes a MemoryAccess instead of an Instruction.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;

  /// Given a potentially clobbering memory access and a new location,
  /// calling this will give you the nearest dominating clobbering MemoryAccess
  /// (by skipping non-aliasing def links).
  ///
  /// This version of the function is mainly used to disambiguate phi translated
  /// pointers, where the value of a pointer may have changed from the initial
  /// memory access. Note that this expects to be handed either a MemoryUse,
  /// or an already potentially clobbering access. Unlike the above API, if
  /// given a MemoryDef that clobbers the pointer as the starting access, it
  /// will return that MemoryDef, whereas the above would return the clobber
  /// starting from the use side of the memory def.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  const MemoryLocation &) = 0;

  /// Given a memory access, invalidate anything this walker knows about
  /// that access.
  /// This API is used by walkers that store information to perform basic cache
  /// invalidation. This will be called by MemorySSA at appropriate times for
  /// the walker it uses or returns.
  virtual void invalidateInfo(MemoryAccess *) {}

protected:
  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
                          // constructor.
  MemorySSA *MSSA;
};

/// A MemorySSAWalker that does no alias queries, or anything else.
It 1062 /// simply returns the links as they were constructed by the builder. 1063 class DoNothingMemorySSAWalker final : public MemorySSAWalker { 1064 public: 1065 // Keep the overrides below from hiding the Instruction overload of 1066 // getClobberingMemoryAccess. 1067 using MemorySSAWalker::getClobberingMemoryAccess; 1068 1069 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override; 1070 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, 1071 const MemoryLocation &) override; 1072 }; 1073 1074 using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>; 1075 using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>; 1076 1077 /// Iterator base class used to implement const and non-const iterators 1078 /// over the defining accesses of a MemoryAccess. 1079 template <class T> 1080 class memoryaccess_def_iterator_base 1081 : public iterator_facade_base<memoryaccess_def_iterator_base<T>, 1082 std::forward_iterator_tag, T, ptrdiff_t, T *, 1083 T *> { 1084 using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base; 1085 1086 public: 1087 memoryaccess_def_iterator_base(T *Start) : Access(Start) {} 1088 memoryaccess_def_iterator_base() = default; 1089 1090 bool operator==(const memoryaccess_def_iterator_base &Other) const { 1091 return Access == Other.Access && (!Access || ArgNo == Other.ArgNo); 1092 } 1093 1094 // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the 1095 // block from the operand in constant time (In a PHINode, the uselist has 1096 // both, so it's just subtraction). We provide it as part of the 1097 // iterator to avoid callers having to linear walk to get the block. 1098 // If the operation becomes constant time on MemoryPHI's, this bit of 1099 // abstraction breaking should be removed. 1100 BasicBlock *getPhiArgBlock() const { 1101 MemoryPhi *MP = dyn_cast<MemoryPhi>(Access); 1102 assert(MP && "Tried to get phi arg block when not iterating over a PHI"); 1103 return MP->getIncomingBlock(ArgNo); 1104 } 1105 1106 typename BaseT::iterator::pointer operator*() const { 1107 assert(Access && "Tried to access past the end of our iterator"); 1108 // Go to the first argument for phis, and the defining access for everything 1109 // else. 1110 if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) 1111 return MP->getIncomingValue(ArgNo); 1112 return cast<MemoryUseOrDef>(Access)->getDefiningAccess(); 1113 } 1114 1115 using BaseT::operator++; 1116 memoryaccess_def_iterator_base &operator++() { 1117 assert(Access && "Hit end of iterator"); 1118 if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) { 1119 if (++ArgNo >= MP->getNumIncomingValues()) { 1120 ArgNo = 0; 1121 Access = nullptr; 1122 } 1123 } else { 1124 Access = nullptr; 1125 } 1126 return *this; 1127 } 1128 1129 private: 1130 T *Access = nullptr; 1131 unsigned ArgNo = 0; 1132 }; 1133 1134 inline memoryaccess_def_iterator MemoryAccess::defs_begin() { 1135 return memoryaccess_def_iterator(this); 1136 } 1137 1138 inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const { 1139 return const_memoryaccess_def_iterator(this); 1140 } 1141 1142 inline memoryaccess_def_iterator MemoryAccess::defs_end() { 1143 return memoryaccess_def_iterator(); 1144 } 1145 1146 inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const { 1147 return const_memoryaccess_def_iterator(); 1148 } 1149 1150 /// GraphTraits for a MemoryAccess, which walks defs in the normal case, 1151 /// and uses in the inverse case. 
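///
/// An illustrative sketch of what these traits enable (StartAccess is a
/// placeholder for whatever MemoryAccess a client starts from; depth_first
/// comes from "llvm/ADT/DepthFirstIterator.h", which is not included here):
///
/// \code
///   for (MemoryAccess *MA : depth_first(StartAccess)) {
///     // Visits StartAccess and, transitively, its defining accesses
///     // (for MemoryPhis, each incoming value), each exactly once.
///   }
/// \endcode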
1152 template <> struct GraphTraits<MemoryAccess *> { 1153 using NodeRef = MemoryAccess *; 1154 using ChildIteratorType = memoryaccess_def_iterator; 1155 1156 static NodeRef getEntryNode(NodeRef N) { return N; } 1157 static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); } 1158 static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); } 1159 }; 1160 1161 template <> struct GraphTraits<Inverse<MemoryAccess *>> { 1162 using NodeRef = MemoryAccess *; 1163 using ChildIteratorType = MemoryAccess::iterator; 1164 1165 static NodeRef getEntryNode(NodeRef N) { return N; } 1166 static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); } 1167 static ChildIteratorType child_end(NodeRef N) { return N->user_end(); } 1168 }; 1169 1170 /// Provide an iterator that walks defs, giving both the memory access, 1171 /// and the current pointer location, updating the pointer location as it 1172 /// changes due to phi node translation. 1173 /// 1174 /// This iterator, while somewhat specialized, is what most clients actually 1175 /// want when walking upwards through MemorySSA def chains. It takes a pair of 1176 /// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the 1177 /// memory location through phi nodes for the user. 1178 class upward_defs_iterator 1179 : public iterator_facade_base<upward_defs_iterator, 1180 std::forward_iterator_tag, 1181 const MemoryAccessPair> { 1182 using BaseT = upward_defs_iterator::iterator_facade_base; 1183 1184 public: 1185 upward_defs_iterator(const MemoryAccessPair &Info) 1186 : DefIterator(Info.first), Location(Info.second), 1187 OriginalAccess(Info.first) { 1188 CurrentPair.first = nullptr; 1189 1190 WalkingPhi = Info.first && isa<MemoryPhi>(Info.first); 1191 fillInCurrentPair(); 1192 } 1193 1194 upward_defs_iterator() { CurrentPair.first = nullptr; } 1195 1196 bool operator==(const upward_defs_iterator &Other) const { 1197 return DefIterator == Other.DefIterator; 1198 } 1199 1200 BaseT::iterator::reference operator*() const { 1201 assert(DefIterator != OriginalAccess->defs_end() && 1202 "Tried to access past the end of our iterator"); 1203 return CurrentPair; 1204 } 1205 1206 using BaseT::operator++; 1207 upward_defs_iterator &operator++() { 1208 assert(DefIterator != OriginalAccess->defs_end() && 1209 "Tried to access past the end of the iterator"); 1210 ++DefIterator; 1211 if (DefIterator != OriginalAccess->defs_end()) 1212 fillInCurrentPair(); 1213 return *this; 1214 } 1215 1216 BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); } 1217 1218 private: 1219 void fillInCurrentPair() { 1220 CurrentPair.first = *DefIterator; 1221 if (WalkingPhi && Location.Ptr) { 1222 PHITransAddr Translator( 1223 const_cast<Value *>(Location.Ptr), 1224 OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr); 1225 if (!Translator.PHITranslateValue(OriginalAccess->getBlock(), 1226 DefIterator.getPhiArgBlock(), nullptr, 1227 false)) 1228 if (Translator.getAddr() != Location.Ptr) { 1229 CurrentPair.second = Location.getWithNewPtr(Translator.getAddr()); 1230 return; 1231 } 1232 } 1233 CurrentPair.second = Location; 1234 } 1235 1236 MemoryAccessPair CurrentPair; 1237 memoryaccess_def_iterator DefIterator; 1238 MemoryLocation Location; 1239 MemoryAccess *OriginalAccess = nullptr; 1240 bool WalkingPhi = false; 1241 }; 1242 1243 inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) { 1244 return upward_defs_iterator(Pair); 1245 } 1246 1247 inline upward_defs_iterator 
upward_defs_end() { return upward_defs_iterator(); } 1248 1249 inline iterator_range<upward_defs_iterator> 1250 upward_defs(const MemoryAccessPair &Pair) { 1251 return make_range(upward_defs_begin(Pair), upward_defs_end()); 1252 } 1253 1254 /// Walks the defining accesses of MemoryDefs. Stops after we hit something that 1255 /// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when 1256 /// comparing against a null def_chain_iterator, this will compare equal only 1257 /// after walking said Phi/liveOnEntry. 1258 /// 1259 /// The UseOptimizedChain flag specifies whether to walk the clobbering 1260 /// access chain, or all the accesses. 1261 /// 1262 /// Normally, MemoryDef are all just def/use linked together, so a def_chain on 1263 /// a MemoryDef will walk all MemoryDefs above it in the program until it hits 1264 /// a phi node. The optimized chain walks the clobbering access of a store. 1265 /// So if you are just trying to find, given a store, what the next 1266 /// thing that would clobber the same memory is, you want the optimized chain. 1267 template <class T, bool UseOptimizedChain = false> 1268 struct def_chain_iterator 1269 : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>, 1270 std::forward_iterator_tag, MemoryAccess *> { 1271 def_chain_iterator() : MA(nullptr) {} 1272 def_chain_iterator(T MA) : MA(MA) {} 1273 1274 T operator*() const { return MA; } 1275 1276 def_chain_iterator &operator++() { 1277 // N.B. liveOnEntry has a null defining access. 1278 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) { 1279 if (UseOptimizedChain && MUD->isOptimized()) 1280 MA = MUD->getOptimized(); 1281 else 1282 MA = MUD->getDefiningAccess(); 1283 } else { 1284 MA = nullptr; 1285 } 1286 1287 return *this; 1288 } 1289 1290 bool operator==(const def_chain_iterator &O) const { return MA == O.MA; } 1291 1292 private: 1293 T MA; 1294 }; 1295 1296 template <class T> 1297 inline iterator_range<def_chain_iterator<T>> 1298 def_chain(T MA, MemoryAccess *UpTo = nullptr) { 1299 #ifdef EXPENSIVE_CHECKS 1300 assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) && 1301 "UpTo isn't in the def chain!"); 1302 #endif 1303 return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo)); 1304 } 1305 1306 template <class T> 1307 inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) { 1308 return make_range(def_chain_iterator<T, true>(MA), 1309 def_chain_iterator<T, true>(nullptr)); 1310 } 1311 1312 } // end namespace llvm 1313 1314 #endif // LLVM_ANALYSIS_MEMORYSSA_H 1315