MemorySSA.h revision 353358
//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file exposes an interface to building/using memory SSA to
/// walk memory instructions using a use/def graph.
///
/// Memory SSA class builds an SSA form that links together memory access
/// instructions such as loads, stores, atomics, and calls. Additionally, it
/// does a trivial form of "heap versioning": every time the memory state
/// changes in the program, we generate a new heap version. It generates
/// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
///
/// As a trivial example,
/// define i32 @main() #0 {
/// entry:
///   %call = call noalias i8* @_Znwm(i64 4) #2
///   %0 = bitcast i8* %call to i32*
///   %call1 = call noalias i8* @_Znwm(i64 4) #2
///   %1 = bitcast i8* %call1 to i32*
///   store i32 5, i32* %0, align 4
///   store i32 7, i32* %1, align 4
///   %2 = load i32* %0, align 4
///   %3 = load i32* %1, align 4
///   %add = add nsw i32 %2, %3
///   ret i32 %add
/// }
///
/// Will become
/// define i32 @main() #0 {
/// entry:
///   ; 1 = MemoryDef(0)
///   %call = call noalias i8* @_Znwm(i64 4) #3
///   %2 = bitcast i8* %call to i32*
///   ; 2 = MemoryDef(1)
///   %call1 = call noalias i8* @_Znwm(i64 4) #3
///   %4 = bitcast i8* %call1 to i32*
///   ; 3 = MemoryDef(2)
///   store i32 5, i32* %2, align 4
///   ; 4 = MemoryDef(3)
///   store i32 7, i32* %4, align 4
///   ; MemoryUse(3)
///   %7 = load i32* %2, align 4
///   ; MemoryUse(4)
///   %8 = load i32* %4, align 4
///   %add = add nsw i32 %7, %8
///   ret i32 %add
/// }
///
/// Given this form, all the stores that could ever affect the load at %8 can
/// be found by taking the MemoryUse associated with it and walking from use
/// to def until you hit the top of the function.
///
/// Each def also has a list of users associated with it, so you can walk from
/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
/// but not the RHS of MemoryDefs. You can see this above at %7, which would
/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
/// store, all the MemoryUses on its use lists are may-aliases of that store
/// (but the MemoryDefs on its use list may not be).
///
/// MemoryDefs are not disambiguated because it would require multiple reaching
/// definitions, which would require multiple phis, and multiple memoryaccesses
/// per instruction.
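///
/// As an illustration only (a sketch, not part of this interface's contract):
/// assuming `MSSA` is a MemorySSA instance and `LoadI` is a load in the
/// analyzed function, a client might find the load's clobber and walk its
/// def chain like so:
///
/// \code
///   MemoryUseOrDef *MU = MSSA.getMemoryAccess(LoadI);
///   MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(MU);
///   // Walk plain defining accesses until the entry state or a phi is hit.
///   for (MemoryAccess *MA = MU->getDefiningAccess();
///        MA && !MSSA.isLiveOnEntryDef(MA);) {
///     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
///       MA = MUD->getDefiningAccess();
///     else
///       break; // A MemoryPhi; its incoming values are handled separately.
///   }
/// \endcode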
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMORYSSA_H
#define LLVM_ANALYSIS_MEMORYSSA_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/simple_ilist.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedUser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <memory>
#include <utility>

namespace llvm {

/// Enables memory ssa as a dependency for loop passes.
extern cl::opt<bool> EnableMSSALoopDependency;

class Function;
class Instruction;
class MemoryAccess;
class MemorySSAWalker;
class LLVMContext;
class raw_ostream;

namespace MSSAHelpers {

struct AllAccessTag {};
struct DefsOnlyTag {};

} // end namespace MSSAHelpers

enum : unsigned {
  // Used to signify what the default invalid ID is for MemoryAccess's
  // getID()
  INVALID_MEMORYACCESS_ID = -1U
};

template <class T> class memoryaccess_def_iterator_base;
using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
using const_memoryaccess_def_iterator =
    memoryaccess_def_iterator_base<const MemoryAccess>;

// The base for all memory accesses. All memory accesses in a block are
// linked together using an intrusive list.
class MemoryAccess
    : public DerivedUser,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
public:
  using AllAccessType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsOnlyType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  MemoryAccess(const MemoryAccess &) = delete;
  MemoryAccess &operator=(const MemoryAccess &) = delete;

  void *operator new(size_t) = delete;

  // Methods for support type inquiry through isa, cast, and
  // dyn_cast
  static bool classof(const Value *V) {
    unsigned ID = V->getValueID();
    return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
  }

  BasicBlock *getBlock() const { return Block; }

  void print(raw_ostream &OS) const;
  void dump() const;

  /// The user iterators for a memory access
  using iterator = user_iterator;
  using const_iterator = const_user_iterator;

  /// This iterator walks over all of the defs in a given
  /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
  /// MemoryUse/MemoryDef, this walks the defining access.
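  ///
  /// A minimal sketch of its use, assuming `MA` is any MemoryAccess:
  ///
  /// \code
  ///   for (MemoryAccess *Def : make_range(MA->defs_begin(), MA->defs_end()))
  ///     Def->dump(); // Phi arguments for phis, the defining access otherwise.
  /// \endcode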
  memoryaccess_def_iterator defs_begin();
  const_memoryaccess_def_iterator defs_begin() const;
  memoryaccess_def_iterator defs_end();
  const_memoryaccess_def_iterator defs_end() const;

  /// Get the iterators for the all access list and the defs only list.
  /// We default to the all access list.
  AllAccessType::self_iterator getIterator() {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::const_self_iterator getIterator() const {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::reverse_self_iterator getReverseIterator() {
    return this->AllAccessType::getReverseIterator();
  }
  AllAccessType::const_reverse_self_iterator getReverseIterator() const {
    return this->AllAccessType::getReverseIterator();
  }
  DefsOnlyType::self_iterator getDefsIterator() {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::const_self_iterator getDefsIterator() const {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
    return this->DefsOnlyType::getReverseIterator();
  }
  DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
    return this->DefsOnlyType::getReverseIterator();
  }

protected:
  friend class MemoryDef;
  friend class MemoryPhi;
  friend class MemorySSA;
  friend class MemoryUse;
  friend class MemoryUseOrDef;

  /// Used by MemorySSA to change the block of a MemoryAccess when it is
  /// moved.
  void setBlock(BasicBlock *BB) { Block = BB; }

  /// Used for debugging and tracking things about MemoryAccesses.
  /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
  inline unsigned getID() const;

  MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
               BasicBlock *BB, unsigned NumOperands)
      : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
        Block(BB) {}

  // Use deleteValue() to delete a generic MemoryAccess.
  ~MemoryAccess() = default;

private:
  BasicBlock *Block;
};

template <>
struct ilist_alloc_traits<MemoryAccess> {
  static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
};

inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
  MA.print(OS);
  return OS;
}

/// Class that has the common methods + fields of memory uses/defs. It's
/// a little awkward to have, but there are many cases where we want either a
/// use or def, and there are many cases where uses are needed (defs aren't
/// acceptable), and vice-versa.
///
/// This class should never be instantiated directly; make a MemoryUse or
/// MemoryDef instead.
class MemoryUseOrDef : public MemoryAccess {
public:
  void *operator new(size_t) = delete;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  /// Get the instruction that this MemoryUse represents.
  Instruction *getMemoryInst() const { return MemoryInstruction; }

  /// Get the access that produces the memory state used by this Use.
  MemoryAccess *getDefiningAccess() const { return getOperand(0); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
  }

  // Sadly, these have to be public because they are needed in some of the
  // iterators.
  inline bool isOptimized() const;
  inline MemoryAccess *getOptimized() const;
  inline void setOptimized(MemoryAccess *);

  // Retrieve AliasResult type of the optimized access. Ideally this would be
  // returned by the caching walker and may go away in the future.
  Optional<AliasResult> getOptimizedAccessType() const {
    return OptimizedAccessAlias;
  }

  /// Reset the ID of what this MemoryUse was optimized to, causing it to
  /// be rewalked by the walker if necessary.
  /// This really should only be called by tests.
  inline void resetOptimized();

protected:
  friend class MemorySSA;
  friend class MemorySSAUpdater;

  MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
                 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
                 unsigned NumOperands)
      : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands),
        MemoryInstruction(MI), OptimizedAccessAlias(MayAlias) {
    setDefiningAccess(DMA);
  }

  // Use deleteValue() to delete a generic MemoryUseOrDef.
  ~MemoryUseOrDef() = default;

  void setOptimizedAccessType(Optional<AliasResult> AR) {
    OptimizedAccessAlias = AR;
  }

  void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false,
                         Optional<AliasResult> AR = MayAlias) {
    if (!Optimized) {
      setOperand(0, DMA);
      return;
    }
    setOptimized(DMA);
    setOptimizedAccessType(AR);
  }

private:
  Instruction *MemoryInstruction;
  Optional<AliasResult> OptimizedAccessAlias;
};

/// Represents read-only accesses to memory
///
/// In particular, the set of Instructions that will be represented by
/// MemoryUse's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Ref".
class MemoryUse final : public MemoryUseOrDef {
public:
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
      : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB,
                       /*NumOperands=*/1) {}

  // allocate space for exactly one operand
  void *operator new(size_t s) { return User::operator new(s, 1); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal;
  }

  void print(raw_ostream &OS) const;

  void setOptimized(MemoryAccess *DMA) {
    OptimizedID = DMA->getID();
    setOperand(0, DMA);
  }

  bool isOptimized() const {
    return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
  }

  MemoryAccess *getOptimized() const {
    return getDefiningAccess();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
  }

protected:
  friend class MemorySSA;

private:
  static void deleteMe(DerivedUser *Self);

  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
};

template <>
struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)

/// Represents a read-write access to memory, whether it is a must-alias,
/// or a may-alias.
///
/// In particular, the set of Instructions that will be represented by
/// MemoryDef's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
/// Note that, in order to provide def-def chains, all defs also have a use
/// associated with them. This use points to the nearest reaching
/// MemoryDef/MemoryPhi.
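///
/// For illustration (a sketch, not normative documentation), given some
/// MemoryDef `MD`, the chain of reaching defs can be walked through that use:
///
/// \code
///   for (MemoryAccess *MA = MD->getDefiningAccess(); MA;) {
///     if (auto *Def = dyn_cast<MemoryDef>(MA))
///       MA = Def->getDefiningAccess(); // Null once liveOnEntry is passed.
///     else
///       break; // Reached a MemoryPhi.
///   }
/// \endcode
///
/// The def_chain() helper declared later in this file packages a similar walk
/// as an iterator range.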
class MemoryDef final : public MemoryUseOrDef {
public:
  friend class MemorySSA;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
            unsigned Ver)
      : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB,
                       /*NumOperands=*/2),
        ID(Ver) {}

  // allocate space for exactly two operands
  void *operator new(size_t s) { return User::operator new(s, 2); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryDefVal;
  }

  void setOptimized(MemoryAccess *MA) {
    setOperand(1, MA);
    OptimizedID = MA->getID();
  }

  MemoryAccess *getOptimized() const {
    return cast_or_null<MemoryAccess>(getOperand(1));
  }

  bool isOptimized() const {
    return getOptimized() && OptimizedID == getOptimized()->getID();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
    setOperand(1, nullptr);
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

private:
  static void deleteMe(DerivedUser *Self);

  const unsigned ID;
  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
};

template <>
struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)

template <>
struct OperandTraits<MemoryUseOrDef> {
  static Use *op_begin(MemoryUseOrDef *MUD) {
    if (auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::op_begin(MU);
    return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD));
  }

  static Use *op_end(MemoryUseOrDef *MUD) {
    if (auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::op_end(MU);
    return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD));
  }

  static unsigned operands(const MemoryUseOrDef *MUD) {
    if (const auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::operands(MU);
    return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD));
  }
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)

/// Represents phi nodes for memory accesses.
///
/// These have the same semantics as regular phi nodes, with the exception that
/// only one phi will ever exist in a given basic block.
/// Guaranteeing one phi per block means guaranteeing there is only ever one
/// valid reaching MemoryDef/MemoryPhi along each path to the phi node.
/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
/// a MemoryPhi's operands.
/// That is, given
/// if (a) {
///   store %a
///   store %b
/// }
/// it *must* be transformed into
/// if (a) {
///   1 = MemoryDef(liveOnEntry)
///   store %a
///   2 = MemoryDef(1)
///   store %b
/// }
/// and *not*
/// if (a) {
///   1 = MemoryDef(liveOnEntry)
///   store %a
///   2 = MemoryDef(liveOnEntry)
///   store %b
/// }
/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
/// end of the branch, and if there are not two phi nodes, one will be
/// disconnected completely from the SSA graph below that point.
/// Because MemoryUse's do not generate new definitions, they do not have this
/// issue.
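///
/// As a sketch only (assuming `MSSA` is a MemorySSA instance and `BB` is a
/// block that has a MemoryPhi), the incoming state per predecessor can be
/// inspected like so:
///
/// \code
///   if (MemoryPhi *MP = MSSA.getMemoryAccess(BB))
///     for (unsigned I = 0, E = MP->getNumIncomingValues(); I != E; ++I)
///       errs() << MP->getIncomingBlock(I)->getName() << " -> "
///              << *MP->getIncomingValue(I) << "\n";
/// \endcode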
class MemoryPhi final : public MemoryAccess {
  // allocate space for exactly zero operands
  void *operator new(size_t s) { return User::operator new(s); }

public:
  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
      : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
        ReservedSpace(NumPreds) {
    allocHungoffUses(ReservedSpace);
  }

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.
  using block_iterator = BasicBlock **;
  using const_block_iterator = BasicBlock *const *;

  block_iterator block_begin() {
    auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
    return reinterpret_cast<block_iterator>(Ref + 1);
  }

  const_block_iterator block_begin() const {
    const auto *Ref =
        reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
    return reinterpret_cast<const_block_iterator>(Ref + 1);
  }

  block_iterator block_end() { return block_begin() + getNumOperands(); }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  iterator_range<block_iterator> blocks() {
    return make_range(block_begin(), block_end());
  }

  iterator_range<const_block_iterator> blocks() const {
    return make_range(block_begin(), block_end());
  }

  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// Return the number of incoming edges
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// Return incoming value number \p I
  MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
  void setIncomingValue(unsigned I, MemoryAccess *V) {
    assert(V && "PHI node got a null value!");
    setOperand(I, V);
  }

  static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
  static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }

  /// Return incoming basic block number \p I.
  BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }

  /// Return incoming basic block corresponding
  /// to an operand of the PHI.
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// Return incoming basic block corresponding
  /// to value use iterator.
  BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned I, BasicBlock *BB) {
    assert(BB && "PHI node got a null basic block!");
    block_begin()[I] = BB;
  }

  /// Add an incoming value to the end of the PHI list
  void addIncoming(MemoryAccess *V, BasicBlock *BB) {
    if (getNumOperands() == ReservedSpace)
      growOperands(); // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// Return the first index of the specified basic
  /// block in the value list for this PHI. Returns -1 if no instance.
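  ///
  /// A possible use, sketched with placeholder names (`Phi`, `NewDef`, and
  /// `Pred` are not part of this interface):
  ///
  /// \code
  ///   Phi->addIncoming(NewDef, Pred);
  ///   assert(Phi->getBasicBlockIndex(Pred) >= 0 && "Pred was just added");
  /// \endcode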
  int getBasicBlockIndex(const BasicBlock *BB) const {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (block_begin()[I] == BB)
        return I;
    return -1;
  }

  MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }

  // After deleting incoming position I, the order of incoming entries may
  // change.
  void unorderedDeleteIncoming(unsigned I) {
    unsigned E = getNumOperands();
    assert(I < E && "Cannot remove out of bounds Phi entry.");
    // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
    // itself should be deleted.
    assert(E >= 2 && "Can only remove incoming values from MemoryPhis with "
                     "at least 2 values.");
    setIncomingValue(I, getIncomingValue(E - 1));
    setIncomingBlock(I, block_begin()[E - 1]);
    setOperand(E - 1, nullptr);
    block_begin()[E - 1] = nullptr;
    setNumHungOffUseOperands(getNumOperands() - 1);
  }

  // After deleting entries that satisfy Pred, remaining entries may have
  // changed order.
  template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
        unorderedDeleteIncoming(I);
        E = getNumOperands();
        --I;
      }
    assert(getNumOperands() >= 1 &&
           "Cannot remove all incoming blocks in a MemoryPhi.");
  }

  // After deleting incoming block BB, the incoming blocks order may be changed.
  void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
    unorderedDeleteIncomingIf(
        [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
  }

  // After deleting incoming memory access MA, the incoming accesses order may
  // be changed.
  void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
    unorderedDeleteIncomingIf(
        [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
  }

  static bool classof(const Value *V) {
    return V->getValueID() == MemoryPhiVal;
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

protected:
  friend class MemorySSA;

  /// This is more complicated than the generic
  /// User::allocHungoffUses, because we have to allocate Uses for the incoming
  /// values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

private:
  // For debugging only
  const unsigned ID;
  unsigned ReservedSpace;

  /// This grows the operand list in response to a push_back style of
  /// operation. This grows the number of ops by 1.5 times.
  void growOperands() {
    unsigned E = getNumOperands();
    // 2 op PHI nodes are VERY common, so reserve at least enough for that.
    ReservedSpace = std::max(E + E / 2, 2u);
    growHungoffUses(ReservedSpace, /* IsPhi */ true);
  }

  static void deleteMe(DerivedUser *Self);
};

inline unsigned MemoryAccess::getID() const {
  assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
         "only memory defs and phis have ids");
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getID();
  return cast<MemoryPhi>(this)->getID();
}

inline bool MemoryUseOrDef::isOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->isOptimized();
  return cast<MemoryUse>(this)->isOptimized();
}

inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getOptimized();
  return cast<MemoryUse>(this)->getOptimized();
}

inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->setOptimized(MA);
  else
    cast<MemoryUse>(this)->setOptimized(MA);
}

inline void MemoryUseOrDef::resetOptimized() {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->resetOptimized();
  else
    cast<MemoryUse>(this)->resetOptimized();
}

template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)

/// Encapsulates MemorySSA, including all data associated with memory
/// accesses.
class MemorySSA {
public:
  MemorySSA(Function &, AliasAnalysis *, DominatorTree *);

  // MemorySSA must remain where it's constructed; Walkers it creates store
  // pointers to it.
  MemorySSA(MemorySSA &&) = delete;

  ~MemorySSA();

  MemorySSAWalker *getWalker();
  MemorySSAWalker *getSkipSelfWalker();

  /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
  /// access associated with it. If passed a basic block, this returns the
  /// memory phi node that exists for that block, if there is one. Otherwise,
  /// this will get a MemoryUseOrDef.
  MemoryUseOrDef *getMemoryAccess(const Instruction *I) const {
    return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
  }

  MemoryPhi *getMemoryAccess(const BasicBlock *BB) const {
    return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
  }

  void dump() const;
  void print(raw_ostream &) const;

  /// Return true if \p MA represents the live on entry value
  ///
  /// Loads and stores from pointer arguments and other global values may be
  /// defined by memory operations that do not occur in the current function, so
  /// they may be live on entry to the function. MemorySSA represents such
  /// memory state by the live on entry definition, which is guaranteed to occur
  /// before any other memory access in the function.
  inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
    return MA == LiveOnEntryDef.get();
  }

  inline MemoryAccess *getLiveOnEntryDef() const {
    return LiveOnEntryDef.get();
  }

  // Sadly, iplists, by default, own and delete pointers added to the
  // list. It's not currently possible to have two iplists for the same type,
  // where one owns the pointers, and one does not. This is because the traits
  // are per-type, not per-tag. If this ever changes, we should make the
  // DefList an iplist.
  using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsList =
      simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  /// Return the list of MemoryAccess's for a given basic block.
  ///
  /// This list is not modifiable by the user.
  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
    return getWritableBlockAccesses(BB);
  }

  /// Return the list of MemoryDef's and MemoryPhi's for a given basic
  /// block.
  ///
  /// This list is not modifiable by the user.
  const DefsList *getBlockDefs(const BasicBlock *BB) const {
    return getWritableBlockDefs(BB);
  }

  /// Given two memory accesses in the same basic block, determine
  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given two memory accesses in potentially different blocks,
  /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
  /// dominates Use \p B.
  bool dominates(const MemoryAccess *A, const Use &B) const;

  /// Verify that MemorySSA is self consistent (i.e., definitions dominate
  /// all uses, uses appear in the right places). This is used by unit tests.
  void verifyMemorySSA() const;

  /// Used in various insertion functions to specify whether we are talking
  /// about the beginning or end of a block.
  enum InsertionPlace { Beginning, End };

protected:
  // Used by MemorySSA annotator, dumpers, and wrapper pass
  friend class MemorySSAAnnotatedWriter;
  friend class MemorySSAPrinterLegacyPass;
  friend class MemorySSAUpdater;

  void verifyDefUses(Function &F) const;
  void verifyDomination(Function &F) const;
  void verifyOrdering(Function &F) const;
  void verifyDominationNumbers(const Function &F) const;

  // This is used by the use optimizer and updater.
  AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
    auto It = PerBlockAccesses.find(BB);
    return It == PerBlockAccesses.end() ? nullptr : It->second.get();
  }

  // This is used by the use optimizer and updater.
  DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
    auto It = PerBlockDefs.find(BB);
    return It == PerBlockDefs.end() ? nullptr : It->second.get();
  }

  // These are used by the updater to perform various internal MemorySSA
  // machinations. They do not always leave the IR in a correct state, and
  // rely on the updater to fix up what they break, so they are not public.

  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
  void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);

  // Rename the dominator tree branch rooted at BB.
  void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited) {
    renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
  }

  void removeFromLookups(MemoryAccess *);
  void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
  void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
                               InsertionPlace);
  void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
                             AccessList::iterator);
  MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *,
                                      const MemoryUseOrDef *Template = nullptr);

private:
  template <class AliasAnalysisType> class ClobberWalkerBase;
  template <class AliasAnalysisType> class CachingWalker;
  template <class AliasAnalysisType> class SkipSelfWalker;
  class OptimizeUses;

  CachingWalker<AliasAnalysis> *getWalkerImpl();
  void buildMemorySSA(BatchAAResults &BAA);
  void optimizeUses();

  void prepareForMoveTo(MemoryAccess *, BasicBlock *);
  void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;

  using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
  using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;

  void
  determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
  void markUnreachableAsLiveOnEntry(BasicBlock *BB);
  bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
  MemoryPhi *createMemoryPhi(BasicBlock *BB);
  template <typename AliasAnalysisType>
  MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *,
                                  const MemoryUseOrDef *Template = nullptr);
  MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
  void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
  void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited,
                  bool SkipVisited = false, bool RenameAllUses = false);
  AccessList *getOrCreateAccessList(const BasicBlock *);
  DefsList *getOrCreateDefsList(const BasicBlock *);
  void renumberBlock(const BasicBlock *) const;
  AliasAnalysis *AA;
  DominatorTree *DT;
  Function &F;

  // Memory SSA mappings
  DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;

  // These two mappings contain the main block to access/def mappings for
  // MemorySSA. The list contained in PerBlockAccesses really owns all the
  // MemoryAccesses.
  // Both maps maintain the invariant that if a block is found in them, the
  // corresponding list is not empty, and if a block is not found in them, the
  // corresponding list is empty.
  AccessMap PerBlockAccesses;
  DefsMap PerBlockDefs;
  std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;

  // Domination mappings
  // Note that the numbering is local to a block, even though the map is
  // global.
  mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
  mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;

  // Memory SSA building info
  std::unique_ptr<ClobberWalkerBase<AliasAnalysis>> WalkerBase;
  std::unique_ptr<CachingWalker<AliasAnalysis>> Walker;
  std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker;
  unsigned NextID;
};

// Internal MemorySSA utils, for use by MemorySSA classes and walkers
class MemorySSAUtil {
protected:
  friend class GVNHoist;
  friend class MemorySSAWalker;

  // This function should not be used by new passes.
  static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                  AliasAnalysis &AA);
};

// This pass does eager building and then printing of MemorySSA. It is used by
// the tests to be able to build, dump, and verify Memory SSA.
class MemorySSAPrinterLegacyPass : public FunctionPass {
public:
  MemorySSAPrinterLegacyPass();

  bool runOnFunction(Function &) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  static char ID;
};

/// An analysis that produces \c MemorySSA for a function.
///
class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
  friend AnalysisInfoMixin<MemorySSAAnalysis>;

  static AnalysisKey Key;

public:
  // Wrap MemorySSA result to ensure address stability of internal MemorySSA
  // pointers after construction. Use a wrapper class instead of plain
  // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
  struct Result {
    Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}

    MemorySSA &getMSSA() { return *MSSA.get(); }

    std::unique_ptr<MemorySSA> MSSA;

    bool invalidate(Function &F, const PreservedAnalyses &PA,
                    FunctionAnalysisManager::Invalidator &Inv);
  };

  Result run(Function &F, FunctionAnalysisManager &AM);
};

/// Printer pass for \c MemorySSA.
class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
  raw_ostream &OS;

public:
  explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Verifier pass for \c MemorySSA.
struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Legacy analysis pass which computes \c MemorySSA.
class MemorySSAWrapperPass : public FunctionPass {
public:
  MemorySSAWrapperPass();

  static char ID;

  bool runOnFunction(Function &) override;
  void releaseMemory() override;
  MemorySSA &getMSSA() { return *MSSA; }
  const MemorySSA &getMSSA() const { return *MSSA; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  void verifyAnalysis() const override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  std::unique_ptr<MemorySSA> MSSA;
};

/// This is the generic walker interface for walkers of MemorySSA.
/// Walkers are used to be able to further disambiguate the def-use chains
/// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
/// you.
/// In particular, while the def-use chains provide basic information, and are
/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
/// MemoryUse as AliasAnalysis considers it, a user may want better or other
/// information. For example, they may want to use SCEV info to further
/// disambiguate memory accesses, or they may want the nearest dominating
/// may-aliasing MemoryDef for a call or a store. This API enables a
/// standardized interface to getting and using that info.
class MemorySSAWalker {
public:
  MemorySSAWalker(MemorySSA *);
  virtual ~MemorySSAWalker() = default;

  using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;

  /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
  /// will give you the nearest dominating MemoryAccess that Mod's the location
  /// the instruction accesses (by skipping any def which AA can prove does not
  /// alias the location(s) accessed by the instruction given).
  ///
  /// Note that this will return a single access, and it must dominate the
  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
  /// this will return the MemoryPhi, not the operand. This means that
  /// given:
  /// if (a) {
  ///   1 = MemoryDef(liveOnEntry)
  ///   store %a
  /// } else {
  ///   2 = MemoryDef(liveOnEntry)
  ///   store %b
  /// }
  /// 3 = MemoryPhi(2, 1)
  /// MemoryUse(3)
  /// load %a
  ///
  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
  /// in the if (a) branch.
  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
    MemoryAccess *MA = MSSA->getMemoryAccess(I);
    assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
    return getClobberingMemoryAccess(MA);
  }

  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
  /// but takes a MemoryAccess instead of an Instruction.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;

  /// Given a potentially clobbering memory access and a new location,
  /// calling this will give you the nearest dominating clobbering MemoryAccess
  /// (by skipping non-aliasing def links).
  ///
  /// This version of the function is mainly used to disambiguate phi translated
  /// pointers, where the value of a pointer may have changed from the initial
  /// memory access. Note that this expects to be handed either a MemoryUse,
  /// or an already potentially clobbering access. Unlike the above API, if
  /// given a MemoryDef that clobbers the pointer as the starting access, it
  /// will return that MemoryDef, whereas the above would return the clobber
  /// starting from the use side of the memory def.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  const MemoryLocation &) = 0;

  /// Given a memory access, invalidate anything this walker knows about
  /// that access.
  /// This API is used by walkers that store information to perform basic cache
  /// invalidation. This will be called by MemorySSA at appropriate times for
  /// the walker it uses or returns.
  virtual void invalidateInfo(MemoryAccess *) {}

protected:
  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
                          // constructor.
  MemorySSA *MSSA;
};

/// A MemorySSAWalker that does no alias queries, or anything else. It
/// simply returns the links as they were constructed by the builder.
class DoNothingMemorySSAWalker final : public MemorySSAWalker {
public:
  // Keep the overrides below from hiding the Instruction overload of
  // getClobberingMemoryAccess.
  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          const MemoryLocation &) override;
};

using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;

/// Iterator base class used to implement const and non-const iterators
/// over the defining accesses of a MemoryAccess.
template <class T>
class memoryaccess_def_iterator_base
    : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
                                  std::forward_iterator_tag, T, ptrdiff_t, T *,
                                  T *> {
  using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;

public:
  memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
  memoryaccess_def_iterator_base() = default;

  bool operator==(const memoryaccess_def_iterator_base &Other) const {
    return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
  }

  // This is a bit ugly, but for MemoryPhis, unlike PHINodes, you can't get the
  // block from the operand in constant time (in a PHINode, the uselist has
  // both, so it's just subtraction). We provide it as part of the
  // iterator to avoid callers having to do a linear walk to get the block.
  // If the operation becomes constant time on MemoryPhis, this bit of
  // abstraction breaking should be removed.
  BasicBlock *getPhiArgBlock() const {
    MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
    assert(MP && "Tried to get phi arg block when not iterating over a PHI");
    return MP->getIncomingBlock(ArgNo);
  }

  typename BaseT::iterator::pointer operator*() const {
    assert(Access && "Tried to access past the end of our iterator");
    // Go to the first argument for phis, and the defining access for everything
    // else.
    if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
      return MP->getIncomingValue(ArgNo);
    return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
  }

  using BaseT::operator++;
  memoryaccess_def_iterator_base &operator++() {
    assert(Access && "Hit end of iterator");
    if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
      if (++ArgNo >= MP->getNumIncomingValues()) {
        ArgNo = 0;
        Access = nullptr;
      }
    } else {
      Access = nullptr;
    }
    return *this;
  }

private:
  T *Access = nullptr;
  unsigned ArgNo = 0;
};

inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
  return memoryaccess_def_iterator(this);
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
  return const_memoryaccess_def_iterator(this);
}

inline memoryaccess_def_iterator MemoryAccess::defs_end() {
  return memoryaccess_def_iterator();
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
  return const_memoryaccess_def_iterator();
}

/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// and uses in the inverse case.
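///
/// A sketch of what this enables, assuming `MA` is any MemoryAccess in a
/// function whose MemorySSA has been computed:
///
/// \code
///   #include "llvm/ADT/DepthFirstIterator.h"
///   for (MemoryAccess *Node : depth_first(MA))
///     Node->dump(); // Visits MA and everything reachable through def links.
/// \endcode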
template <> struct GraphTraits<MemoryAccess *> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = memoryaccess_def_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
};

template <> struct GraphTraits<Inverse<MemoryAccess *>> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = MemoryAccess::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
};

/// Provide an iterator that walks defs, giving both the memory access,
/// and the current pointer location, updating the pointer location as it
/// changes due to phi node translation.
///
/// This iterator, while somewhat specialized, is what most clients actually
/// want when walking upwards through MemorySSA def chains. It takes a pair of
/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
/// memory location through phi nodes for the user.
class upward_defs_iterator
    : public iterator_facade_base<upward_defs_iterator,
                                  std::forward_iterator_tag,
                                  const MemoryAccessPair> {
  using BaseT = upward_defs_iterator::iterator_facade_base;

public:
  upward_defs_iterator(const MemoryAccessPair &Info)
      : DefIterator(Info.first), Location(Info.second),
        OriginalAccess(Info.first) {
    CurrentPair.first = nullptr;

    WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
    fillInCurrentPair();
  }

  upward_defs_iterator() { CurrentPair.first = nullptr; }

  bool operator==(const upward_defs_iterator &Other) const {
    return DefIterator == Other.DefIterator;
  }

  BaseT::iterator::reference operator*() const {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of our iterator");
    return CurrentPair;
  }

  using BaseT::operator++;
  upward_defs_iterator &operator++() {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of the iterator");
    ++DefIterator;
    if (DefIterator != OriginalAccess->defs_end())
      fillInCurrentPair();
    return *this;
  }

  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }

private:
  void fillInCurrentPair() {
    CurrentPair.first = *DefIterator;
    if (WalkingPhi && Location.Ptr) {
      PHITransAddr Translator(
          const_cast<Value *>(Location.Ptr),
          OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
      if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
                                        DefIterator.getPhiArgBlock(), nullptr,
                                        false))
        if (Translator.getAddr() != Location.Ptr) {
          CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
          return;
        }
    }
    CurrentPair.second = Location;
  }

  MemoryAccessPair CurrentPair;
  memoryaccess_def_iterator DefIterator;
  MemoryLocation Location;
  MemoryAccess *OriginalAccess = nullptr;
  bool WalkingPhi = false;
};

inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
  return upward_defs_iterator(Pair);
}

inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
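
// For illustration only (a sketch, not normative documentation): walking the
// upward defs of a load, assuming `MSSA` is a computed MemorySSA instance and
// `LI` is a LoadInst in the same function:
//
//   MemoryAccessPair Start(MSSA.getMemoryAccess(LI), MemoryLocation::get(LI));
//   for (const MemoryAccessPair &P : upward_defs(Start))
//     errs() << *P.first << "\n"; // P.second is the (phi-translated) location.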

inline iterator_range<upward_defs_iterator>
upward_defs(const MemoryAccessPair &Pair) {
  return make_range(upward_defs_begin(Pair), upward_defs_end());
}

/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
/// comparing against a null def_chain_iterator, this will compare equal only
/// after walking said Phi/liveOnEntry.
///
/// The UseOptimizedChain flag specifies whether to walk the clobbering
/// access chain, or all the accesses.
///
/// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
/// a MemoryDef will walk all MemoryDefs above it in the program until it hits
/// a phi node. The optimized chain walks the clobbering access of a store.
/// So if you are just trying to find, given a store, what the next
/// thing that would clobber the same memory is, you want the optimized chain.
template <class T, bool UseOptimizedChain = false>
struct def_chain_iterator
    : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
                                  std::forward_iterator_tag, MemoryAccess *> {
  def_chain_iterator() : MA(nullptr) {}
  def_chain_iterator(T MA) : MA(MA) {}

  T operator*() const { return MA; }

  def_chain_iterator &operator++() {
    // N.B. liveOnEntry has a null defining access.
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
      if (UseOptimizedChain && MUD->isOptimized())
        MA = MUD->getOptimized();
      else
        MA = MUD->getDefiningAccess();
    } else {
      MA = nullptr;
    }

    return *this;
  }

  bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }

private:
  T MA;
};

template <class T>
inline iterator_range<def_chain_iterator<T>>
def_chain(T MA, MemoryAccess *UpTo = nullptr) {
#ifdef EXPENSIVE_CHECKS
  assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
         "UpTo isn't in the def chain!");
#endif
  return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
}

template <class T>
inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
  return make_range(def_chain_iterator<T, true>(MA),
                    def_chain_iterator<T, true>(nullptr));
}

} // end namespace llvm

#endif // LLVM_ANALYSIS_MEMORYSSA_H