//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface to
// a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memdep"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/PredIteratorCache.h"
using namespace llvm;

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr,
          "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
static const int BlockScanLimit = 100;
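// With this limit, a query that would otherwise walk an enormous block gives
// up after 100 instructions and returns MemDepResult::getUnknown(), keeping
// the cost of any single query bounded.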

char MemoryDependenceAnalysis::ID = 0;

// Register this pass...
INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep",
                "Memory Dependence Analysis", false, true)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep",
                      "Memory Dependence Analysis", false, true)

MemoryDependenceAnalysis::MemoryDependenceAnalysis()
: FunctionPass(ID), PredCache(0) {
  initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry());
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
}

/// Clean up memory between runs.
void MemoryDependenceAnalysis::releaseMemory() {
  LocalDeps.clear();
  NonLocalDeps.clear();
  NonLocalPointerDeps.clear();
  ReverseLocalDeps.clear();
  ReverseNonLocalDeps.clear();
  ReverseNonLocalPtrDeps.clear();
  PredCache->clear();
}



/// getAnalysisUsage - Does not modify anything.  It uses Alias Analysis.
///
void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AliasAnalysis>();
}

bool MemoryDependenceAnalysis::runOnFunction(Function &) {
  AA = &getAnalysis<AliasAnalysis>();
  TD = getAnalysisIfAvailable<DataLayout>();
  DT = getAnalysisIfAvailable<DominatorTree>();
  if (!PredCache)
    PredCache.reset(new PredIteratorCache());
  return false;
}

/// RemoveFromReverseMap - This is a helper function that removes Val from
/// 'Inst's set in ReverseMap.  If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void RemoveFromReverseMap(DenseMap<Instruction*,
                                 SmallPtrSet<KeyTy, 4> > &ReverseMap,
                                 Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator
    InstIt = ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!"); (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}
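// For example, if the cached local dependency of an instruction %b is %a,
// then ReverseLocalDeps[%a] contains %b; when that cache entry is discarded,
// RemoveFromReverseMap(ReverseLocalDeps, %a, %b) keeps the two maps in sync.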

/// GetLocation - If the given instruction references a specific memory
/// location, fill in Loc with the details, otherwise set Loc.Ptr to null.
/// Return a ModRefInfo value describing the general behavior of the
/// instruction.
static
AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
                                        AliasAnalysis::Location &Loc,
                                        AliasAnalysis *AA) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = AA->getLocation(LI);
      return AliasAnalysis::Ref;
    }
    if (LI->getOrdering() == Monotonic) {
      Loc = AA->getLocation(LI);
      return AliasAnalysis::ModRef;
    }
    Loc = AliasAnalysis::Location();
    return AliasAnalysis::ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = AA->getLocation(SI);
      return AliasAnalysis::Mod;
    }
    if (SI->getOrdering() == Monotonic) {
      Loc = AA->getLocation(SI);
      return AliasAnalysis::ModRef;
    }
    Loc = AliasAnalysis::Location();
    return AliasAnalysis::ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = AA->getLocation(V);
    return AliasAnalysis::ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, AA->getTargetLibraryInfo())) {
    // Calls to free() deallocate the entire structure.
    Loc = AliasAnalysis::Location(CI->getArgOperand(0));
    return AliasAnalysis::Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      Loc = AliasAnalysis::Location(II->getArgOperand(1),
                                    cast<ConstantInt>(II->getArgOperand(0))
                                      ->getZExtValue(),
                                    II->getMetadata(LLVMContext::MD_tbaa));
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    case Intrinsic::invariant_end:
      Loc = AliasAnalysis::Location(II->getArgOperand(2),
                                    cast<ConstantInt>(II->getArgOperand(1))
                                      ->getZExtValue(),
                                    II->getMetadata(LLVMContext::MD_tbaa));
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    default:
      break;
    }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return AliasAnalysis::ModRef;
  if (Inst->mayReadFromMemory())
    return AliasAnalysis::Ref;
  return AliasAnalysis::NoModRef;
}
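// For example, for an unordered "store i32 %v, i32* %P", GetLocation fills
// Loc in with %P's location and returns Mod; for a volatile load, or one
// with ordering stronger than monotonic, it leaves Loc.Ptr null and
// conservatively returns ModRef.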

/// getCallSiteDependencyFrom - Private helper for finding the local
/// dependencies of a call site.
MemDepResult MemoryDependenceAnalysis::
getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
                          BasicBlock::iterator ScanIt, BasicBlock *BB) {
  unsigned Limit = BlockScanLimit;

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    Instruction *Inst = --ScanIt;

    // If this inst is a memory op, get the pointer it accessed.
    AliasAnalysis::Location Loc;
    AliasAnalysis::ModRefResult MR = GetLocation(Inst, Loc, AA);
    if (Loc.Ptr) {
      // A simple instruction.
      if (AA->getModRefInfo(CS, Loc) != AliasAnalysis::NoModRef)
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (CallSite InstCS = cast<Value>(Inst)) {
      // Debug intrinsics don't cause dependences.
      if (isa<DbgInfoIntrinsic>(Inst)) continue;
      // If these two calls do not interfere, look past it.
      switch (AA->getModRefInfo(CS, InstCS)) {
      case AliasAnalysis::NoModRef:
        // If the two calls are the same, return Inst as a Def, so that
        // CS can be found redundant and eliminated.
        if (isReadOnlyCall && !(MR & AliasAnalysis::Mod) &&
            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
        // keep scanning.
        continue;
      default:
        return MemDepResult::getClobber(Inst);
      }
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory then assume that this is a dependency.
    if (MR != AliasAnalysis::NoModRef)
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found.  If this is the entry block of the function, the
  // dependence is outside the function (non-func-local); otherwise it is
  // non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
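// For example, given two identical calls to a read-only function with no
// clobber between them:
//   %a = call i32 @f(i32* %P)
//   %b = call i32 @f(i32* %P)
// a query on %b reports %a as a Def, which lets a client such as GVN
// eliminate the second call as redundant.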

/// isLoadLoadClobberIfExtendedToFullWidth - Return true if LI is a load that
/// would fully overlap MemLoc if done as a wider legal integer load.
///
/// MemLocBase, MemLocOffs are lazily computed here the first time the
/// base/offset of MemLoc is needed.
static bool
isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
                                       const Value *&MemLocBase,
                                       int64_t &MemLocOffs,
                                       const LoadInst *LI,
                                       const DataLayout *TD) {
  // If we have no target data, we can't do this.
  if (TD == 0) return false;

  // If we haven't already computed the base/offset of MemLoc, do so now.
  if (MemLocBase == 0)
    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, TD);

  unsigned Size = MemoryDependenceAnalysis::
    getLoadLoadClobberFullWidthSize(MemLocBase, MemLocOffs, MemLoc.Size,
                                    LI, *TD);
  return Size != 0;
}

/// getLoadLoadClobberFullWidthSize - This is a little bit of analysis that
/// looks at a memory location for a load (specified by MemLocBase, MemLocOffs,
/// and MemLocSize) and compares it against a load.  If the specified load could
/// be safely widened to a larger integer load that is 1) still efficient,
/// 2) safe for the target, and 3) would provide the specified memory
/// location value, then this function returns the size in bytes of the
/// load width to use.  If not, this returns zero.
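/// For example, assuming i16 and i32 are legal integer types for the target,
/// a query for the byte at P+3 (MemLocOffs = 3, MemLocSize = 1) against
/// "load i8* %P, align 4" has LIOffs = 0 and returns 4: widening the i8 load
/// to an i32 covers bytes P+0..P+3 without exceeding the known alignment.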
unsigned MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
                                unsigned MemLocSize, const LoadInst *LI,
                                const DataLayout &TD) {
  // We can only extend simple integer loads.
  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;

  // Load widening is hostile to ThreadSanitizer: it may cause false positives
  // or make the reports more cryptic (access sizes are wrong).
  if (LI->getParent()->getParent()->getAttributes().
      hasAttribute(AttributeSet::FunctionIndex, Attribute::SanitizeThread))
    return 0;

  // Get the base of this load.
  int64_t LIOffs = 0;
  const Value *LIBase =
    GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &TD);

  // If the two pointers are not based on the same pointer, we can't tell that
  // they are related.
  if (LIBase != MemLocBase) return 0;

  // Okay, the two values are based on the same pointer, but returned as
  // no-alias.  This happens when we have things like two byte loads at "P+1"
  // and "P+3".  Check to see if increasing the size of the "LI" load up to its
  // alignment (or the largest native integer type) will allow us to load all
  // the bits required by MemLoc.

  // If MemLoc is before LI, then no widening of LI will help us out.
  if (MemLocOffs < LIOffs) return 0;

  // Get the alignment of the load in bytes.  We assume that it is safe to load
  // any legal integer up to this size without a problem.  For example, if we're
  // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can
  // widen it up to an i32 load.  If it is known 2-byte aligned, we can widen it
  // to i16.
  unsigned LoadAlign = LI->getAlignment();

  int64_t MemLocEnd = MemLocOffs+MemLocSize;

  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
  if (LIOffs+LoadAlign < MemLocEnd) return 0;

  // This is the size of the load to try.  Start with the next larger power of
  // two.
  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits()/8U;
  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);

  while (1) {
    // If this load size is bigger than our known alignment or would not fit
    // into a native integer register, then we fail.
    if (NewLoadByteSize > LoadAlign ||
        !TD.fitsInLegalInteger(NewLoadByteSize*8))
      return 0;

    if (LIOffs+NewLoadByteSize > MemLocEnd &&
        LI->getParent()->getParent()->getAttributes().
          hasAttribute(AttributeSet::FunctionIndex, Attribute::SanitizeAddress))
      // We will be reading past the location accessed by the original program.
      // While this is safe in a regular build, Address Safety analysis tools
      // may start reporting false warnings. So, don't do widening.
      return 0;

    // If a load of this width would include all of MemLoc, then we succeed.
    if (LIOffs+NewLoadByteSize >= MemLocEnd)
      return NewLoadByteSize;

    NewLoadByteSize <<= 1;
  }
}

/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends.  If isLoad is true, this routine ignores may-aliases with
/// read-only operations.  If isLoad is false, this routine ignores may-aliases
/// with reads from read-only locations.  If possible, pass the query
/// instruction as well; this function may take advantage of the metadata
/// annotated to the query instruction to refine the result.
MemDepResult MemoryDependenceAnalysis::
getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
                         BasicBlock::iterator ScanIt, BasicBlock *BB,
                         Instruction *QueryInst) {

  const Value *MemLocBase = 0;
  int64_t MemLocOffset = 0;
  unsigned Limit = BlockScanLimit;
  bool isInvariantLoad = false;
  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != 0)
      isInvariantLoad = true;
  }

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = --ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II)) continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them.  It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        if (AA->isMustAlias(AliasAnalysis::Location(II->getArgOperand(1)),
                            MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
    }

    // Values depend on loads if the pointers are must aliased.  This means that
    // a load depends on another must aliased load from the same value.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // Atomic loads have complications involved.
      // FIXME: This is overly conservative.
      if (!LI->isUnordered())
        return MemDepResult::getClobber(LI);

      AliasAnalysis::Location LoadLoc = AA->getLocation(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == AliasAnalysis::NoAlias) {
          // If this is an over-aligned integer load (for example,
          // "load i8* %P, align 4") see if it would obviously overlap with the
          // queried location if widened to a larger load (e.g. if the queried
          // location is 1 byte at P+1).  If so, return it as a load/load
          // clobber result, allowing the client to decide to widen the load if
          // it wants to.
          if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
            if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
                isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
                                                       MemLocOffset, LI, TD))
              return MemDepResult::getClobber(Inst);

          continue;
        }

        // Must aliased loads are defs of each other.
        if (R == AliasAnalysis::MustAlias)
          return MemDepResult::getDef(Inst);

#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
      // in terms of clobbering loads, but since it does this by looking
      // at the clobbering load directly, it doesn't know about any
      // phi translation that may have happened along the way.

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == AliasAnalysis::PartialAlias)
          return MemDepResult::getClobber(Inst);
#endif

        // Otherwise this is a may-alias load; loads don't clobber one
        // another, so there is no dependence here.  Keep scanning.
        continue;
      }

      // Stores don't depend on other no-aliased accesses.
      if (R == AliasAnalysis::NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (AA->pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered())
        return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it.  Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (AA->getModRefInfo(SI, MemLoc) == AliasAnalysis::NoModRef)
        continue;

      // Ok, this store might clobber the query pointer.  Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      AliasAnalysis::Location StoreLoc = AA->getLocation(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(StoreLoc, MemLoc);

      if (R == AliasAnalysis::NoAlias)
        continue;
      if (R == AliasAnalysis::MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def.  This means that there is no dependence and
    // the access can be optimized based on that.  For example, a load could
    // turn into undef.
    // Note: Only determine this to be a malloc if Inst is the malloc call, not
    // a subsequent bitcast of the malloc call result.  There can be stores to
    // the malloced memory between the malloc call and its bitcast uses, and we
    // need to continue scanning until the malloc call.
    const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo();
    if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) {
      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);

      if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
      // Be conservative if the accessed pointer may alias the allocation.
      if (AA->alias(Inst, AccessPtr) != AliasAnalysis::NoAlias)
        return MemDepResult::getClobber(Inst);
      // If the allocation is not aliased and does not read memory (like
      // strdup), it is safe to ignore.
      if (isa<AllocaInst>(Inst) ||
          isMallocLikeFn(Inst, TLI) || isCallocLikeFn(Inst, TLI))
        continue;
    }

    // See if this instruction (e.g. a call or vaarg) mod/refs the pointer.
    AliasAnalysis::ModRefResult MR = AA->getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (MR == AliasAnalysis::ModRef)
      MR = AA->callCapturesBefore(Inst, MemLoc, DT);
    switch (MR) {
    case AliasAnalysis::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case AliasAnalysis::Mod:
      return MemDepResult::getClobber(Inst);
    case AliasAnalysis::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
    default:
      // Otherwise, there is a potential dependence.  Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found.  If this is the entry block of the function, the
  // dependence is outside the function (non-func-local); otherwise it is
  // non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

/// getDependency - Return the instruction on which a memory operation
/// depends.
MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result.
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it.  Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found.  If this is the entry block of the function, the
    // dependence is outside the function (non-func-local); otherwise it is
    // non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    AliasAnalysis::Location MemLoc;
    AliasAnalysis::ModRefResult MR = GetLocation(QueryInst, MemLoc, AA);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !(MR & AliasAnalysis::Mod);
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
                                            QueryParent, QueryInst);
    } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
      CallSite QueryCS(QueryInst);
      bool isReadOnly = AA->onlyReadsMemory(QueryCS);
      LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
                                             QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}
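// A minimal client-side sketch (with 'MD' standing for a handle to this
// analysis):
//   MemDepResult Dep = MD->getDependency(Load);
//   if (Dep.isDef())
//     ...  // Dep.getInst() produces exactly the queried location.
//   else if (Dep.isNonLocal())
//     ...  // fall back to a non-local query.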

#ifndef NDEBUG
/// AssertSorted - This method is used when -debug is specified to verify that
/// cache arrays are properly kept sorted.
static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1) Count = Cache.size();
  if (Count == 0) return;

  for (unsigned i = 1; i != unsigned(Count); ++i)
    assert(!(Cache[i] < Cache[i-1]) && "Cache isn't sorted!");
}
#endif

/// getNonLocalCallDependency - Perform a full dependency query for the
/// specified call, returning the set of blocks that the value is
/// potentially live across.  The returned set of results will include a
/// "NonLocal" result for all blocks where the value is live across.
///
/// This method assumes the instruction returns a "NonLocal" dependency
/// within its own block.
///
/// This returns a reference to an internal data structure that may be
/// invalidated on the next non-local query or when an instruction is
/// removed.  Clients must copy this data if they want it around longer than
/// that.
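/// A sketch of client-side iteration (remembering that the reference must
/// not be held across another query):
///   const NonLocalDepInfo &Deps = MD->getNonLocalCallDependency(QueryCS);
///   for (unsigned i = 0, e = Deps.size(); i != e; ++i)
///     if (Instruction *D = Deps[i].getResult().getInst())
///       ...  // D defines/clobbers the call's memory in Deps[i].getBB().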
const MemoryDependenceAnalysis::NonLocalDepInfo &
MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
  NonLocalDepInfo &Cache = CacheP.first;

  /// DirtyBlocks - This is the set of blocks that need to be recomputed.  In
  /// the cached case, this can happen due to instructions being deleted etc. In
  /// the uncached case, this starts out as the set of predecessors we care
  /// about.
  SmallVector<BasicBlock*, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry.  If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
         I != E; ++I)
      if (I->getResult().isDirty())
        DirtyBlocks.push_back(I->getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    std::sort(Cache.begin(), Cache.end());

    ++NumCacheDirtyNonLocal;
    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
      DirtyBlocks.push_back(*PI);
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);

  SmallPtrSet<BasicBlock*, 64> Visited;

  unsigned NumSortedEntries = Cache.size();
  DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Already processed this block?
    if (!Visited.insert(DirtyBB))
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set.  If so, find it.
    DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
                       NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && prior(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = 0;
    if (Entry != Cache.begin()+NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't have
    // to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst;
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos,
                                      DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found.  If this is the entry block of the function, the
      // dependence is outside the function (non-func-local); otherwise it is
      // non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it.  Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
    } else {

      // If the block *is* completely transparent to the value, we need to
      // check the predecessors of this block.  Add them to our worklist.
      for (BasicBlock **PI = PredCache->GetPreds(DirtyBB); *PI; ++PI)
        DirtyBlocks.push_back(*PI);
    }
  }

  return Cache;
}

/// getNonLocalPointerDependency - Perform a full dependency query for an
/// access to the specified (non-volatile) memory location, returning the
/// set of instructions that either define or clobber the value.
///
/// This method assumes the pointer has a "NonLocal" dependency within its
/// own block.
///
void MemoryDependenceAnalysis::
getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad,
                             BasicBlock *FromBB,
                             SmallVectorImpl<NonLocalDepResult> &Result) {
  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();

  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), TD);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block.  Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers.  This can happen during PHI
  // translation.
  DenseMap<BasicBlock*, Value*> Visited;
  if (!getNonLocalPointerDepFromBB(Address, Loc, isLoad, FromBB,
                                   Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB,
                                     MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}
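// A sketch of a client query (MD being a handle to this analysis):
//   SmallVector<NonLocalDepResult, 8> Deps;
//   MD->getNonLocalPointerDependency(AA->getLocation(Load), /*isLoad=*/true,
//                                    Load->getParent(), Deps);
// Each NonLocalDepResult pairs a block with the def/clobber found there and
// with the (possibly phi-translated) address that was queried in that block.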

/// GetNonLocalInfoForBlock - Compute the memdep value for BB with
/// Pointer/PointeeSize using either cached information in Cache or by doing a
/// lookup (which may use dirty cache info if available).  If we do a lookup,
/// add the result to the cache.
MemDepResult MemoryDependenceAnalysis::
GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc,
                        bool isLoad, BasicBlock *BB,
                        NonLocalDepInfo *Cache, unsigned NumSortedEntries) {

  // Do a binary search to see if we already have an entry for this block in
  // the cache set.  If so, find it.
  NonLocalDepInfo::iterator Entry =
    std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries,
                     NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry-1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = 0;
  if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value.  If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB);

  // If we had a dirty entry for the block, update it.  Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (!Dep.isDef() && !Dep.isClobber())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// SortNonLocalDepInfoCache - Sort a NonLocalDepInfo cache, given a certain
/// number of elements in the array that are already properly ordered.  This is
/// optimized for the case when only a few entries are added.
static void
SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // Done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.end()-1, Val);
    Cache.insert(Entry, Val);
    // FALL THROUGH.
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end(), Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full scale sort.
    std::sort(Cache.begin(), Cache.end());
    break;
  }
}
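// The common case after a block query appends zero, one, or two entries, so
// the one- and two-element paths above do a binary search plus a single
// insertion each instead of paying for a full O(n log n) sort.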

/// getNonLocalPointerDepFromBB - Perform a dependency query based on
/// pointer/pointee size starting at the end of StartBB.  Add any clobber/def
/// results to the results vector and keep track of which blocks are visited in
/// 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true).  In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns false on success, or true to indicate that it could
/// not compute dependence information for some reason.  This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceAnalysis::
getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
                            const AliasAnalysis::Location &Loc,
                            bool isLoad, BasicBlock *StartBB,
                            SmallVectorImpl<NonLocalDepResult> &Result,
                            DenseMap<BasicBlock*, Value*> &Visited,
                            bool SkipFirstBlock) {
  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value. If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value. Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // TBAA tag are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.TBAATag = Loc.TBAATag;

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already have one.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
    NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do some
  // work to reconcile the cache entry and the current query.
  if (!Pair.second) {
    if (CacheInfo->Size < Loc.Size) {
      // The query's Size is greater than the cached one. Throw out the
      // cached data and proceed with the query at the greater size.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      CacheInfo->Size = Loc.Size;
      for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
           DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
        if (Instruction *Inst = DI->getResult().getInst())
          RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
      CacheInfo->NonLocalDeps.clear();
    } else if (CacheInfo->Size > Loc.Size) {
      // This query's Size is less than the cached one. Conservatively restart
      // the query using the greater size.
      return getNonLocalPointerDepFromBB(Pointer,
                                         Loc.getWithNewSize(CacheInfo->Size),
                                         isLoad, StartBB, Result, Visited,
                                         SkipFirstBlock);
    }

    // If the query's TBAATag is inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->TBAATag != Loc.TBAATag) {
      if (CacheInfo->TBAATag) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->TBAATag = 0;
        for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
             DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
          if (Instruction *Inst = DI->getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
      }
      if (Loc.TBAATag)
        return getNonLocalPointerDepFromBB(Pointer, Loc.getWithoutTBAATag(),
                                           isLoad, StartBB, Result, Visited,
                                           SkipFirstBlock);
    }
  }

  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // We have a fully cached result for this query, so we can just return the
    // cached results and populate the visited set.  However, we have to verify
    // that we don't already have conflicting results for these blocks.  Check
    // to ensure that if a block in the results set is in the visited set that
    // it was for the same pointer query.
    if (!Visited.empty()) {
      for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
           I != E; ++I) {
        DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;

        // We have a pointer mismatch in a block.  Just return clobber, saying
        // that something was clobbered in this result.  We could also do a
        // non-fully cached query, but there is little point in doing this.
        return true;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
         I != E; ++I) {
      Visited.insert(std::make_pair(I->getBB(), Addr));
      if (I->getResult().isNonLocal()) {
        continue;
      }

      if (!DT) {
        Result.push_back(NonLocalDepResult(I->getBB(),
                                           MemDepResult::getUnknown(),
                                           Addr));
      } else if (DT->isReachableFromEntry(I->getBB())) {
        Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr));
      }
    }
    ++NumCacheCompleteNonLocalPtr;
    return false;
  }

  // Otherwise, either this is a new block, a block with an invalid cache
  // pointer, or one that we're about to invalidate by putting more info into
  // it than its valid cache info.  If empty, the result will be valid cache
  // info, otherwise it isn't.
  if (Cache->empty())
    CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
  else
    CacheInfo->Pair = BBSkipFirstBlockPair();

  SmallVector<BasicBlock*, 32> Worklist;
  Worklist.push_back(StartBB);

  // PredList used inside loop.
  SmallVector<std::pair<BasicBlock*, PHITransAddr>, 16> PredList;

  // Keep track of the entries that we know are sorted.  Previously cached
  // entries will all be sorted.  The entries we add we only sort on demand (we
  // don't insert every element into its sorted position).  We know that we
  // won't get any reuse from currently inserted values, because we don't
  // revisit blocks after we insert info for them.
  unsigned NumSortedEntries = Cache->size();
  DEBUG(AssertSorted(*Cache));

  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB.  See if we already have
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB.  If we have cached
      // information, we will use it, otherwise we compute it.
      DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = GetNonLocalInfoForBlock(Loc, isLoad, BB, Cache,
                                                 NumSortedEntries);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        if (!DT) {
          Result.push_back(NonLocalDepResult(BB,
                                             MemDepResult::getUnknown(),
                                             Pointer.getAddr()));
          continue;
        } else if (DT->isReachableFromEntry(BB)) {
          Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
          continue;
        }
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to do
    // phi translation to change it into a value live in the predecessor block.
    // If not, we just add the predecessors to the worklist and scan them with
    // the same Pointer.
    if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
      SkipFirstBlock = false;
      SmallVector<BasicBlock*, 16> NewBlocks;
      for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
          InsertRes = Visited.insert(std::make_pair(*PI, Pointer.getAddr()));
        if (InsertRes.second) {
          // First time we've looked at *PI.
          NewBlocks.push_back(*PI);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer then we have a phi translation failure and we have to treat
        // this as a clobber.
        if (InsertRes.first->second != Pointer.getAddr()) {
          // Make sure to clean up the Visited map before continuing on to
          // PredTranslationFailure.
          for (unsigned i = 0; i < NewBlocks.size(); i++)
            Visited.erase(NewBlocks[i]);
          goto PredTranslationFailure;
        }
      }
      Worklist.append(NewBlocks.begin(), NewBlocks.end());
      continue;
    }

    // We do need to do phi translation; if we know ahead of time that we
    // can't phi translate this value, don't even try.
    if (!Pointer.IsPotentiallyPHITranslatable())
      goto PredTranslationFailure;

    // We may have added values to the cache list before this PHI translation.
    // If so, we haven't done anything to ensure that the cache remains sorted.
    // Sort it now (if needed) so that recursive invocations of
    // getNonLocalPointerDepFromBB and other routines that could reuse the cache
    // value will only see properly sorted cache arrays.
    if (Cache && NumSortedEntries != Cache->size()) {
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      NumSortedEntries = Cache->size();
    }
    Cache = 0;

    PredList.clear();
    for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
      BasicBlock *Pred = *PI;
      PredList.push_back(std::make_pair(Pred, Pointer));

      // Get the PHI translated pointer in this predecessor.  This can fail if
      // not translatable, in which case the getAddr() returns null.
      PHITransAddr &PredPointer = PredList.back().second;
      PredPointer.PHITranslateValue(BB, Pred, 0);

      Value *PredPtrVal = PredPointer.getAddr();

      // Check to see if we have already visited this pred block with another
      // pointer.  If so, we can't do this lookup.  This failure can occur
      // with PHI translation when a critical edge exists and the PHI node in
      // the successor translates to a pointer value different than the
      // pointer the block was first analyzed with.
      std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
        InsertRes = Visited.insert(std::make_pair(Pred, PredPtrVal));

      if (!InsertRes.second) {
        // We found the pred; take it off the list of preds to visit.
        PredList.pop_back();

        // If the predecessor was visited with PredPtr, then we already did
        // the analysis and can ignore it.
        if (InsertRes.first->second == PredPtrVal)
          continue;

        // Otherwise, the block was previously analyzed with a different
        // pointer.  We can't represent the result of this case, so we just
        // treat this as a phi translation failure.

        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0, n = PredList.size(); i < n; ++i)
          Visited.erase(PredList[i].first);

        goto PredTranslationFailure;
      }
    }

    // Actually process results here; this needs to be a separate loop to avoid
    // calling getNonLocalPointerDepFromBB for blocks we don't want to return
    // any results for.  (getNonLocalPointerDepFromBB will modify our
    // datastructures in ways the code after the PredTranslationFailure label
    // doesn't expect.)
    for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
      BasicBlock *Pred = PredList[i].first;
      PHITransAddr &PredPointer = PredList[i].second;
      Value *PredPtrVal = PredPointer.getAddr();

      bool CanTranslate = true;
      // If PHI translation was unable to find an available pointer in this
      // predecessor, then we have to assume that the pointer is clobbered in
      // that predecessor.  We can still do PRE of the load, which would insert
      // a computation of the pointer in this predecessor.
      if (PredPtrVal == 0)
        CanTranslate = false;

      // FIXME: it is entirely possible that PHI translating will end up with
      // the same value.  Consider PHI translating something like:
      // X = phi [x, bb1], [y, bb2].  PHI translating for bb1 doesn't *need*
      // to recurse here, pedantically speaking.

      // If getNonLocalPointerDepFromBB fails here, that means the cached
      // result conflicted with the Visited list; we have to conservatively
      // assume it is unknown, but this also does not block PRE of the load.
      if (!CanTranslate ||
          getNonLocalPointerDepFromBB(PredPointer,
                                      Loc.getWithNewPtr(PredPtrVal),
                                      isLoad, Pred,
                                      Result, Visited)) {
        // Add the entry to the Result list.
        NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
        Result.push_back(Entry);

        // Since we had a phi translation failure, the cache for CacheKey won't
        // include all of the entries that we need to immediately satisfy future
        // queries.  Mark this in NonLocalPointerDeps by setting the
        // BBSkipFirstBlockPair pointer to null.  This requires reuse of the
        // cached value to do more work but not miss the phi trans failure.
        NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
        NLPI.Pair = BBSkipFirstBlockPair();
        continue;
      }
    }

    // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
    CacheInfo = &NonLocalPointerDeps[CacheKey];
    Cache = &CacheInfo->NonLocalDeps;
    NumSortedEntries = Cache->size();

    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query.  This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set".  Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();
    SkipFirstBlock = false;
    continue;

  PredTranslationFailure:
    // The following code is "failure"; we can't produce a sane translation
    // for the given block.  It assumes that we haven't modified any of
    // our datastructures while processing the current block.

    if (Cache == 0) {
      // Refresh the CacheInfo/Cache pointer if it got invalidated.
      CacheInfo = &NonLocalPointerDeps[CacheKey];
      Cache = &CacheInfo->NonLocalDeps;
      NumSortedEntries = Cache->size();
    }

    // Since we failed phi translation, the "Cache" set won't contain all of the
    // results for the query.  This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set".  Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();

    // If *nothing* works, mark the pointer as unknown.
    //
    // If this is the magic first block, return this as a clobber of the whole
    // incoming value.  Since we can't phi translate to one of the predecessors,
    // we have to bail out.
    if (SkipFirstBlock)
      return true;

    for (NonLocalDepInfo::reverse_iterator I = Cache->rbegin(); ; ++I) {
      assert(I != Cache->rend() && "Didn't find current block??");
      if (I->getBB() != BB)
        continue;

      assert(I->getResult().isNonLocal() &&
             "Should only be here with transparent block");
      I->setResult(MemDepResult::getUnknown());
      Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(),
                                         Pointer.getAddr()));
      break;
    }
  }

  // Okay, we're done now.  If we added new values to the cache, re-sort it.
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  DEBUG(AssertSorted(*Cache));
  return false;
}

/// RemoveCachedNonLocalPointerDependencies - If P exists in
/// CachedNonLocalPointerInfo, remove it.
void MemoryDependenceAnalysis::
RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
  CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end()) return;

  // Remove all of the entries in the BB->val map.  This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;

  for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
    Instruction *Target = PInfo[i].getResult().getInst();
    if (Target == 0) continue;  // Ignore non-local dep results.
    assert(Target->getParent() == PInfo[i].getBB());

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}


/// invalidateCachedPointerInfo - This method is used to invalidate cached
/// information about the specified pointer, because it may be too
/// conservative in memdep.  This is an optional call that can be used when
/// the client detects an equivalence between the pointer and some other
/// value and replaces the other value with Ptr.  This can make Ptr available
/// in more places than the cached information currently reflects.
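///
/// For example (an illustrative, hypothetical client): a pass that proves
/// %other == %ptr and performs Other->replaceAllUsesWith(Ptr) should then
/// call invalidateCachedPointerInfo(Ptr), because results cached before the
/// replacement may now be overly conservative for Ptr.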
void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) {
  // If Ptr isn't really a pointer, just ignore it.
  if (!Ptr->getType()->isPointerTy()) return;
  // Flush store info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
}

/// invalidateCachedPredecessors - Clear the PredIteratorCache info.
/// This needs to be done when the CFG changes, e.g., due to splitting
/// critical edges.
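///
/// For instance (illustrative; no specific caller implied): a transform
/// that calls SplitCriticalEdge introduces a new block and redirects edges,
/// so predecessor lists captured by PredCache before the split may be stale.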
void MemoryDependenceAnalysis::invalidateCachedPredecessors() {
  PredCache->clear();
}

/// removeInstruction - Remove an instruction from the dependence analysis,
/// updating the dependence of instructions that previously depended on it.
/// This method attempts to keep the cache coherent using the reverse map.
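///
/// Illustrative scenario (not tied to a particular client): if dead store
/// elimination deletes a store that cached queries reported as their
/// clobbering dependency, those cached entries must be redirected to a
/// dirty marker rather than left pointing at the freed instruction.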
void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
  NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
  if (NLDI != NonLocalDeps.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end();
         DI != DE; ++DI)
      if (Instruction *Inst = DI->getResult().getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDeps.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove it.
  //
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // If we have any cached pointer dependencies on this instruction, remove
  // them.  If the instruction has non-pointer type, then it can't be a pointer
  // base.

  // Remove it from both the load info and the store info.  The instruction
  // can't be in either of these maps if it is non-pointer.
  if (RemInst->getType()->isPointerTy()) {
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  }
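  // (Expository note: if RemInst were, e.g., a GEP whose address served as
  // the pointer key of cached queries, both the load-keyed and store-keyed
  // entries for it would be removed above.)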

  // Loop over all of the things that depend on the instruction we're removing.
  //
  SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other values,
  // we need to replace its entry with a dirty version of the instruction after
  // it.  If RemInst is a terminator, we use a null dirty value.
  //
  // Using a dirty version of the instruction after RemInst saves having to scan
  // the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));
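  // (Illustration, assumed for exposition: given "store i32 0, i32* %p"
  // followed by "%v = load i32* %p", removing the store leaves any cached
  // entry that named the store dirty at the instruction just after it, so
  // a later query re-scans backwards from that point instead of from the
  // end of the block.)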

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &ReverseDeps = ReverseDepIt->second;
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDeps.empty() && !isa<TerminatorInst>(RemInst) &&
           "Nothing can locally depend on a terminator");

    for (SmallPtrSet<Instruction*, 4>::iterator I = ReverseDeps.begin(),
         E = ReverseDeps.end(); I != E; ++I) {
      Instruction *InstDependingOnRemInst = *I;
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on the dirty
      // instruction in NewDirtyVal.
      assert(NewDirtyVal.getInst() && "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(),
                                                InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &Set = ReverseDepIt->second;
    for (SmallPtrSet<Instruction*, 4>::iterator I = Set.begin(), E = Set.end();
         I != E; ++I) {
      assert(*I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDeps[*I];
      // The information is now dirty!
      INLD.second = true;

      for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
           DE = INLD.first.end(); DI != DE; ++DI) {
        if (DI->getResult().getInst() != RemInst) continue;

        // Convert to a dirty entry for the subsequent instruction.
        DI->setResult(NewDirtyVal);

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, *I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating 'Set'.
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
    ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallPtrSet<ValueIsLoadPair, 4> &Set = ReversePtrDepIt->second;
    SmallVector<std::pair<Instruction*, ValueIsLoadPair>,8> ReversePtrDepsToAdd;

    for (SmallPtrSet<ValueIsLoadPair, 4>::iterator I = Set.begin(),
         E = Set.end(); I != E; ++I) {
      ValueIsLoadPair P = *I;
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;

      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
           DI != DE; ++DI) {
        if (DI->getResult().getInst() != RemInst) continue;

        // Convert to a dirty entry for the subsequent instruction.
        DI->setResult(NewDirtyVal);

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }

      // Re-sort the NonLocalDepInfo.  Changing the dirty entry to its
      // subsequent value may invalidate the sortedness.
      std::sort(NLPDI.begin(), NLPDI.end());
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first]
        .insert(ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }

  assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
  AA->deleteValue(RemInst);
  DEBUG(verifyRemoved(RemInst));
}

/// verifyRemoved - Verify that the specified instruction does not occur
/// in our internal data structures.
void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
  for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
       E = LocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    assert(I->second.getInst() != D && "Inst occurs in data structures");
  }

  for (CachedNonLocalPointerInfo::const_iterator
       I = NonLocalPointerDeps.begin(), E = NonLocalPointerDeps.end();
       I != E; ++I) {
    assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
    const NonLocalDepInfo &Val = I->second.NonLocalDeps;
    for (NonLocalDepInfo::const_iterator II = Val.begin(), EE = Val.end();
         II != EE; ++II)
      assert(II->getResult().getInst() != D && "Inst occurs as NLPD value");
  }

  for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
       E = NonLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = I->second;
    for (NonLocalDepInfo::const_iterator II = INLD.first.begin(),
         EE = INLD.first.end(); II != EE; ++II)
      assert(II->getResult().getInst() != D && "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
       E = ReverseLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
       E = ReverseNonLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");
  }

  for (ReverseNonLocalPtrDepTy::const_iterator
       I = ReverseNonLocalPtrDeps.begin(),
       E = ReverseNonLocalPtrDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in rev NLPD map");

    for (SmallPtrSet<ValueIsLoadPair, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != ValueIsLoadPair(D, false) &&
             *II != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }
}