GlobalOpt.cpp revision 239462
//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken. If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "globalopt"
#include "llvm/Transforms/IPO.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumMarked    , "Number of globals marked constant");
STATISTIC(NumUnnamed   , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA       , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA   , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute,"Number of globals with initializers stored into them");
STATISTIC(NumDeleted   , "Number of globals deleted");
STATISTIC(NumFnDeleted , "Number of functions deleted");
STATISTIC(NumGlobUses  , "Number of global uses devirtualized");
STATISTIC(NumLocalized , "Number of globals localized");
STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");

namespace {
  struct GlobalStatus;
  struct GlobalOpt : public ModulePass {
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<TargetLibraryInfo>();
    }
    static char ID; // Pass identification, replacement for typeid
    GlobalOpt() : ModulePass(ID) {
      initializeGlobalOptPass(*PassRegistry::getPassRegistry());
    }

    bool runOnModule(Module &M);

  private:
    GlobalVariable *FindGlobalCtors(Module &M);
    bool OptimizeFunctions(Module &M);
    bool OptimizeGlobalVars(Module &M);
    bool OptimizeGlobalAliases(Module &M);
    bool OptimizeGlobalCtorsList(GlobalVariable *&GCL);
    bool ProcessGlobal(GlobalVariable *GV, Module::global_iterator &GVI);
    bool ProcessInternalGlobal(GlobalVariable *GV, Module::global_iterator &GVI,
                               const SmallPtrSet<const PHINode*, 16> &PHIUsers,
                               const GlobalStatus &GS);
    bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);

    TargetData *TD;
    TargetLibraryInfo *TLI;
  };
}

char GlobalOpt::ID = 0;
INITIALIZE_PASS_BEGIN(GlobalOpt, "globalopt",
                "Global Variable Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(GlobalOpt, "globalopt",
                "Global Variable Optimizer", false, false)

ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); }

namespace {

/// GlobalStatus - As we analyze each global, keep track of some information
/// about it. If we find out that the address of the global is taken, none of
/// this info will be accurate.
struct GlobalStatus {
  /// isCompared - True if the global's address is used in a comparison.
  bool isCompared;

  /// isLoaded - True if the global is ever loaded. If the global isn't ever
  /// loaded it can be deleted.
  bool isLoaded;

  /// StoredType - Keep track of what stores to the global look like.
  ///
  enum StoredType {
    /// NotStored - There is no store to this global. It can thus be marked
    /// constant.
    NotStored,

    /// isInitializerStored - This global is stored to, but the only thing
    /// stored is the constant it was initialized with. This is only tracked
    /// for scalar globals.
    isInitializerStored,

    /// isStoredOnce - This global is stored to, but only its initializer and
    /// one other value is ever stored to it. If this global isStoredOnce, we
    /// track the value stored to it in StoredOnceValue below. This is only
    /// tracked for scalar globals.
    isStoredOnce,

    /// isStored - This global is stored to by multiple values or something else
    /// that we cannot track.
    isStored
  } StoredType;

  /// StoredOnceValue - If only one value (besides the initializer constant) is
  /// ever stored to this global, keep track of what value it is.
  Value *StoredOnceValue;

  /// AccessingFunction/HasMultipleAccessingFunctions - These start out
  /// null/false. When the first accessing function is noticed, it is recorded.
  /// When a second different accessing function is noticed,
  /// HasMultipleAccessingFunctions is set to true.
  const Function *AccessingFunction;
  bool HasMultipleAccessingFunctions;

  /// HasNonInstructionUser - Set to true if this global has a user that is not
  /// an instruction (e.g. a constant expr or GV initializer).
  bool HasNonInstructionUser;

  /// HasPHIUser - Set to true if this global has a user that is a PHI node.
  bool HasPHIUser;

  /// AtomicOrdering - Set to the strongest atomic ordering requirement.
  AtomicOrdering Ordering;

  GlobalStatus() : isCompared(false), isLoaded(false), StoredType(NotStored),
                   StoredOnceValue(0), AccessingFunction(0),
                   HasMultipleAccessingFunctions(false),
                   HasNonInstructionUser(false), HasPHIUser(false),
                   Ordering(NotAtomic) {}
};

}

/// StrongerOrdering - Return the stronger of the two orderings. If the two
/// orderings are acquire and release, then return AcquireRelease.
///
static AtomicOrdering StrongerOrdering(AtomicOrdering X, AtomicOrdering Y) {
  if (X == Acquire && Y == Release) return AcquireRelease;
  if (Y == Acquire && X == Release) return AcquireRelease;
  return (AtomicOrdering)std::max(X, Y);
}
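// Illustrative example (IR sketch; the global @g and value %p are
// hypothetical, not from this pass): an internal global whose only
// non-initializer store writes a single value is classified as isStoredOnce,
// with StoredOnceValue recording %p:
//
//   @g = internal global i32* null
//   ...
//   store i32* %p, i32** @g     ; the one store besides the initializer
//   %v = load i32** @g          ; loads are fine; isLoaded is set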
/// SafeToDestroyConstant - It is safe to destroy a constant iff it is only
/// used by other constants. Note that constants cannot be cyclic, so this
/// test is pretty easy to implement recursively.
///
static bool SafeToDestroyConstant(const Constant *C) {
  if (isa<GlobalValue>(C)) return false;

  for (Value::const_use_iterator UI = C->use_begin(), E = C->use_end(); UI != E;
       ++UI)
    if (const Constant *CU = dyn_cast<Constant>(*UI)) {
      if (!SafeToDestroyConstant(CU)) return false;
    } else
      return false;
  return true;
}


/// AnalyzeGlobal - Look at all uses of the global and fill in the GlobalStatus
/// structure. If the global has its address taken, return true to indicate we
/// can't do anything with it.
///
static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
                          SmallPtrSet<const PHINode*, 16> &PHIUsers) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const User *U = *UI;
    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      GS.HasNonInstructionUser = true;

      // If the result of the constantexpr isn't pointer type, then we won't
      // know to expect it in various places. Just reject early.
      if (!isa<PointerType>(CE->getType())) return true;

      if (AnalyzeGlobal(CE, GS, PHIUsers)) return true;
    } else if (const Instruction *I = dyn_cast<Instruction>(U)) {
      if (!GS.HasMultipleAccessingFunctions) {
        const Function *F = I->getParent()->getParent();
        if (GS.AccessingFunction == 0)
          GS.AccessingFunction = F;
        else if (GS.AccessingFunction != F)
          GS.HasMultipleAccessingFunctions = true;
      }
      if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
        GS.isLoaded = true;
        // Don't hack on volatile loads.
        if (LI->isVolatile()) return true;
        GS.Ordering = StrongerOrdering(GS.Ordering, LI->getOrdering());
      } else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
        // Don't allow a store OF the address, only stores TO the address.
        if (SI->getOperand(0) == V) return true;

        // Don't hack on volatile stores.
        if (SI->isVolatile()) return true;
        GS.Ordering = StrongerOrdering(GS.Ordering, SI->getOrdering());

        // If this is a direct store to the global (i.e., the global is a scalar
        // value, not an aggregate), keep more specific information about
        // stores.
        if (GS.StoredType != GlobalStatus::isStored) {
          if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(
                                                           SI->getOperand(1))) {
            Value *StoredVal = SI->getOperand(0);
            if (StoredVal == GV->getInitializer()) {
              if (GS.StoredType < GlobalStatus::isInitializerStored)
                GS.StoredType = GlobalStatus::isInitializerStored;
            } else if (isa<LoadInst>(StoredVal) &&
                       cast<LoadInst>(StoredVal)->getOperand(0) == GV) {
              if (GS.StoredType < GlobalStatus::isInitializerStored)
                GS.StoredType = GlobalStatus::isInitializerStored;
            } else if (GS.StoredType < GlobalStatus::isStoredOnce) {
              GS.StoredType = GlobalStatus::isStoredOnce;
              GS.StoredOnceValue = StoredVal;
            } else if (GS.StoredType == GlobalStatus::isStoredOnce &&
                       GS.StoredOnceValue == StoredVal) {
              // noop.
            } else {
              GS.StoredType = GlobalStatus::isStored;
            }
          } else {
            GS.StoredType = GlobalStatus::isStored;
          }
        }
      } else if (isa<BitCastInst>(I)) {
        if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
      } else if (isa<GetElementPtrInst>(I)) {
        if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
      } else if (isa<SelectInst>(I)) {
        if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
      } else if (const PHINode *PN = dyn_cast<PHINode>(I)) {
        // PHI nodes we can check just like select or GEP instructions, but we
        // have to be careful about infinite recursion.
        if (PHIUsers.insert(PN))  // Not already visited.
          if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
        GS.HasPHIUser = true;
      } else if (isa<CmpInst>(I)) {
        GS.isCompared = true;
      } else if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(I)) {
        if (MTI->isVolatile()) return true;
        if (MTI->getArgOperand(0) == V)
          GS.StoredType = GlobalStatus::isStored;
        if (MTI->getArgOperand(1) == V)
          GS.isLoaded = true;
      } else if (const MemSetInst *MSI = dyn_cast<MemSetInst>(I)) {
        assert(MSI->getArgOperand(0) == V && "Memset only takes one pointer!");
        if (MSI->isVolatile()) return true;
        GS.StoredType = GlobalStatus::isStored;
      } else {
        return true;  // Any other non-load instruction might take address!
      }
    } else if (const Constant *C = dyn_cast<Constant>(U)) {
      GS.HasNonInstructionUser = true;
      // We might have a dead and dangling constant hanging off of here.
      if (!SafeToDestroyConstant(C))
        return true;
    } else {
      GS.HasNonInstructionUser = true;
      // Otherwise must be some other user.
      return true;
    }
  }

  return false;
}
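// Illustrative example (IR sketch; @escape and @g are hypothetical): passing
// the global's address to an unknown function takes its address, so
// AnalyzeGlobal bails out and the global is left untouched:
//
//   call void @escape(i32* @g)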
/// isLeakCheckerRoot - Is this global variable possibly used by a leak checker
/// as a root? If so, we might not really want to eliminate the stores to it.
static bool isLeakCheckerRoot(GlobalVariable *GV) {
  // A global variable is a root if it is a pointer, or could plausibly contain
  // a pointer. There are two challenges; one is that we could have a struct
  // that has an inner member which is a pointer. We recurse through the type
  // to detect these (up to a point). The other is that we may actually be a
  // union of a pointer and another type, and so our LLVM type is an integer
  // which gets converted into a pointer, or our type is an [i8 x #] with a
  // pointer potentially contained here.

  if (GV->hasPrivateLinkage())
    return false;

  SmallVector<Type *, 4> Types;
  Types.push_back(cast<PointerType>(GV->getType())->getElementType());

  unsigned Limit = 20;
  do {
    Type *Ty = Types.pop_back_val();
    switch (Ty->getTypeID()) {
      default: break;
      case Type::PointerTyID: return true;
      case Type::ArrayTyID:
      case Type::VectorTyID: {
        SequentialType *STy = cast<SequentialType>(Ty);
        Types.push_back(STy->getElementType());
        break;
      }
      case Type::StructTyID: {
        StructType *STy = cast<StructType>(Ty);
        if (STy->isOpaque()) return true;
        for (StructType::element_iterator I = STy->element_begin(),
                 E = STy->element_end(); I != E; ++I) {
          Type *InnerTy = *I;
          if (isa<PointerType>(InnerTy)) return true;
          if (isa<CompositeType>(InnerTy))
            Types.push_back(InnerTy);
        }
        break;
      }
    }
    if (--Limit == 0) return true;
  } while (!Types.empty());
  return false;
}
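// Illustrative example (IR sketch; the globals are hypothetical): a
// pointer-typed or pointer-containing global is treated as a possible root,
// a plain scalar is not:
//
//   @list_head = global %struct.node* null            ; root: pointer type
//   @pair      = global { i32, i8* } zeroinitializer  ; root: inner pointer
//   @counter   = global i32 0                         ; not a root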
/// Given a value that is stored to a global but never read, determine whether
/// it's safe to remove the store and the chain of computation that feeds the
/// store.
static bool IsSafeComputationToRemove(Value *V) {
  do {
    if (isa<Constant>(V))
      return true;
    if (!V->hasOneUse())
      return false;
    if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
        isa<GlobalValue>(V))
      return false;
    if (isAllocationFn(V))
      return true;

    Instruction *I = cast<Instruction>(V);
    if (I->mayHaveSideEffects())
      return false;
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      if (!GEP->hasAllConstantIndices())
        return false;
    } else if (I->getNumOperands() != 1) {
      return false;
    }

    V = I->getOperand(0);
  } while (1);
}

/// CleanupPointerRootUsers - This GV is a pointer root. Loop over all users
/// of the global and clean up any that obviously don't assign the global a
/// dynamically-allocated value: stores of constants are simply removed, and a
/// store whose single-use computation chain ends in a constant or an
/// otherwise-unused allocation is deleted together with that chain.
///
static bool CleanupPointerRootUsers(GlobalVariable *GV) {
  // A brief explanation of leak checkers. The goal is to find bugs where
  // pointers are forgotten, causing an accumulating growth in memory
  // usage over time. The common strategy for leak checkers is to whitelist the
  // memory pointed to by globals at exit. This is popular because it also
  // solves another problem where the main thread of a C++ program may shut down
  // before other threads that are still expecting to use those globals. To
  // handle that case, we expect the program may create a singleton and never
  // destroy it.

  bool Changed = false;

  // If Dead[n].first is the only use of a malloc result, we can delete its
  // chain of computation and the store to the global in Dead[n].second.
  SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;

  // Constants can't be pointers to dynamically allocated memory.
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E;) {
    User *U = *UI++;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      Value *V = SI->getValueOperand();
      if (isa<Constant>(V)) {
        Changed = true;
        SI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(V)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, SI));
      }
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
      if (isa<Constant>(MSI->getValue())) {
        Changed = true;
        MSI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MSI));
      }
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
      GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
      if (MemSrc && MemSrc->isConstant()) {
        Changed = true;
        MTI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MemSrc)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MTI));
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (Constant *C = dyn_cast<Constant>(U)) {
      if (SafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        Dead.clear();
        CleanupPointerRootUsers(GV);
        return true;
      }
    }
  }

  for (int i = 0, e = Dead.size(); i != e; ++i) {
    if (IsSafeComputationToRemove(Dead[i].first)) {
      Dead[i].second->eraseFromParent();
      Instruction *I = Dead[i].first;
      do {
        if (isAllocationFn(I))
          break;
        Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
        if (!J)
          break;
        I->eraseFromParent();
        I = J;
      } while (1);
      I->eraseFromParent();
    }
  }

  return Changed;
}
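// Illustrative example (IR sketch; @root and the values are hypothetical):
// with @root a leak-checker root, both stores below can go; the second is
// deleted together with its single-use computation chain, including the
// allocation itself, so nothing leaks:
//
//   store i32* null, i32** @root        ; constant stored -> removed
//
//   %m = call i8* @malloc(i64 4)
//   %p = bitcast i8* %m to i32*
//   store i32* %p, i32** @root          ; store, bitcast, and malloc removed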
/// CleanupConstantGlobalUsers - We just marked GV constant. Loop over all
/// users of the global, cleaning up the obvious ones. This is largely just a
/// quick scan over the use list to clean up the easy and obvious cruft. This
/// returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
                                       TargetData *TD, TargetLibraryInfo *TLI) {
  bool Changed = false;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
    User *U = *UI++;

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = 0;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit, TD, TLI);
      } else if (CE->getOpcode() == Instruction::BitCast &&
                 CE->getType()->isPointerTy()) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, 0, TD, TLI);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = 0;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        ConstantExpr *CE =
          dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, TD, TLI));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);

        // If the initializer is an all-null value and we have an inbounds GEP,
        // we already know what the result of any load from that GEP is.
        // TODO: Handle splats.
        if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
          SubInit = Constant::getNullValue(GEP->getType()->getElementType());
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit, TD, TLI);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }

    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (SafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        CleanupConstantGlobalUsers(V, Init, TD, TLI);
        return true;
      }
    }
  }
  return Changed;
}
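// Illustrative example (IR sketch; @g is hypothetical): once @g is known
// constant, its loads fold to the initializer and any remaining stores must
// be unreachable:
//
//   @g = internal constant i32 42
//   %v = load i32* @g        ; replaced by i32 42
//   store i32 7, i32* @g     ; deleted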
/// isSafeSROAElementUse - Return true if the specified instruction is a safe
/// user of a derived expression from a global that we want to SROA.
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return SafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP.
  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
  if (GEPI == 0) return false;

  if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
      !cast<Constant>(GEPI->getOperand(1))->isNullValue())
    return false;

  for (Value::use_iterator I = GEPI->use_begin(), E = GEPI->use_end();
       I != E; ++I)
    if (!isSafeSROAElementUse(*I))
      return false;
  return true;
}


/// IsUserOfGlobalSafeForSRA - U is a direct user of the specified global value.
/// Look at it and its uses and decide whether it is safe to SROA this global.
///
static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
  // The user of the global must be a GEP Inst or a ConstantExpr GEP.
  if (!isa<GetElementPtrInst>(U) &&
      (!isa<ConstantExpr>(U) ||
       cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
    return false;

  // Check to see if this ConstantExpr GEP is SRA'able. In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices. This enforces that all uses are 'gep GV, 0, C, ...' for some
  // value of C.
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue() ||
      !isa<ConstantInt>(U->getOperand(2)))
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI;  // Skip over the pointer index.

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
    uint64_t NumElements = AT->getNumElements();
    ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));

    // Check to make sure that index falls within the array. If not,
    // something funny is going on, so we won't do the optimization.
    //
    if (Idx->getZExtValue() >= NumElements)
      return false;

    // We cannot scalar repl this level of the array unless any array
    // sub-indices are in-range constants. In particular, consider:
    // A[0][i]. We cannot know that the user isn't doing invalid things like
    // allowing i to index an out-of-range subscript that accesses A[1].
    //
    // Scalar replacing *just* the outer index of the array is probably not
    // going to be a win anyway, so just give up.
    for (++GEPI;  // Skip array index.
         GEPI != E;
         ++GEPI) {
      uint64_t NumElements;
      if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
        NumElements = SubArrayTy->getNumElements();
      else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
        NumElements = SubVectorTy->getNumElements();
      else {
        assert((*GEPI)->isStructTy() &&
               "Indexed GEP type is not array, vector, or struct!");
        continue;
      }

      ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
      if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
        return false;
    }
  }

  for (Value::use_iterator I = U->use_begin(), E = U->use_end(); I != E; ++I)
    if (!isSafeSROAElementUse(*I))
      return false;
  return true;
}

/// GlobalUsersSafeToSRA - Look at all uses of the global and decide whether it
/// is safe for us to perform this transformation.
///
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI) {
    if (!IsUserOfGlobalSafeForSRA(*UI, GV))
      return false;
  }
  return true;
}
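// Illustrative example (IR sketch; @g and @a are hypothetical): a use of the
// form 'gep GV, 0, C' with constant C is SRA-safe, while a variable element
// index is not:
//
//   %p = getelementptr { i32, i32 }* @g, i32 0, i32 1    ; ok
//   %q = getelementptr [4 x i32]* @a, i32 0, i32 %i      ; rejected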
/// SRAGlobal - Perform scalar replacement of aggregates on the specified global
/// variable. This opens the door for other optimizations by exposing the
/// behavior of the program in a more fine-grained way. We have determined that
/// this transformation is safe already. We return the first global variable we
/// insert so that the caller can reprocess it.
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
  // Make sure this global only has simple uses that we can SRA.
  if (!GlobalUsersSafeToSRA(GV))
    return 0;

  assert(GV->hasLocalLinkage() && !GV->isConstant());
  Constant *Init = GV->getInitializer();
  Type *Ty = Init->getType();

  std::vector<GlobalVariable*> NewGlobals;
  Module::GlobalListType &Globals = GV->getParent()->getGlobalList();

  // Get the alignment of the global, either explicit or target-specific.
  unsigned StartAlignment = GV->getAlignment();
  if (StartAlignment == 0)
    StartAlignment = TD.getABITypeAlignment(GV->getType());

  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    NewGlobals.reserve(STy->getNumElements());
    const StructLayout &Layout = *TD.getStructLayout(STy);
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");
      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field. If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      uint64_t FieldOffset = Layout.getElementOffset(i);
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
      if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i)))
        NGV->setAlignment(NewAlign);
    }
  } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
    unsigned NumElements = 0;
    if (ArrayType *ATy = dyn_cast<ArrayType>(STy))
      NumElements = ATy->getNumElements();
    else
      NumElements = cast<VectorType>(STy)->getNumElements();

    if (NumElements > 16 && GV->hasNUsesOrMore(16))
      return 0; // It's not worth it.
    NewGlobals.reserve(NumElements);

    uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType());
    unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");

      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field. If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
      if (NewAlign > EltAlign)
        NGV->setAlignment(NewAlign);
    }
  }

  if (NewGlobals.empty())
    return 0;

  DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV);

  Constant *NullInt = Constant::getNullValue(Type::getInt32Ty(GV->getContext()));

  // Loop over all of the uses of the global, replacing the constantexpr geps
  // with smaller constantexpr geps or direct references.
  while (!GV->use_empty()) {
    User *GEP = GV->use_back();
    assert(((isa<ConstantExpr>(GEP) &&
             cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
            isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");

    // Ignore the first index operand, which has to be zero or else the program
    // is quite broken (undefined). Get the second index, which is the
    // structure or array index.
    unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.

    Value *NewPtr = NewGlobals[Val];

    // Form a shorter GEP if needed.
    if (GEP->getNumOperands() > 3) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
        SmallVector<Constant*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
          Idxs.push_back(CE->getOperand(i));
        NewPtr = ConstantExpr::getGetElementPtr(cast<Constant>(NewPtr), Idxs);
      } else {
        GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
        SmallVector<Value*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
          Idxs.push_back(GEPI->getOperand(i));
        NewPtr = GetElementPtrInst::Create(NewPtr, Idxs,
                                           GEPI->getName()+"."+Twine(Val), GEPI);
      }
    }
    GEP->replaceAllUsesWith(NewPtr);

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
      GEPI->eraseFromParent();
    else
      cast<ConstantExpr>(GEP)->destroyConstant();
  }

  // Delete the old global, now that it is dead.
  Globals.erase(GV);
  ++NumSRA;

  // Loop over the new globals array deleting any globals that are obviously
  // dead. This can arise due to scalarization of a structure or an array that
  // has elements that are dead.
  unsigned FirstGlobal = 0;
  for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
    if (NewGlobals[i]->use_empty()) {
      Globals.erase(NewGlobals[i]);
      if (FirstGlobal == i) ++FirstGlobal;
    }

  return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : 0;
}
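// Illustrative example (IR sketch; @g is hypothetical): SRA splits an
// internal struct global into one scalar global per field and rewrites
// 'gep @g, 0, i' uses to refer to the new globals directly:
//
//   @g = internal global { i32, i32 } { i32 1, i32 2 }
// becomes:
//   @g.0 = internal global i32 1
//   @g.1 = internal global i32 2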
/// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified
/// value will trap if the value is dynamically null. PHIs keeps track of any
/// phi nodes we've seen to avoid reprocessing them.
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
                                         SmallPtrSet<const PHINode*, 8> &PHIs) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const User *U = *UI;

    if (isa<LoadInst>(U)) {
      // Will trap.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Storing the value.
      }
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      if (CI->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      if (II->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // If we've already seen this phi node, ignore it, it has already been
      // checked.
      if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
        return false;
    } else if (isa<ICmpInst>(U) &&
               isa<ConstantPointerNull>(UI->getOperand(1))) {
      // Ignore icmp X, null
    } else {
      //cerr << "NONTRAPPING USE: " << *U;
      return false;
    }
  }
  return true;
}

/// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads
/// from GV will trap if the loaded value is null. Note that this also permits
/// comparisons of the loaded value against null, as a special case.
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
  for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI) {
    const User *U = *UI;

    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      SmallPtrSet<const PHINode*, 8> PHIs;
      if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
        return false;
    } else if (isa<StoreInst>(U)) {
      // Ignore stores to the global.
    } else {
      // We don't know or understand this user, bail out.
      //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
      return false;
    }
  }
  return true;
}
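// Illustrative example (IR sketch; @g and %p are hypothetical): every use of
// the loaded pointer below either dereferences it or compares it against
// null, so a dynamically-null value could not go unnoticed:
//
//   %p = load i32** @g
//   %c = icmp eq i32* %p, null   ; permitted as a special case
//   %v = load i32* %p            ; would trap if %p were null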
static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) {
    Instruction *I = cast<Instruction>(*UI++);
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallSite CS(I);
      if (CS.getCalledValue() == V) {
        // Calling through the pointer! Turn into a direct call, but be careful
        // that the pointer is not also being passed as an argument.
        CS.setCalledFunction(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.getArgument(i) == V) {
            PassedAsArg = true;
            CS.setArgument(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also. Be careful to not invalidate UI!
          UI = V->use_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(CI,
                                ConstantExpr::getCast(CI->getOpcode(),
                                                      NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands()-1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands()-1)
        Changed |= OptimizeAwayTrappingUsesOfValue(GEPI,
                          ConstantExpr::getGetElementPtr(NewV, Idxs));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}


/// OptimizeAwayTrappingUsesOfLoads - The specified global has only one non-null
/// value stored into it. If there are uses of the loaded value that would trap
/// if the loaded value is dynamically null, then we know that they cannot be
/// reachable with a null value, so we can optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
                                            TargetData *TD,
                                            TargetLibraryInfo *TLI) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of the stored value.
  for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser)) &&
             "Only expect load and stores!");
    }
  }

  if (Changed) {
    DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV);
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    if (isLeakCheckerRoot(GV)) {
      Changed |= CleanupPointerRootUsers(GV);
    } else {
      Changed = true;
      CleanupConstantGlobalUsers(GV, 0, TD, TLI);
    }
    if (GV->use_empty()) {
      DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
      Changed = true;
      GV->eraseFromParent();
      ++NumDeleted;
    }
  }
  return Changed;
}
/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
static void ConstantPropUsersOf(Value *V,
                                TargetData *TD, TargetLibraryInfo *TLI) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I, TD, TLI)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        I->eraseFromParent();
      }
}

/// OptimizeGlobalAddressOfMalloc - This function takes the specified global
/// variable, and transforms the program as if it always contained the result of
/// the specified malloc. Because it is always the result of the specified
/// malloc, there is no reason to actually DO the malloc. Instead, turn the
/// malloc into a global, and rewrite any loads of GV as uses of the new global.
static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
                                                     CallInst *CI,
                                                     Type *AllocTy,
                                                     ConstantInt *NElements,
                                                     TargetData *TD,
                                                     TargetLibraryInfo *TLI) {
  DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI << '\n');

  Type *GlobalType;
  if (NElements->getZExtValue() == 1)
    GlobalType = AllocTy;
  else
    // If we have an array allocation, the global variable is of an array.
    GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());

  // Create the new global variable. The contents of the malloc'd memory are
  // undefined, so initialize with an undef value.
  GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(),
                                             GlobalType, false,
                                             GlobalValue::InternalLinkage,
                                             UndefValue::get(GlobalType),
                                             GV->getName()+".body",
                                             GV,
                                             GV->getThreadLocalMode());

  // If there are bitcast users of the malloc (which is typical, usually we have
  // a malloc + bitcast) then replace them with uses of the new global. Update
  // other users to use the global as well.
  BitCastInst *TheBC = 0;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->use_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        BCI->setOperand(0, NewGV);
      }
    } else {
      if (TheBC == 0)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getType()->getElementType())
    RepValue = ConstantExpr::getBitCast(RepValue,
                                        GV->getType()->getElementType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
  GlobalVariable *InitBool =
    new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                       GlobalValue::InternalLinkage,
                       ConstantInt::getFalse(GV->getContext()),
                       GV->getName()+".init", GV->getThreadLocalMode());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  while (!GV->use_empty()) {
    if (StoreInst *SI = dyn_cast<StoreInst>(GV->use_back())) {
      // The global is initialized when the store to it occurs.
      new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
                    SI->getOrdering(), SI->getSynchScope(), SI);
      SI->eraseFromParent();
      continue;
    }

    LoadInst *LI = cast<LoadInst>(GV->use_back());
    while (!LI->use_empty()) {
      Use &LoadUse = LI->use_begin().getUse();
      if (!isa<ICmpInst>(LoadUse.getUser())) {
        LoadUse = RepValue;
        continue;
      }

      ICmpInst *ICI = cast<ICmpInst>(LoadUse.getUser());
      // Replace the cmp X, 0 with a use of the bool value.
      // Sink the load to where the compare was, if atomic rules allow us to.
      Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
                               LI->getOrdering(), LI->getSynchScope(),
                               LI->isUnordered() ? (Instruction*)ICI : LI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT:   // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_EQ:
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGE:
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT:
        break;  // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<StoreInst>(InitBool->use_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV, InitBool);

  // Now the GV is dead, nuke it and the malloc.
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To further other optimizations, loop over all users of NewGV and try to
  // constant prop them. This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on it.
  ConstantPropUsersOf(NewGV, TD, TLI);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue, TD, TLI);

  return NewGV;
}
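// Illustrative example (IR sketch; @g and %m are hypothetical): a pointer
// global initialized by a single fixed-size malloc,
//
//   @g = internal global i32* null
//   store i32* %m, i32** @g          ; %m is the malloc result
//
// is rewritten to a global body plus, if any null-comparisons remain, an
// init flag:
//
//   @g.body = internal global i32 undef
//   @g.init = internal global i1 false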
/// ValueIsOnlyUsedLocallyOrStoredToOneGlobal - Scan the use-list of V checking
/// to make sure that there are no complex uses of V. We permit simple things
/// like dereferencing the pointer, but not storing through the address, unless
/// it is to the specified global.
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
                                                      const GlobalVariable *GV,
                                        SmallPtrSet<const PHINode*, 8> &PHIs) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    const Instruction *Inst = cast<Instruction>(*UI);

    if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
      continue; // Fine, ignore.
    }

    if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
        return false;  // Storing the pointer itself... bad.
      continue; // Otherwise, storing through it, or storing into GV... fine.
    }

    // Must index into the array and into the struct.
    if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
        return false;
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
      // PHIs are ok if all uses are ok. Don't infinitely recurse through PHI
      // cycles.
      if (PHIs.insert(PN))
        if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
          return false;
      continue;
    }

    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
        return false;
      continue;
    }

    return false;
  }
  return true;
}

/// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV
/// somewhere. Transform all uses of the allocation into loads from the
/// global and uses of the resultant pointer. Further, delete the store into
/// GV. This assumes that these values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->use_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be bitcast between the malloc and store to initialize the global.
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->use_back()))
          if (SI->getOperand(1) == GV) {
            // Must be bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}
/// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a phi
/// of a load) are simple enough to perform heap SRA on. This permits GEP's
/// that index through the array and struct field, icmps of null, and PHIs.
static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
                         SmallPtrSet<const PHINode*, 32> &LoadUsingPHIs,
                         SmallPtrSet<const PHINode*, 32> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const Instruction *User = cast<Instruction>(*UI);

    // Comparison against null is ok.
    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(User)) {
      if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
        return false;
      continue;
    }

    // getelementptr is also ok, but only a simple form.
    if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
      // Must index into the array and into the struct.
      if (GEPI->getNumOperands() < 3)
        return false;

      // Otherwise the GEP is ok.
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(User)) {
      if (!LoadUsingPHIsPerLoad.insert(PN))
        // This means some phi nodes are dependent on each other.
        // Avoid infinite looping!
        return false;
      if (!LoadUsingPHIs.insert(PN))
        // If we have already analyzed this PHI, then it is safe.
        continue;

      // Make sure all uses of the PHI are simple enough to transform.
      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
        return false;

      continue;
    }

    // Otherwise we don't know what this is, not ok.
    return false;
  }

  return true;
}


/// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded from
/// GV are simple enough to perform HeapSRA, return true.
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
                                                    Instruction *StoredVal) {
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
  for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI)
    if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
                                          LoadUsingPHIsPerLoad))
        return false;
      LoadUsingPHIsPerLoad.clear();
    }

  // If we reach here, we know that all uses of the loads and transitive uses
  // (through PHI nodes) are simple enough to transform. However, we don't know
  // that all inputs to the PHI nodes are in the same equivalence sets.
  // Check to verify that all operands of the PHIs are either PHIs that can be
  // transformed, loads from GV, or the stored value itself.
  for (SmallPtrSet<const PHINode*, 32>::const_iterator I = LoadUsingPHIs.begin()
       , E = LoadUsingPHIs.end(); I != E; ++I) {
    const PHINode *PN = *I;
    for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
      Value *InVal = PN->getIncomingValue(op);

      // PHI of the stored value itself is ok.
      if (InVal == StoredVal) continue;

      if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
        // One of the PHIs in our set is (optimistically) ok.
        if (LoadUsingPHIs.count(InPN))
          continue;
        return false;
      }

      // Load from GV is ok.
      if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
        if (LI->getOperand(0) == GV)
          continue;

      // UNDEF? NULL?

      // Anything else is rejected.
      return false;
    }
  }

  return true;
}
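// Illustrative example (IR sketch; %pair, @g, and the values are
// hypothetical): loads of @g that only feed null compares, field GEPs, and
// PHIs over other such loads are simple enough for heap SRA:
//
//   %a = load %pair** @g
//   ...
//   %p = phi %pair* [ %a, %bb1 ], [ %b, %bb2 ]   ; %b is another load of @g
//   %f = getelementptr %pair* %p, i32 0, i32 1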
static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo+1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global. Just create
    // a new Load of the scalarized global.
    Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                           InsertedScalarizedValues,
                                           PHIsToRewrite),
                          LI->getName()+".f"+Twine(FieldNo), LI);
  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    // PN's type is pointer to struct. Make a new PHI of pointer to struct
    // field.
    StructType *ST =
      cast<StructType>(cast<PointerType>(PN->getType())->getElementType());

    PHINode *NewPN =
     PHINode::Create(PointerType::getUnqual(ST->getElementType(FieldNo)),
                     PN->getNumIncomingValues(),
                     PN->getName()+".f"+Twine(FieldNo), PN);
    Result = NewPN;
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  } else {
    llvm_unreachable("Unknown usable value");
  }

  return FieldVals[FieldNo] = Result;
}

/// RewriteHeapSROALoadUser - Given a load instruction and a value derived from
/// the load, rewrite the derived value to use the HeapSRoA'd load.
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
             DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Constant::getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite);

    // Create the new GEP idx vector.
    SmallVector<Value*, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(NewPtr, GEPIdx,
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes. This will lazily create the
  // PHIs that are needed for individual elements. Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial). If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  if (!InsertedScalarizedValues.insert(std::make_pair(PN,
                                              std::vector<Value*>())).second)
    return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.
  for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }
}
1386static void RewriteHeapSROALoadUser(Instruction *LoadUser, 1387 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, 1388 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) { 1389 // If this is a comparison against null, handle it. 1390 if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) { 1391 assert(isa<ConstantPointerNull>(SCI->getOperand(1))); 1392 // If we have a setcc of the loaded pointer, we can use a setcc of any 1393 // field. 1394 Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0, 1395 InsertedScalarizedValues, PHIsToRewrite); 1396 1397 Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr, 1398 Constant::getNullValue(NPtr->getType()), 1399 SCI->getName()); 1400 SCI->replaceAllUsesWith(New); 1401 SCI->eraseFromParent(); 1402 return; 1403 } 1404 1405 // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...' 1406 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) { 1407 assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2)) 1408 && "Unexpected GEPI!"); 1409 1410 // Load the pointer for this field. 1411 unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue(); 1412 Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo, 1413 InsertedScalarizedValues, PHIsToRewrite); 1414 1415 // Create the new GEP idx vector. 1416 SmallVector<Value*, 8> GEPIdx; 1417 GEPIdx.push_back(GEPI->getOperand(1)); 1418 GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end()); 1419 1420 Value *NGEPI = GetElementPtrInst::Create(NewPtr, GEPIdx, 1421 GEPI->getName(), GEPI); 1422 GEPI->replaceAllUsesWith(NGEPI); 1423 GEPI->eraseFromParent(); 1424 return; 1425 } 1426 1427 // Recursively transform the users of PHI nodes. This will lazily create the 1428 // PHIs that are needed for individual elements. Keep track of what PHIs we 1429 // see in InsertedScalarizedValues so that we don't get infinite loops (very 1430 // antisocial). If the PHI is already in InsertedScalarizedValues, it has 1431 // already been seen first by another load, so its uses have already been 1432 // processed. 1433 PHINode *PN = cast<PHINode>(LoadUser); 1434 if (!InsertedScalarizedValues.insert(std::make_pair(PN, 1435 std::vector<Value*>())).second) 1436 return; 1437 1438 // If this is the first time we've seen this PHI, recursively process all 1439 // users. 1440 for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) { 1441 Instruction *User = cast<Instruction>(*UI++); 1442 RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite); 1443 } 1444} 1445 1446/// RewriteUsesOfLoadForHeapSRoA - We are performing Heap SRoA on a global. Ptr 1447/// is a value loaded from the global. Eliminate all uses of Ptr, making them 1448/// use FieldGlobals instead. All uses of loaded values satisfy 1449/// AllGlobalLoadUsesSimpleEnoughForHeapSRA. 1450static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load, 1451 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, 1452 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) { 1453 for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end(); 1454 UI != E; ) { 1455 Instruction *User = cast<Instruction>(*UI++); 1456 RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite); 1457 } 1458 1459 if (Load->use_empty()) { 1460 Load->eraseFromParent(); 1461 InsertedScalarizedValues.erase(Load); 1462 } 1463} 1464 1465/// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break 1466/// it up into multiple allocations of arrays of the fields. 
1467static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI, 1468 Value *NElems, TargetData *TD) { 1469 DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n'); 1470 Type *MAT = getMallocAllocatedType(CI); 1471 StructType *STy = cast<StructType>(MAT); 1472 1473 // There is guaranteed to be at least one use of the malloc (storing 1474 // it into GV). If there are other uses, change them to be uses of 1475 // the global to simplify later code. This also deletes the store 1476 // into GV. 1477 ReplaceUsesOfMallocWithGlobal(CI, GV); 1478 1479 // Okay, at this point, there are no users of the malloc. Insert N 1480 // new mallocs at the same place as CI, and N globals. 1481 std::vector<Value*> FieldGlobals; 1482 std::vector<Value*> FieldMallocs; 1483 1484 for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){ 1485 Type *FieldTy = STy->getElementType(FieldNo); 1486 PointerType *PFieldTy = PointerType::getUnqual(FieldTy); 1487 1488 GlobalVariable *NGV = 1489 new GlobalVariable(*GV->getParent(), 1490 PFieldTy, false, GlobalValue::InternalLinkage, 1491 Constant::getNullValue(PFieldTy), 1492 GV->getName() + ".f" + Twine(FieldNo), GV, 1493 GV->getThreadLocalMode()); 1494 FieldGlobals.push_back(NGV); 1495 1496 unsigned TypeSize = TD->getTypeAllocSize(FieldTy); 1497 if (StructType *ST = dyn_cast<StructType>(FieldTy)) 1498 TypeSize = TD->getStructLayout(ST)->getSizeInBytes(); 1499 Type *IntPtrTy = TD->getIntPtrType(CI->getContext()); 1500 Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy, 1501 ConstantInt::get(IntPtrTy, TypeSize), 1502 NElems, 0, 1503 CI->getName() + ".f" + Twine(FieldNo)); 1504 FieldMallocs.push_back(NMI); 1505 new StoreInst(NMI, NGV, CI); 1506 } 1507 1508 // The tricky aspect of this transformation is handling the case when malloc 1509 // fails. In the original code, malloc failing would set the result pointer 1510 // of malloc to null. In this case, some mallocs could succeed and others 1511 // could fail. As such, we emit code that looks like this: 1512 // F0 = malloc(field0) 1513 // F1 = malloc(field1) 1514 // F2 = malloc(field2) 1515 // if (F0 == 0 || F1 == 0 || F2 == 0) { 1516 // if (F0) { free(F0); F0 = 0; } 1517 // if (F1) { free(F1); F1 = 0; } 1518 // if (F2) { free(F2); F2 = 0; } 1519 // } 1520 // The malloc can also fail if its argument is too large. 1521 Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0); 1522 Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0), 1523 ConstantZero, "isneg"); 1524 for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) { 1525 Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i], 1526 Constant::getNullValue(FieldMallocs[i]->getType()), 1527 "isnull"); 1528 RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI); 1529 } 1530 1531 // Split the basic block at the old malloc. 1532 BasicBlock *OrigBB = CI->getParent(); 1533 BasicBlock *ContBB = OrigBB->splitBasicBlock(CI, "malloc_cont"); 1534 1535 // Create the block to check the first condition. Put all these blocks at the 1536 // end of the function as they are unlikely to be executed. 1537 BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(), 1538 "malloc_ret_null", 1539 OrigBB->getParent()); 1540 1541 // Remove the uncond branch from OrigBB to ContBB, turning it into a cond 1542 // branch on RunningOr. 

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Constant::getNullValue(GVVal->getType()));
    BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                               OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
                                               OrigBB->getParent());
    Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
                                         Cmp, NullPtrBlock);

    // Fill in FreeBlock.
    CallInst::CreateFree(GVVal, BI);
    new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
                  FreeBlock);
    BranchInst::Create(NextBlock, FreeBlock);

    NullPtrBlock = NextBlock;
  }

  BranchInst::Create(ContBB, NullPtrBlock);

  // CI is no longer needed, remove it.
  CI->eraseFromParent();

  /// InsertedScalarizedValues - As we process loads, if we can't immediately
  /// update all uses of the load, keep track of what scalarized loads are
  /// inserted for a given load.
  DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues;
  InsertedScalarizedValues[GV] = FieldGlobals;

  std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite;

  // Okay, the malloc site is completely handled. All of the uses of GV are now
  // loads, and all uses of those loads are simple. Rewrite them to use loads
  // of the per-field globals instead.
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
      continue;
    }

    // Must be a store of null.
    StoreInst *SI = cast<StoreInst>(User);
    assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
           "Unexpected heap-sra user!");

    // Insert a store of null into each global.
    for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
      PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
      Constant *Null = Constant::getNullValue(PT->getElementType());
      new StoreInst(Null, FieldGlobals[i], SI);
    }
    // Erase the original store.
    SI->eraseFromParent();
  }

  // While we have PHIs that are interesting to rewrite, do it.
  while (!PHIsToRewrite.empty()) {
    PHINode *PN = PHIsToRewrite.back().first;
    unsigned FieldNo = PHIsToRewrite.back().second;
    PHIsToRewrite.pop_back();
    PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
    assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi");

    // Add all the incoming values. This can materialize more phis.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
                               PHIsToRewrite);
      FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
    }
  }
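
  // The scalarized PHIs and loads recorded above may reference one another,
  // possibly cyclically, so the teardown below happens in two phases: first
  // drop all operand references, then erase the instructions once nothing
  // points at them anymore.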
  // Drop all inter-phi links and any loads that made it this far.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->dropAllReferences();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->dropAllReferences();
  }

  // Delete all the phis and loads now that inter-references are dead.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->eraseFromParent();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->eraseFromParent();
  }

  // The old global is now dead, remove it.
  GV->eraseFromParent();

  ++NumHeapSRA;
  return cast<GlobalVariable>(FieldGlobals[0]);
}

/// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a
/// pointer global variable with a single value stored to it that is a malloc
/// or cast of malloc.
static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
                                               CallInst *CI,
                                               Type *AllocTy,
                                               AtomicOrdering Ordering,
                                               Module::global_iterator &GVI,
                                               TargetData *TD,
                                               TargetLibraryInfo *TLI) {
  if (!TD)
    return false;

  // If this is a malloc of an abstract type, don't touch it.
  if (!AllocTy->isSized())
    return false;

  // We can't optimize this global unless all uses of it are *known* to be
  // of the malloc value, not of the null initializer value (consider a use
  // that compares the global's value against zero to see if the malloc has
  // been reached). To do this, we check to see if all uses of the global
  // would trap if the global were null: this proves that they must all
  // happen after the malloc.
  if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
    return false;

  // We can't optimize this if the malloc itself is used in a complex way,
  // for example, being stored into multiple globals. This allows the
  // malloc to be stored into the specified global, loaded, icmp'd, and
  // GEP'd. These are all things we could transform to using the global
  // for.
  SmallPtrSet<const PHINode*, 8> PHIs;
  if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
    return false;

  // If we have a global that is only initialized with a fixed size malloc,
  // transform the program to use global memory instead of malloc'd memory.
  // This eliminates dynamic allocation, avoids an indirection accessing the
  // data, and exposes the resultant global to further GlobalOpt.
  // We cannot optimize the malloc if we cannot determine malloc array size.
  Value *NElems = getMallocArraySize(CI, TD, true);
  if (!NElems)
    return false;

  if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
    // Restrict this transformation to only working on small allocations
    // (2048 bytes currently), as we don't want to introduce a 16M global or
    // something.
    if (NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) {
      GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD, TLI);
      return true;
    }

  // If the allocation is an array of structures, consider transforming this
  // into multiple malloc'd arrays, one for each field. This is basically
  // SRoA for malloc'd memory.
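  // For instance (hypothetical types), a malloc of N elements of
  // %struct.pair = { i32, double } would be split into two allocations,
  // malloc(4 * N) and malloc(8 * N), stored into the new per-field globals
  // @G.f0 and @G.f1 respectively.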

  if (Ordering != NotAtomic)
    return false;

  // If this is an allocation of a fixed size array of structs, analyze as a
  // variable size array. malloc [100 x struct],1 -> malloc struct, 100
  if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
    if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
      AllocTy = AT->getElementType();

  StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
  if (!AllocSTy)
    return false;

  // If the structure has an unreasonable number of fields, leave it
  // alone.
  if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
      AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {

    // If this is a fixed size array, transform the Malloc to be an alloc of
    // structs. malloc [100 x struct],1 -> malloc struct, 100
    if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI))) {
      Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
      unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
      Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
      Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
      Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
                                                   AllocSize, NumElements,
                                                   0, CI->getName());
      Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
      CI->replaceAllUsesWith(Cast);
      CI->eraseFromParent();
      if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc))
        CI = cast<CallInst>(BCI->getOperand(0));
      else
        CI = cast<CallInst>(Malloc);
    }

    GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, true), TD);
    return true;
  }

  return false;
}

// OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge
// that only one value (besides its initializer) is ever stored to the global.
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
                                     AtomicOrdering Ordering,
                                     Module::global_iterator &GVI,
                                     TargetData *TD, TargetLibraryInfo *TLI) {
  // Ignore no-op GEPs and bitcasts.
  StoredOnceVal = StoredOnceVal->stripPointerCasts();

  // If we are dealing with a pointer global that is initialized to null and
  // only has one (non-null) value stored into it, then we can optimize any
  // users of the loaded value (often calls and loads) that would trap if the
  // value was null.
  if (GV->getInitializer()->getType()->isPointerTy() &&
      GV->getInitializer()->isNullValue()) {
    if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
      if (GV->getInitializer()->getType() != SOVC->getType())
        SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());

      // Optimize away any trapping uses of the loaded value.
      if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, TD, TLI))
        return true;
    } else if (CallInst *CI = extractMallocCall(StoredOnceVal)) {
      Type *MallocType = getMallocAllocatedType(CI);
      if (MallocType &&
          TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI,
                                             TD, TLI))
        return true;
    }
  }

  return false;
}
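
// As an illustrative sketch of the trapping-use case above (with a
// hypothetical global @P): if @P is "internal global i32* null" and its single
// store is "store i32* @X, i32** @P", then a use like "store i32 0, i32* %v"
// with "%v = load i32** @P" would trap if @P still held null, so
// OptimizeAwayTrappingUsesOfLoads can safely substitute @X for %v.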

/// TryToShrinkGlobalToBoolean - At this point, we have learned that the only
/// two values ever stored into GV are its initializer and OtherVal. See if we
/// can shrink the global into a boolean and select between the two values
/// whenever it is used. This exposes the values to other scalar optimizations.
static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
  Type *GVElType = GV->getType()->getElementType();

  // If GVElType is already i1, it is already shrunk. If the type of the GV is
  // an FP value, pointer or vector, don't do this optimization because a select
  // between them is very expensive and unlikely to lead to later
  // simplification. In these cases, we typically end up with "cond ? v1 : v2"
  // where v1 and v2 both require constant pool loads, a big loss.
  if (GVElType == Type::getInt1Ty(GV->getContext()) ||
      GVElType->isFloatingPointTy() ||
      GVElType->isPointerTy() || GVElType->isVectorTy())
    return false;

  // Walk the use list of the global seeing if all the uses are loads or stores.
  // If there is anything else, bail out.
  for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){
    User *U = *I;
    if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
      return false;
  }

  DEBUG(dbgs() << "   *** SHRINKING TO BOOL: " << *GV);

  // Create the new global, initializing it to false.
  GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
                                             false,
                                             GlobalValue::InternalLinkage,
                                        ConstantInt::getFalse(GV->getContext()),
                                             GV->getName()+".b",
                                             GV->getThreadLocalMode());
  GV->getParent()->getGlobalList().insert(GV, NewGV);

  Constant *InitVal = GV->getInitializer();
  assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
         "No reason to shrink to bool!");

  // If initialized to zero and storing one into the global, we can use a cast
  // instead of a select to synthesize the desired value.
  bool IsOneZero = false;
  if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal))
    IsOneZero = InitVal->isNullValue() && CI->isOne();

  while (!GV->use_empty()) {
    Instruction *UI = cast<Instruction>(GV->use_back());
    if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      // Change the store into a boolean store.
      bool StoringOther = SI->getOperand(0) == OtherVal;
      // Only do this if we weren't storing a loaded value.
      Value *StoreVal;
      if (StoringOther || SI->getOperand(0) == InitVal)
        StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
                                    StoringOther);
      else {
        // Otherwise, we are storing a previously loaded copy. To do this,
        // change the copy from copying the original value to just copying the
        // bool.
        Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));

        // If we've already replaced the input, StoredVal will be a cast or
        // select instruction. If not, it will be a load of the original
        // global.
        if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
          assert(LI->getOperand(0) == GV && "Not a copy!");
          // Insert a new load, to preserve the saved value.
          StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0,
                                  LI->getOrdering(), LI->getSynchScope(), LI);
        } else {
          assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
                 "This is not a form that we understand!");
          StoreVal = StoredVal->getOperand(0);
          assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
        }
      }
      new StoreInst(StoreVal, NewGV, false, 0,
                    SI->getOrdering(), SI->getSynchScope(), SI);
    } else {
      // Change the load into a load of bool then a select.
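      // e.g. (illustrative): "%v = load i32* @G" becomes
      //   %v.b = load i1* @G.b
      //   %v   = select i1 %v.b, i32 <OtherVal>, i32 <InitVal>
      // or just a zext of %v.b when the global is a 0/1 pair (IsOneZero).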
      LoadInst *LI = cast<LoadInst>(UI);
      LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0,
                                   LI->getOrdering(), LI->getSynchScope(), LI);
      Value *NSI;
      if (IsOneZero)
        NSI = new ZExtInst(NLI, LI->getType(), "", LI);
      else
        NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
      NSI->takeName(LI);
      LI->replaceAllUsesWith(NSI);
    }
    UI->eraseFromParent();
  }

  GV->eraseFromParent();
  return true;
}


/// ProcessGlobal - Analyze the specified global variable and optimize it if
/// possible. If we make a change, return true.
bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
                              Module::global_iterator &GVI) {
  if (!GV->isDiscardableIfUnused())
    return false;

  // Do more involved optimizations if the global is internal.
  GV->removeDeadConstantUsers();

  if (GV->use_empty()) {
    DEBUG(dbgs() << "GLOBAL DEAD: " << *GV);
    GV->eraseFromParent();
    ++NumDeleted;
    return true;
  }

  if (!GV->hasLocalLinkage())
    return false;

  SmallPtrSet<const PHINode*, 16> PHIUsers;
  GlobalStatus GS;

  if (AnalyzeGlobal(GV, GS, PHIUsers))
    return false;

  if (!GS.isCompared && !GV->hasUnnamedAddr()) {
    GV->setUnnamedAddr(true);
    NumUnnamed++;
  }

  if (GV->isConstant() || !GV->hasInitializer())
    return false;

  return ProcessInternalGlobal(GV, GVI, PHIUsers, GS);
}

/// ProcessInternalGlobal - Analyze the specified global variable and optimize
/// it if possible. If we make a change, return true.
bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
                                      Module::global_iterator &GVI,
                                const SmallPtrSet<const PHINode*, 16> &PHIUsers,
                                      const GlobalStatus &GS) {
  // If this is a first class global and has only one accessing function and
  // this function is main (which we know is not recursive), we replace the
  // global with a local alloca in this function.
  //
  // NOTE: It doesn't make sense to promote non single-value types since we
  // are just replacing static memory with stack memory.
  //
  // If the global is in different address space, don't bring it to stack.
  if (!GS.HasMultipleAccessingFunctions &&
      GS.AccessingFunction && !GS.HasNonInstructionUser &&
      GV->getType()->getElementType()->isSingleValueType() &&
      GS.AccessingFunction->getName() == "main" &&
      GS.AccessingFunction->hasExternalLinkage() &&
      GV->getType()->getAddressSpace() == 0) {
    DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV);
    Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
                                                   ->getEntryBlock().begin());
    Type *ElemTy = GV->getType()->getElementType();
    // FIXME: Pass Global's alignment when globals have alignment
    AllocaInst *Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
    if (!isa<UndefValue>(GV->getInitializer()))
      new StoreInst(GV->getInitializer(), Alloca, &FirstI);

    GV->replaceAllUsesWith(Alloca);
    GV->eraseFromParent();
    ++NumLocalized;
    return true;
  }

  // If the global is never loaded (but may be stored to), it is dead.
  // Delete it now.
  if (!GS.isLoaded) {
    DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV);

    bool Changed;
    if (isLeakCheckerRoot(GV)) {
      // Delete any constant stores to the global.
      Changed = CleanupPointerRootUsers(GV);
    } else {
      // Delete any stores we can find to the global. We may not be able to
      // make it completely dead though.
      Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
    }

    // If the global is dead now, delete it.
    if (GV->use_empty()) {
      GV->eraseFromParent();
      ++NumDeleted;
      Changed = true;
    }
    return Changed;

  } else if (GS.StoredType <= GlobalStatus::isInitializerStored) {
    DEBUG(dbgs() << "MARKING CONSTANT: " << *GV);
    GV->setConstant(true);

    // Clean up any obviously simplifiable users now.
    CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);

    // If the global is dead now, just nuke it.
    if (GV->use_empty()) {
      DEBUG(dbgs() << "   *** Marking constant allowed us to simplify "
            << "all users and delete global!\n");
      GV->eraseFromParent();
      ++NumDeleted;
    }

    ++NumMarked;
    return true;
  } else if (!GV->getInitializer()->getType()->isSingleValueType()) {
    if (TargetData *TD = getAnalysisIfAvailable<TargetData>())
      if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) {
        GVI = FirstNewGV;   // Don't skip the newly produced globals!
        return true;
      }
  } else if (GS.StoredType == GlobalStatus::isStoredOnce) {
    // If the initial value for the global was an undef value, and if only
    // one other value was stored into it, we can just change the
    // initializer to be the stored value, then delete all stores to the
    // global. This allows us to mark it constant.
    if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
      if (isa<UndefValue>(GV->getInitializer())) {
        // Change the initial value here.
        GV->setInitializer(SOVConstant);

        // Clean up any obviously simplifiable users now.
        CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);

        if (GV->use_empty()) {
          DEBUG(dbgs() << "   *** Substituting initializer allowed us to "
                << "simplify all users and delete global!\n");
          GV->eraseFromParent();
          ++NumDeleted;
        } else {
          GVI = GV;
        }
        ++NumSubstitute;
        return true;
      }

    // Try to optimize globals based on the knowledge that only one value
    // (besides its initializer) is ever stored to the global.
    if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, GVI,
                                 TD, TLI))
      return true;

    // Otherwise, if the global was not a boolean, we can shrink it to be a
    // boolean.
    if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
      if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
        ++NumShrunkToBool;
        return true;
      }
  }

  return false;
}

/// ChangeCalleesToFastCall - Walk all of the direct calls of the specified
/// function, changing them to FastCC.
static void ChangeCalleesToFastCall(Function *F) {
  for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
    if (isa<BlockAddress>(*UI))
      continue;
    CallSite User(cast<Instruction>(*UI));
    User.setCallingConv(CallingConv::Fast);
  }
}
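
// Background note: the 'nest' attribute marks the static-chain parameter that
// the trampoline intrinsics thread through to the callee. Once the function's
// address is no longer taken by a trampoline, the attribute serves no purpose
// and can be stripped from the function and all of its call sites, which the
// two helpers below do.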
static AttrListPtr StripNest(const AttrListPtr &Attrs) {
  for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
    if ((Attrs.getSlot(i).Attrs & Attribute::Nest) == 0)
      continue;

    // There can be only one.
    return Attrs.removeAttr(Attrs.getSlot(i).Index, Attribute::Nest);
  }

  return Attrs;
}

static void RemoveNestAttribute(Function *F) {
  F->setAttributes(StripNest(F->getAttributes()));
  for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
    if (isa<BlockAddress>(*UI))
      continue;
    CallSite User(cast<Instruction>(*UI));
    User.setAttributes(StripNest(User.getAttributes()));
  }
}

bool GlobalOpt::OptimizeFunctions(Module &M) {
  bool Changed = false;
  // Optimize functions.
  for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
    Function *F = FI++;
    // Functions without names cannot be referenced outside this module.
    if (!F->hasName() && !F->isDeclaration())
      F->setLinkage(GlobalValue::InternalLinkage);
    F->removeDeadConstantUsers();
    if (F->isDefTriviallyDead()) {
      F->eraseFromParent();
      Changed = true;
      ++NumFnDeleted;
    } else if (F->hasLocalLinkage()) {
      if (F->getCallingConv() == CallingConv::C && !F->isVarArg() &&
          !F->hasAddressTaken()) {
        // If this function has C calling conventions, is not a varargs
        // function, and is only called directly, promote it to use the Fast
        // calling convention.
        F->setCallingConv(CallingConv::Fast);
        ChangeCalleesToFastCall(F);
        ++NumFastCallFns;
        Changed = true;
      }

      if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) &&
          !F->hasAddressTaken()) {
        // The function is not used by a trampoline intrinsic, so it is safe
        // to remove the 'nest' attribute.
        RemoveNestAttribute(F);
        ++NumNestRemoved;
        Changed = true;
      }
    }
  }
  return Changed;
}

bool GlobalOpt::OptimizeGlobalVars(Module &M) {
  bool Changed = false;
  for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
       GVI != E; ) {
    GlobalVariable *GV = GVI++;
    // Global variables without names cannot be referenced outside this module.
    if (!GV->hasName() && !GV->isDeclaration())
      GV->setLinkage(GlobalValue::InternalLinkage);
    // Simplify the initializer.
    if (GV->hasInitializer())
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
        Constant *New = ConstantFoldConstantExpression(CE, TD, TLI);
        if (New && New != CE)
          GV->setInitializer(New);
      }

    Changed |= ProcessGlobal(GV, GVI);
  }
  return Changed;
}
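
// For reference, llvm.global_ctors has the shape (illustrative):
//   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
//     [{ i32, void ()* } { i32 65535, void ()* @some_ctor }]
// where the i32 is the init priority and the pointer is the ctor to run.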
/// FindGlobalCtors - Find the llvm.global_ctors list, verifying that all
/// initializers have an init priority of 65535.
GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) {
  GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
  if (GV == 0) return 0;

  // Verify that the initializer is simple enough for us to handle. We are
  // only allowed to optimize the initializer if it is unique.
  if (!GV->hasUniqueInitializer()) return 0;

  if (isa<ConstantAggregateZero>(GV->getInitializer()))
    return GV;
  ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());

  for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
    if (isa<ConstantAggregateZero>(*i))
      continue;
    ConstantStruct *CS = cast<ConstantStruct>(*i);
    if (isa<ConstantPointerNull>(CS->getOperand(1)))
      continue;

    // Must have a function or null ptr.
    if (!isa<Function>(CS->getOperand(1)))
      return 0;

    // Init priority must be standard.
    ConstantInt *CI = cast<ConstantInt>(CS->getOperand(0));
    if (CI->getZExtValue() != 65535)
      return 0;
  }

  return GV;
}

/// ParseGlobalCtors - Given an llvm.global_ctors list that we can understand,
/// return a list of the functions and null terminator as a vector.
static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) {
  if (GV->getInitializer()->isNullValue())
    return std::vector<Function*>();
  ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
  std::vector<Function*> Result;
  Result.reserve(CA->getNumOperands());
  for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
    ConstantStruct *CS = cast<ConstantStruct>(*i);
    Result.push_back(dyn_cast<Function>(CS->getOperand(1)));
  }
  return Result;
}

/// InstallGlobalCtors - Given the specified llvm.global_ctors list and the new
/// list of constructors, install the new array, returning the new global to
/// use.
static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
                                          const std::vector<Function*> &Ctors) {
  // If we made a change, reassemble the initializer list.
  Constant *CSVals[2];
  CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 65535);
  CSVals[1] = 0;

  StructType *StructTy =
    cast<StructType>(
        cast<ArrayType>(GCL->getType()->getElementType())->getElementType());

  // Create the new init list.
  std::vector<Constant*> CAList;
  for (unsigned i = 0, e = Ctors.size(); i != e; ++i) {
    if (Ctors[i]) {
      CSVals[1] = Ctors[i];
    } else {
      Type *FTy = FunctionType::get(Type::getVoidTy(GCL->getContext()),
                                    false);
      PointerType *PFTy = PointerType::getUnqual(FTy);
      CSVals[1] = Constant::getNullValue(PFTy);
      CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()),
                                   0x7fffffff);
    }
    CAList.push_back(ConstantStruct::get(StructTy, CSVals));
  }

  // Create the array initializer.
  Constant *CA = ConstantArray::get(ArrayType::get(StructTy,
                                                   CAList.size()), CAList);

  // If we didn't change the number of elements, don't create a new GV.
  if (CA->getType() == GCL->getInitializer()->getType()) {
    GCL->setInitializer(CA);
    return GCL;
  }

  // Create the new global and insert it next to the existing list.
  GlobalVariable *NGV = new GlobalVariable(CA->getType(), GCL->isConstant(),
                                           GCL->getLinkage(), CA, "",
                                           GCL->getThreadLocalMode());
  GCL->getParent()->getGlobalList().insert(GCL, NGV);
  NGV->takeName(GCL);

  // Nuke the old list, replacing any uses with the new one.
  if (!GCL->use_empty()) {
    Constant *V = NGV;
    if (V->getType() != GCL->getType())
      V = ConstantExpr::getBitCast(V, GCL->getType());
    GCL->replaceAllUsesWith(V);
  }
  GCL->eraseFromParent();

  if (Ctors.size())
    return NGV;
  else
    return 0;
}


static inline bool
isSimpleEnoughValueToCommit(Constant *C,
                            SmallPtrSet<Constant*, 8> &SimpleConstants,
                            const TargetData *TD);

/// isSimpleEnoughValueToCommit - Return true if the specified constant can be
/// handled by the code generator. We don't want to generate something like:
///     void *X = &X/42;
/// because the code generator doesn't have a relocation that can handle that.
///
/// This function should be called if C was not found (but just got inserted)
/// in SimpleConstants to avoid having to rescan the same constants all the
/// time.
static bool isSimpleEnoughValueToCommitHelper(Constant *C,
                                   SmallPtrSet<Constant*, 8> &SimpleConstants,
                                              const TargetData *TD) {
  // Simple integer, undef, constant aggregate zero, global addresses, etc are
  // all supported.
  if (C->getNumOperands() == 0 || isa<BlockAddress>(C) ||
      isa<GlobalValue>(C))
    return true;

  // Aggregate values are safe if all their elements are.
  if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) ||
      isa<ConstantVector>(C)) {
    for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
      Constant *Op = cast<Constant>(C->getOperand(i));
      if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, TD))
        return false;
    }
    return true;
  }

  // We don't know exactly what relocations are allowed in constant expressions,
  // so we allow &global+constantoffset, which is safe and uniformly supported
  // across targets.
  ConstantExpr *CE = cast<ConstantExpr>(C);
  switch (CE->getOpcode()) {
  case Instruction::BitCast:
    // Bitcast is fine if the casted value is fine.
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);

  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
    // int <=> ptr is fine if the int type is the same size as the
    // pointer type.
    if (!TD || TD->getTypeSizeInBits(CE->getType()) !=
               TD->getTypeSizeInBits(CE->getOperand(0)->getType()))
      return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);

  // GEP is fine if it is simple + constant offset.
  case Instruction::GetElementPtr:
    for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
      if (!isa<ConstantInt>(CE->getOperand(i)))
        return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);

  case Instruction::Add:
    // We allow simple+cst.
    if (!isa<ConstantInt>(CE->getOperand(1)))
      return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
  }
  return false;
}

static inline bool
isSimpleEnoughValueToCommit(Constant *C,
                            SmallPtrSet<Constant*, 8> &SimpleConstants,
                            const TargetData *TD) {
  // If we already checked this constant, we win.
  if (!SimpleConstants.insert(C)) return true;
  // Check the constant.
  return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, TD);
}
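
// A few illustrative data points for the predicate above (with a hypothetical
// global @A):
//   simple enough:  i32 42, @A, bitcast(@A to i8*),
//                   getelementptr(@A, i32 0, i32 1)
//   rejected:       a GEP with a non-constant index, or an int<=>ptr cast
//                   whose integer type is not pointer-sized.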

/// isSimpleEnoughPointerToCommit - Return true if this constant is simple
/// enough for us to understand. In particular, if it is a cast to anything
/// other than from one pointer type to another pointer type, we punt.
/// We basically just support direct accesses to globals and GEP's of
/// globals. This should be kept up to date with CommitValueTo.
static bool isSimpleEnoughPointerToCommit(Constant *C) {
  // Conservatively, avoid aggregate types. This is because we don't
  // want to worry about them partially overlapping other stores.
  if (!cast<PointerType>(C->getType())->getElementType()->isSingleValueType())
    return false;

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
    // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
    // external globals.
    return GV->hasUniqueInitializer();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    // Handle a constantexpr gep.
    if (CE->getOpcode() == Instruction::GetElementPtr &&
        isa<GlobalVariable>(CE->getOperand(0)) &&
        cast<GEPOperator>(CE)->isInBounds()) {
      GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
      // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
      // external globals.
      if (!GV->hasUniqueInitializer())
        return false;

      // The first index must be zero.
      ConstantInt *CI = dyn_cast<ConstantInt>(*llvm::next(CE->op_begin()));
      if (!CI || !CI->isZero()) return false;

      // The remaining indices must be compile-time known integers within the
      // notional bounds of the corresponding static array types.
      if (!CE->isGEPWithNoNotionalOverIndexing())
        return false;

      return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);

    // A constantexpr bitcast from a pointer to another pointer is a no-op,
    // and we know how to evaluate it by moving the bitcast from the pointer
    // operand to the value operand.
    } else if (CE->getOpcode() == Instruction::BitCast &&
               isa<GlobalVariable>(CE->getOperand(0))) {
      // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
      // external globals.
      return cast<GlobalVariable>(CE->getOperand(0))->hasUniqueInitializer();
    }
  }

  return false;
}

/// EvaluateStoreInto - Evaluate a piece of a constantexpr store into a global
/// initializer. This returns 'Init' modified to reflect 'Val' stored into it.
/// At this point, the GEP operands of Addr [0, OpNo) have been stepped into.
static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
                                   ConstantExpr *Addr, unsigned OpNo) {
  // Base case of the recursion.
  if (OpNo == Addr->getNumOperands()) {
    assert(Val->getType() == Init->getType() && "Type mismatch!");
    return Val;
  }

  SmallVector<Constant*, 32> Elts;
  if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
    // Break up the constant into its elements.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
      Elts.push_back(Init->getAggregateElement(i));

    // Replace the element that we are supposed to.
    ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
    unsigned Idx = CU->getZExtValue();
    assert(Idx < STy->getNumElements() && "Struct index out of range!");
    Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1);

    // Return the modified struct.
    return ConstantStruct::get(STy, Elts);
  }

  ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
  SequentialType *InitTy = cast<SequentialType>(Init->getType());

  uint64_t NumElts;
  if (ArrayType *ATy = dyn_cast<ArrayType>(InitTy))
    NumElts = ATy->getNumElements();
  else
    NumElts = InitTy->getVectorNumElements();

  // Break up the array into elements.
  for (uint64_t i = 0, e = NumElts; i != e; ++i)
    Elts.push_back(Init->getAggregateElement(i));

  assert(CI->getZExtValue() < NumElts);
  Elts[CI->getZExtValue()] =
    EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);

  if (Init->getType()->isArrayTy())
    return ConstantArray::get(cast<ArrayType>(InitTy), Elts);
  return ConstantVector::get(Elts);
}

/// CommitValueTo - We have decided that Addr (which satisfies the predicate
/// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
static void CommitValueTo(Constant *Val, Constant *Addr) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    assert(GV->hasInitializer());
    GV->setInitializer(Val);
    return;
  }

  ConstantExpr *CE = cast<ConstantExpr>(Addr);
  GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
  GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2));
}
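
// Illustrative sketch (hypothetical global @G): committing i32 7 through
//   getelementptr({ i32, [2 x i32] }* @G, i32 0, i32 1, i32 1)
// recursively rebuilds @G's initializer, replacing only element [1] of the
// inner array. The first GEP index is the zero that
// isSimpleEnoughPointerToCommit already checked, which is why the recursion
// starts at operand 2.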

namespace {

/// Evaluator - This class evaluates LLVM IR, producing the Constant
/// representing each SSA instruction. Changes to global variables are stored
/// in a mapping that can be iterated over after the evaluation is complete.
/// Once an evaluation call fails, the evaluation object should not be reused.
class Evaluator {
public:
  Evaluator(const TargetData *TD, const TargetLibraryInfo *TLI)
    : TD(TD), TLI(TLI) {
    ValueStack.push_back(new DenseMap<Value*, Constant*>);
  }

  ~Evaluator() {
    DeleteContainerPointers(ValueStack);
    while (!AllocaTmps.empty()) {
      GlobalVariable *Tmp = AllocaTmps.back();
      AllocaTmps.pop_back();

      // If there are still users of the alloca, the program is doing something
      // silly, e.g. storing the address of the alloca somewhere and using it
      // later. Since this is undefined, we'll just make it be null.
      if (!Tmp->use_empty())
        Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
      delete Tmp;
    }
  }

  /// EvaluateFunction - Evaluate a call to function F, returning true if
  /// successful, false if we can't evaluate it. ActualArgs contains the formal
  /// arguments for the function.
  bool EvaluateFunction(Function *F, Constant *&RetVal,
                        const SmallVectorImpl<Constant*> &ActualArgs);

  /// EvaluateBlock - Evaluate all instructions in block BB, returning true if
  /// successful, false if we can't evaluate it. NewBB returns the next BB that
  /// control flows into, or null upon return.
  bool EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB);

  Constant *getVal(Value *V) {
    if (Constant *CV = dyn_cast<Constant>(V)) return CV;
    Constant *R = ValueStack.back()->lookup(V);
    assert(R && "Reference to an uncomputed value!");
    return R;
  }

  void setVal(Value *V, Constant *C) {
    ValueStack.back()->operator[](V) = C;
  }

  const DenseMap<Constant*, Constant*> &getMutatedMemory() const {
    return MutatedMemory;
  }

  const SmallPtrSet<GlobalVariable*, 8> &getInvariants() const {
    return Invariants;
  }

private:
  Constant *ComputeLoadResult(Constant *P);

  /// ValueStack - As we compute SSA register values, we store their contents
  /// here. The back of the vector contains the current function and the stack
  /// contains the values in the calling frames.
  SmallVector<DenseMap<Value*, Constant*>*, 4> ValueStack;

  /// CallStack - This is used to detect recursion. In pathological situations
  /// we could hit exponential behavior, but at least there is nothing
  /// unbounded.
  SmallVector<Function*, 4> CallStack;

  /// MutatedMemory - For each store we execute, we update this map. Loads
  /// check this to get the most up-to-date value. If evaluation is successful,
  /// this state is committed to the process.
  DenseMap<Constant*, Constant*> MutatedMemory;

  /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable
  /// to represent its body. This vector is needed so we can delete the
  /// temporary globals when we are done.
  SmallVector<GlobalVariable*, 32> AllocaTmps;

  /// Invariants - These global variables have been marked invariant by the
  /// static constructor.
  SmallPtrSet<GlobalVariable*, 8> Invariants;

  /// SimpleConstants - These are constants we have checked and know to be
  /// simple enough to live in a static initializer of a global.
  SmallPtrSet<Constant*, 8> SimpleConstants;

  const TargetData *TD;
  const TargetLibraryInfo *TLI;
};

}  // anonymous namespace

/// ComputeLoadResult - Return the value that would be computed by a load from
/// P after the stores reflected by 'memory' have been performed. If we can't
/// decide, return null.
Constant *Evaluator::ComputeLoadResult(Constant *P) {
  // If this memory location has been recently stored, use the stored value: it
  // is the most up-to-date.
  DenseMap<Constant*, Constant*>::const_iterator I = MutatedMemory.find(P);
  if (I != MutatedMemory.end()) return I->second;

  // Access it.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
    if (GV->hasDefinitiveInitializer())
      return GV->getInitializer();
    return 0;
  }

  // Handle a constantexpr getelementptr.
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P))
    if (CE->getOpcode() == Instruction::GetElementPtr &&
        isa<GlobalVariable>(CE->getOperand(0))) {
      GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
      if (GV->hasDefinitiveInitializer())
        return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
    }

  return 0;  // don't know how to evaluate.
}
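
// e.g. (illustrative): once the evaluator has executed
//   store i32 1, i32* @X
// MutatedMemory maps @X to i32 1, so a later "load i32* @X" yields i32 1
// rather than @X's original initializer.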

/// EvaluateBlock - Evaluate all instructions in block BB, returning true if
/// successful, false if we can't evaluate it. NewBB returns the next BB that
/// control flows into, or null upon return.
bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
                              BasicBlock *&NextBB) {
  // This is the main evaluation loop.
  while (1) {
    Constant *InstResult = 0;

    if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
      if (!SI->isSimple()) return false;  // no volatile/atomic accesses.
      Constant *Ptr = getVal(SI->getOperand(1));
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
        Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
      if (!isSimpleEnoughPointerToCommit(Ptr))
        // If this is too complex for us to commit, reject it.
        return false;

      Constant *Val = getVal(SI->getOperand(0));

      // If this might be too difficult for the backend to handle (e.g. the addr
      // of one global variable divided by another) then we can't commit it.
      if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, TD))
        return false;

      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
        if (CE->getOpcode() == Instruction::BitCast) {
          // If we're evaluating a store through a bitcast, then we need
          // to pull the bitcast off the pointer type and push it onto the
          // stored value.
          Ptr = CE->getOperand(0);

          Type *NewTy = cast<PointerType>(Ptr->getType())->getElementType();

          // In order to push the bitcast onto the stored value, a bitcast
          // from NewTy to Val's type must be legal. If it's not, we can try
          // introspecting NewTy to find a legal conversion.
          while (!Val->getType()->canLosslesslyBitCastTo(NewTy)) {
            // If NewTy is a struct, we can convert the pointer to the struct
            // into a pointer to its first member.
            // FIXME: This could be extended to support arrays as well.
            if (StructType *STy = dyn_cast<StructType>(NewTy)) {
              NewTy = STy->getTypeAtIndex(0U);

              IntegerType *IdxTy = IntegerType::get(NewTy->getContext(), 32);
              Constant *IdxZero = ConstantInt::get(IdxTy, 0, false);
              Constant * const IdxList[] = {IdxZero, IdxZero};

              Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList);
              if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
                Ptr = ConstantFoldConstantExpression(CE, TD, TLI);

            // If we can't improve the situation by introspecting NewTy,
            // we have to give up.
            } else {
              return false;
            }
          }

          // If we found compatible types, go ahead and push the bitcast
          // onto the stored value.
          Val = ConstantExpr::getBitCast(Val, NewTy);
        }

      MutatedMemory[Ptr] = Val;
    } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) {
      InstResult = ConstantExpr::get(BO->getOpcode(),
                                     getVal(BO->getOperand(0)),
                                     getVal(BO->getOperand(1)));
    } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) {
      InstResult = ConstantExpr::getCompare(CI->getPredicate(),
                                            getVal(CI->getOperand(0)),
                                            getVal(CI->getOperand(1)));
    } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) {
      InstResult = ConstantExpr::getCast(CI->getOpcode(),
                                         getVal(CI->getOperand(0)),
                                         CI->getType());
    } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
      InstResult = ConstantExpr::getSelect(getVal(SI->getOperand(0)),
                                           getVal(SI->getOperand(1)),
                                           getVal(SI->getOperand(2)));
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
      Constant *P = getVal(GEP->getOperand(0));
      SmallVector<Constant*, 8> GEPOps;
      for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
           i != e; ++i)
        GEPOps.push_back(getVal(*i));
      InstResult =
        ConstantExpr::getGetElementPtr(P, GEPOps,
                                       cast<GEPOperator>(GEP)->isInBounds());
    } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
      if (!LI->isSimple()) return false;  // no volatile/atomic accesses.
      Constant *Ptr = getVal(LI->getOperand(0));
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
        Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
      InstResult = ComputeLoadResult(Ptr);
      if (InstResult == 0) return false; // Could not evaluate load.
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
      if (AI->isArrayAllocation()) return false;  // Cannot handle array allocs.
      Type *Ty = AI->getType()->getElementType();
      AllocaTmps.push_back(new GlobalVariable(Ty, false,
                                              GlobalValue::InternalLinkage,
                                              UndefValue::get(Ty),
                                              AI->getName()));
      InstResult = AllocaTmps.back();
    } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) {
      CallSite CS(CurInst);

      // Debug info can safely be ignored here.
      if (isa<DbgInfoIntrinsic>(CS.getInstruction())) {
        ++CurInst;
        continue;
      }

      // Cannot handle inline asm.
      if (isa<InlineAsm>(CS.getCalledValue())) return false;

      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
        if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) {
          if (MSI->isVolatile()) return false;
          Constant *Ptr = getVal(MSI->getDest());
          Constant *Val = getVal(MSI->getValue());
          Constant *DestVal = ComputeLoadResult(getVal(Ptr));
          if (Val->isNullValue() && DestVal && DestVal->isNullValue()) {
            // This memset is a no-op.
            ++CurInst;
            continue;
          }
        }

        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          ++CurInst;
          continue;
        }

        if (II->getIntrinsicID() == Intrinsic::invariant_start) {
          // We don't insert an entry into Values, as it doesn't have a
          // meaningful return value.
          if (!II->use_empty())
            return false;
          ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0));
          Value *PtrArg = getVal(II->getArgOperand(1));
          Value *Ptr = PtrArg->stripPointerCasts();
          if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
            Type *ElemTy = cast<PointerType>(GV->getType())->getElementType();
            if (!Size->isAllOnesValue() &&
                Size->getValue().getLimitedValue() >=
                TD->getTypeStoreSize(ElemTy))
              Invariants.insert(GV);
          }
          // Continue even if we do nothing.
          ++CurInst;
          continue;
        }
        return false;
      }

      // Resolve function pointers.
      Function *Callee = dyn_cast<Function>(getVal(CS.getCalledValue()));
      if (!Callee || Callee->mayBeOverridden())
        return false;  // Cannot resolve.

      SmallVector<Constant*, 8> Formals;
      for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end(); i != e; ++i)
        Formals.push_back(getVal(*i));

      if (Callee->isDeclaration()) {
        // If this is a function we can constant fold, do it.
        if (Constant *C = ConstantFoldCall(Callee, Formals, TLI)) {
          InstResult = C;
        } else {
          return false;
        }
      } else {
        if (Callee->getFunctionType()->isVarArg())
          return false;

        Constant *RetVal;
        // Execute the call, if successful, use the return value.
        ValueStack.push_back(new DenseMap<Value*, Constant*>);
        if (!EvaluateFunction(Callee, RetVal, Formals))
          return false;
        delete ValueStack.pop_back_val();
        InstResult = RetVal;
      }
    } else if (isa<TerminatorInst>(CurInst)) {
      if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
        if (BI->isUnconditional()) {
          NextBB = BI->getSuccessor(0);
        } else {
          ConstantInt *Cond =
            dyn_cast<ConstantInt>(getVal(BI->getCondition()));
          if (!Cond) return false;  // Cannot determine.

          NextBB = BI->getSuccessor(!Cond->getZExtValue());
        }
      } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
        ConstantInt *Val =
          dyn_cast<ConstantInt>(getVal(SI->getCondition()));
        if (!Val) return false;  // Cannot determine.
        NextBB = SI->findCaseValue(Val).getCaseSuccessor();
      } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) {
        Value *Val = getVal(IBI->getAddress())->stripPointerCasts();
        if (BlockAddress *BA = dyn_cast<BlockAddress>(Val))
          NextBB = BA->getBasicBlock();
        else
          return false;  // Cannot determine.
      } else if (isa<ReturnInst>(CurInst)) {
        NextBB = 0;
      } else {
        // invoke, unwind, resume, unreachable.
        return false;  // Cannot handle this terminator.
      }

      // We succeeded at evaluating this block!
      return true;
    } else {
      // Did not know how to evaluate this!
      return false;
    }

    if (!CurInst->use_empty()) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
        InstResult = ConstantFoldConstantExpression(CE, TD, TLI);

      setVal(CurInst, InstResult);
    }

    // If we just processed an invoke, we finished evaluating the block.
    if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) {
      NextBB = II->getNormalDest();
      return true;
    }

    // Advance program counter.
    ++CurInst;
  }
}

/// EvaluateFunction - Evaluate a call to function F, returning true if
/// successful, false if we can't evaluate it. ActualArgs contains the formal
/// arguments for the function.
bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
                                 const SmallVectorImpl<Constant*> &ActualArgs) {
  // Check to see if this function is already executing (recursion). If so,
  // bail out. TODO: we might want to accept limited recursion.
  if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
    return false;

  CallStack.push_back(F);

  // Initialize arguments to the incoming values specified.
  unsigned ArgNo = 0;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
       ++AI, ++ArgNo)
    setVal(AI, ActualArgs[ArgNo]);

  // ExecutedBlocks - We only handle non-looping, non-recursive code. As such,
  // we can only evaluate any one basic block at most once. This set keeps
  // track of what we have executed so we can detect recursive cases etc.
  SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;

  // CurBB - The current basic block we're evaluating.
  BasicBlock *CurBB = F->begin();

  BasicBlock::iterator CurInst = CurBB->begin();

  while (1) {
    BasicBlock *NextBB = 0;   // Initialized to avoid compiler warnings.
    if (!EvaluateBlock(CurInst, NextBB))
      return false;

    if (NextBB == 0) {
      // Successfully running until there's no next block means that we found
      // the return. Fill in the return value and pop the call stack.
      ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator());
      if (RI->getNumOperands())
        RetVal = getVal(RI->getOperand(0));
      CallStack.pop_back();
      return true;
    }

    // Okay, we succeeded in evaluating this control flow. See if we have
    // executed the new block before. If so, we have a looping function,
    // which we cannot evaluate in reasonable time.
    if (!ExecutedBlocks.insert(NextBB))
      return false;  // looped!

    // Okay, we have never been in this block before. Check to see if there
    // are any PHI nodes. If so, evaluate them with information about where
    // we came from.
    PHINode *PN = 0;
    for (CurInst = NextBB->begin();
         (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
      setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB)));

    // Advance to the next block.
    CurBB = NextBB;
  }
}

/// EvaluateStaticConstructor - Evaluate static constructors in the function, if
/// we can. Return true if we can, false otherwise.
static bool EvaluateStaticConstructor(Function *F, const TargetData *TD,
                                      const TargetLibraryInfo *TLI) {
  // Call the function.
  Evaluator Eval(TD, TLI);
  Constant *RetValDummy;
  bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
                                           SmallVector<Constant*, 0>());

  if (EvalSuccess) {
    // We succeeded at evaluation: commit the result.
    DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
          << F->getName() << "' to " << Eval.getMutatedMemory().size()
          << " stores.\n");
    for (DenseMap<Constant*, Constant*>::const_iterator I =
           Eval.getMutatedMemory().begin(), E = Eval.getMutatedMemory().end();
         I != E; ++I)
      CommitValueTo(I->second, I->first);
    for (SmallPtrSet<GlobalVariable*, 8>::const_iterator I =
           Eval.getInvariants().begin(), E = Eval.getInvariants().end();
         I != E; ++I)
      (*I)->setConstant(true);
  }

  return EvalSuccess;
}

/// OptimizeGlobalCtorsList - Simplify and evaluate global ctors if possible.
/// Return true if anything changed.
bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
  std::vector<Function*> Ctors = ParseGlobalCtors(GCL);
  bool MadeChange = false;
  if (Ctors.empty()) return false;

  // Loop over global ctors, optimizing them when we can.
  for (unsigned i = 0; i != Ctors.size(); ++i) {
    Function *F = Ctors[i];
    // Found a null terminator in the middle of the list, prune off the rest of
    // the list.
    if (F == 0) {
      if (i != Ctors.size()-1) {
        Ctors.resize(i+1);
        MadeChange = true;
      }
      break;
    }

    // We cannot simplify external ctor functions.
    if (F->empty()) continue;

    // If we can evaluate the ctor at compile time, do so.
    if (EvaluateStaticConstructor(F, TD, TLI)) {
      Ctors.erase(Ctors.begin()+i);
      MadeChange = true;
      --i;
      ++NumCtorsEvaluated;
      continue;
    }
  }

  if (!MadeChange) return false;

  GCL = InstallGlobalCtors(GCL, Ctors);
  return true;
}
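
// As an illustrative example, a static constructor whose body reduces to
//   store i32 42, i32* @X
//   ret void
// evaluates successfully; the store is committed by rewriting @X's
// initializer to i32 42, and the ctor is then dropped from the
// llvm.global_ctors list.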
bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
  bool Changed = false;

  for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
       I != E;) {
    Module::alias_iterator J = I++;
    // Aliases without names cannot be referenced outside this module.
    if (!J->hasName() && !J->isDeclaration())
      J->setLinkage(GlobalValue::InternalLinkage);
    // If the aliasee may change at link time, nothing can be done - bail out.
    if (J->mayBeOverridden())
      continue;

    Constant *Aliasee = J->getAliasee();
    GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
    Target->removeDeadConstantUsers();
    bool hasOneUse = Target->hasOneUse() && Aliasee->hasOneUse();

    // Make all users of the alias use the aliasee instead.
    if (!J->use_empty()) {
      J->replaceAllUsesWith(Aliasee);
      ++NumAliasesResolved;
      Changed = true;
    }

    // If the alias is externally visible, we may still be able to simplify it.
    if (!J->hasLocalLinkage()) {
      // If the aliasee has internal linkage, give it the name and linkage
      // of the alias, and delete the alias. This turns:
      //   define internal ... @f(...)
      //   @a = alias ... @f
      // into:
      //   define ... @a(...)
      if (!Target->hasLocalLinkage())
        continue;

      // Do not perform the transform if multiple aliases potentially target the
      // aliasee. This check also ensures that it is safe to replace the section
      // and other attributes of the aliasee with those of the alias.
      if (!hasOneUse)
        continue;

      // Give the aliasee the name, linkage and other attributes of the alias.
      Target->takeName(J);
      Target->setLinkage(J->getLinkage());
      Target->GlobalValue::copyAttributesFrom(J);
    }

    // Delete the alias.
    M.getAliasList().erase(J);
    ++NumAliasesRemoved;
    Changed = true;
  }

  return Changed;
}

static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) {
  if (!TLI->has(LibFunc::cxa_atexit))
    return 0;

  Function *Fn = M.getFunction(TLI->getName(LibFunc::cxa_atexit));

  if (!Fn)
    return 0;

  FunctionType *FTy = Fn->getFunctionType();

  // Checking that the function has the right return type, the right number of
  // parameters and that they all have pointer types should be enough.
  if (!FTy->getReturnType()->isIntegerTy() ||
      FTy->getNumParams() != 3 ||
      !FTy->getParamType(0)->isPointerTy() ||
      !FTy->getParamType(1)->isPointerTy() ||
      !FTy->getParamType(2)->isPointerTy())
    return 0;

  return Fn;
}
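
// An "empty" destructor in the sense checked below is one like (illustrative):
//   define void @dtor(i8* %p) { ret void }
// or one whose single block contains only side-effect-free instructions and
// calls to other such empty destructors.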
/// cxxDtorIsEmpty - Returns whether the given function is an empty C++
/// destructor and can therefore be eliminated.
/// Note that we assume that other optimization passes have already simplified
/// the code so we only look for a function with a single basic block, where
/// the only allowed instructions are 'ret', 'call' to an empty C++ dtor and
/// other side-effect free instructions.
static bool cxxDtorIsEmpty(const Function &Fn,
                           SmallPtrSet<const Function *, 8> &CalledFunctions) {
  // FIXME: We could eliminate C++ destructors if they're readonly/readnone and
  // nounwind, but that doesn't seem worth doing.
  if (Fn.isDeclaration())
    return false;

  if (++Fn.begin() != Fn.end())
    return false;

  const BasicBlock &EntryBlock = Fn.getEntryBlock();
  for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end();
       I != E; ++I) {
    if (const CallInst *CI = dyn_cast<CallInst>(I)) {
      // Ignore debug intrinsics.
      if (isa<DbgInfoIntrinsic>(CI))
        continue;

      const Function *CalledFn = CI->getCalledFunction();

      if (!CalledFn)
        return false;

      SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions);

      // Don't treat recursive functions as empty.
      if (!NewCalledFunctions.insert(CalledFn))
        return false;

      if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions))
        return false;
    } else if (isa<ReturnInst>(*I))
      return true; // We're done.
    else if (I->mayHaveSideEffects())
      return false; // Destructor with side effects, bail.
  }

  return false;
}

bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
  /// Itanium C++ ABI p3.3.5:
  ///
  ///   After constructing a global (or local static) object, that will require
  ///   destruction on exit, a termination function is registered as follows:
  ///
  ///   extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
  ///
  ///   This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
  ///   call f(p) when DSO d is unloaded, before all such termination calls
  ///   registered before this one. It returns zero if registration is
  ///   successful, nonzero on failure.

  // This pass will look for calls to __cxa_atexit where the function is trivial
  // and remove them.
  bool Changed = false;

  for (Function::use_iterator I = CXAAtExitFn->use_begin(),
       E = CXAAtExitFn->use_end(); I != E;) {
    // We're only interested in calls. Theoretically, we could handle invoke
    // instructions as well, but neither llvm-gcc nor clang generate invokes
    // to __cxa_atexit.
    CallInst *CI = dyn_cast<CallInst>(*I++);
    if (!CI)
      continue;

    Function *DtorFn =
      dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
    if (!DtorFn)
      continue;

    SmallPtrSet<const Function *, 8> CalledFunctions;
    if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions))
      continue;

    // Just remove the call.
    CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
    CI->eraseFromParent();

    ++NumCXXDtorsRemoved;

    Changed |= true;
  }

  return Changed;
}

bool GlobalOpt::runOnModule(Module &M) {
  bool Changed = false;

  TD = getAnalysisIfAvailable<TargetData>();
  TLI = &getAnalysis<TargetLibraryInfo>();

  // Try to find the llvm.global_ctors list.
  GlobalVariable *GlobalCtors = FindGlobalCtors(M);

  Function *CXAAtExitFn = FindCXAAtExit(M, TLI);

  bool LocalChange = true;
  while (LocalChange) {
    LocalChange = false;

    // Delete functions that are trivially dead, ccc -> fastcc.
    LocalChange |= OptimizeFunctions(M);

    // Optimize global_ctors list.
    if (GlobalCtors)
      LocalChange |= OptimizeGlobalCtorsList(GlobalCtors);

    // Optimize non-address-taken globals.
    LocalChange |= OptimizeGlobalVars(M);

    // Resolve aliases, when possible.
    LocalChange |= OptimizeGlobalAliases(M);

    // Try to remove trivial global destructors.
    if (CXAAtExitFn)
      LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn);

    Changed |= LocalChange;
  }

  // TODO: Move all global ctors functions to the end of the module for code
  // layout.

  return Changed;
}