ScheduleDAGInstrs.cpp revision 198953
//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), LoopRegs(MLI, MDT) {
  MFI = mf.getFrameInfo();
}

/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  ScheduleDAG::Run(bb, end);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(isa<IntegerType>(V->getType()) && "Unexpected operand type!");
  } while (1);
}
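
// Illustrative example (editorial, not part of the original source): the
// helper above peels integer arithmetic that hides a pointer, e.g. IR like
//
//   %i   = ptrtoint i8* %obj to i64
//   %off = add i64 %i, 16
//   %p   = inttoptr i64 %off to i8*
//
// Starting from %off, each iteration steps through the 'add' back to %i,
// and the ptrtoint returns %obj so the pointer-based walk can resume.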

/// getUnderlyingObject - This is a wrapper around Value::getUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = V->getUnderlyingObject();
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!isa<PointerType>(O->getType()))
      break;
    V = O;
  } while (1);
  return V;
}

/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
                                                const MachineFrameInfo *MFI,
                                                bool &MayAlias) {
  MayAlias = true;
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return 0;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
    MayAlias = PSV->mayAlias(MFI);
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    if (PSV->isAliased(MFI))
      return 0;
    return V;
  }

  if (isIdentifiedObject(V))
    return V;

  return 0;
}

static bool mayUnderlyingObjectForInstrAlias(const MachineInstr *MI,
                                             const MachineFrameInfo *MFI) {
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return true;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return true;

  V = getUnderlyingObject(V);
  if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V))
    return PSV->mayAlias(MFI);
  return true;
}
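
// Note (editorial, not in the original source): both helpers above fail
// conservatively. An instruction with no memoperand, multiple memoperands,
// or a volatile reference is always treated as potentially aliasing, and
// only accesses that climb to an identified object (e.g. an alloca or a
// global, or a non-aliased PseudoSourceValue such as a spill slot) earn
// precise dependencies in BuildSchedGraph below.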

void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}

void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list
  // from bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed. If
  // ChainMMO is null, this is assumed to have arbitrary side-effects. If
  // ChainMMO is non-null, then Chain makes only a single memory reference.
  SUnit *Chain = 0;
  MachineMemOperand *ChainMMO = 0;

  // Memory references to specific known memory locations are tracked so that
  // they can be given more precise dependencies.
  std::map<const Value *, SUnit *> MemDefs;
  std::map<const Value *, std::vector<SUnit *> > MemUses;

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();

  // Walk the list of instructions, from bottom moving up.
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    const TargetInstrDesc &TID = MI->getDesc();
    assert(!TID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      std::vector<SUnit *> &UseList = Uses[Reg];
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies. For anti
      // dependencies we use a latency of 0 because for a multi-issue
      // target we want to allow the defining instruction to issue
      // in the same cycle as the using instruction.
      // TODO: Using a latency of 1 here for output dependencies assumes
      //       there's no cost for reusing registers.
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &DefList = Defs[*Alias];
        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
          SUnit *DefSU = DefList[i];
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(*Alias)))
            DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
        }
      }
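
      // Editorial illustration (not from the original source): with
      // AOLatency as chosen above, an anti dependence such as
      //   r1 = r0 + 1      ; reads r0
      //   r0 = ...         ; later redefinition of r0
      // carries latency 0, so a multi-issue target may place both in the
      // same cycle, while an output dependence (two defs of the same
      // register) keeps a latency of 1.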

      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU != SU) {
            unsigned LDataLatency = DataLatency;
            // Optionally add in a special extra latency for nodes that
            // feed addresses.
            // TODO: Do this for register aliases too.
            // TODO: Perhaps we should get rid of SpecialAddressLatency
            //       and just move this into adjustSchedDependency for
            //       the targets that care about it.
            if (SpecialAddressLatency != 0 && !UnitLatencies) {
              MachineInstr *UseMI = UseSU->getInstr();
              const TargetInstrDesc &UseTID = UseMI->getDesc();
              int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
              assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
              if ((UseTID.mayLoad() || UseTID.mayStore()) &&
                  (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                  UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
                LDataLatency += SpecialAddressLatency;
            }
            // Adjust the dependence latency using operand def/use
            // information (if any), and then allow the target to
            // perform its own adjustments.
            SDep dep(SU, SDep::Data, LDataLatency, Reg);
            if (!UnitLatencies) {
              ComputeOperandLatency(SU, UseSU, dep);
              ST.adjustSchedDependency(SU, UseSU, dep);
            }
            UseSU->addPred(dep);
          }
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU != SU) {
              SDep dep(SU, SDep::Data, DataLatency, *Alias);
              if (!UnitLatencies) {
                ComputeOperandLatency(SU, UseSU, dep);
                ST.adjustSchedDependency(SU, UseSU, dep);
              }
              UseSU->addPred(dep);
            }
          }
        }

        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                //       isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }

        UseList.clear();
        if (!MO.isDead())
          DefList.clear();
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }
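
    // Illustrative sketch of the loop backscheduling above (editorial
    // assumption, not in the original source): suppose the current region
    // is a loop latch ending with
    //     r1 = add r1, 8
    // and r1's next use is a load in the loop header, i.e. in a different
    // block of the same loop. The def then "wraps around" the back-edge, so
    // the artificial edge to ExitSU (with the use's latency, discounted by
    // the distance already available outside the region) pulls the add
    // toward the top of the region; when the whole loop is a single block,
    // the def is instead marked isScheduleHigh.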

    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass)
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
#define STORE_LOAD_LATENCY 1
    unsigned TrueMemOrderLatency = 0;
    if (TID.isCall() || TID.hasUnmodeledSideEffects()) {
    new_chain:
      // This is the conservative case. Add dependencies on all memory
      // references.
      if (Chain)
        Chain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      Chain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      PendingLoads.clear();
      for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
           E = MemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        I->second = SU;
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
           MemUses.begin(), E = MemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
        I->second.clear();
        I->second.push_back(SU);
      }
      // See if it is known to just have a single memory reference.
      MachineInstr *ChainMI = Chain->getInstr();
      const TargetInstrDesc &ChainTID = ChainMI->getDesc();
      if (!ChainTID.isCall() &&
          !ChainTID.hasUnmodeledSideEffects() &&
          ChainMI->hasOneMemOperand() &&
          !(*ChainMI->memoperands_begin())->isVolatile() &&
          (*ChainMI->memoperands_begin())->getValue())
        // We know that the Chain accesses one specific memory location.
        ChainMMO = *ChainMI->memoperands_begin();
      else
        // Unknown memory accesses. Assume the worst.
        ChainMMO = 0;
    } else if (TID.mayStore()) {
      bool MayAlias = true;
      TrueMemOrderLatency = STORE_LOAD_LATENCY;
      if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Handle the def in MemDefs, if there is one.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end()) {
          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          MemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          MemUses.find(V);
        if (J != MemUses.end()) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
                                       /*Reg=*/0, /*isNormalMemory=*/true));
          J->second.clear();
        }
        if (MayAlias) {
          // Add dependencies from all the PendingLoads, since without
          // memoperands we must assume they alias anything.
          for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
            PendingLoads[k]->addPred(SDep(SU, SDep::Order,
                                          TrueMemOrderLatency));
          // Add a general dependence too, if needed.
          if (Chain)
            Chain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        }
      } else {
        // Treat all other stores conservatively.
        goto new_chain;
      }
    } else if (TID.mayLoad()) {
      bool MayAlias = true;
      TrueMemOrderLatency = 0;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else if (const Value *V =
                     getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
        // A load from a specific PseudoSourceValue. Add precise dependencies.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end())
          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
        MemUses[V].push_back(SU);

        // Add a general dependence too, if needed.
        if (Chain && (!ChainMMO ||
                      (ChainMMO->isStore() || ChainMMO->isVolatile())))
          Chain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      } else if (MI->hasVolatileMemoryRef()) {
        // Treat volatile loads conservatively. Note that this includes
        // cases where memoperand information is unavailable.
        goto new_chain;
      } else {
        // A "MayAlias" load. Depend on the general chain, as well as on
        // all stores. In the absence of MachineMemOperand information,
        // we can't even assume that the load doesn't alias well-behaved
        // memory locations.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
             E = MemDefs.end(); I != E; ++I) {
          SUnit *DefSU = I->second;
          if (mayUnderlyingObjectForInstrAlias(DefSU->getInstr(), MFI))
            DefSU->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        }
        PendingLoads.push_back(SU);
      }
    }
  }
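
  // Rough summary of the chain scheme above (editorial, not in the original
  // source), applied while walking bottom-up:
  //   - a call or unmodeled side effect becomes the new Chain barrier;
  //   - a store to a known object is ordered precisely against tracked
  //     accesses to the same object via MemDefs/MemUses;
  //   - an invariant load gets no chain edges at all;
  //   - a load from a known object is ordered against the tracked store to
  //     that object, plus the Chain when it may store or is volatile;
  //   - any other load is ordered against the Chain and all tracked stores,
  //     and joins PendingLoads so preceding stores are ordered before it.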

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}

void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}

void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node.
  SU->Latency =
    InstrItins.getStageLatency(SU->getInstr()->getDesc().getSchedClass());

  // Simplistic target-independent heuristic: assume that loads take
  // extra time.
  if (InstrItins.isEmpty())
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
}

void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
                                              SDep& dep) const {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
  if (InstrItins.isEmpty())
    return;

  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction.
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    int DefCycle =
      InstrItins.getOperandCycle(DefMI->getDesc().getSchedClass(), DefIdx);
    if (DefCycle >= 0) {
      MachineInstr *UseMI = Use->getInstr();
      const unsigned UseClass = UseMI->getDesc().getSchedClass();

      // For all uses of the register, calculate the maximum latency.
      int Latency = -1;
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = InstrItins.getOperandCycle(UseClass, i);
        if (UseCycle >= 0)
          Latency = std::max(Latency, DefCycle - UseCycle + 1);
      }

      // If we found a latency, then replace the existing dependence latency.
      if (Latency >= 0)
        dep.setLatency(Latency);
    }
  }
}
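
// Worked example for ComputeOperandLatency above (illustrative numbers):
// if the itinerary says the defining instruction writes the operand in
// cycle 3 (DefCycle = 3) and the using instruction reads it in cycle 1
// (UseCycle = 1), the candidate latency is 3 - 1 + 1 = 3; the maximum over
// all matching use operands replaces the edge's existing latency.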

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::
EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    BB->remove(I);
  }

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    BB->insert(InsertPos, SU->getInstr());
  }

  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  return BB;
}
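
// Typical driver sequence (a sketch under assumptions; in this era the
// post-RA list scheduler is the real caller of this class):
//   Scheduler.StartBlock(BB);
//   Scheduler.Run(BB, Begin, End, EndCount); // subclass Schedule() builds
//                                            // and schedules the DAG
//   Scheduler.EmitSchedule(0);               // re-emits Sequence in order
//   Scheduler.FinishBlock();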