ScheduleDAGSDNodes.cpp revision 202878
//===--- ScheduleDAGSDNodes.cpp - Implement the ScheduleDAGSDNodes class --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGSDNodes class, which is the base class
// used by the SelectionDAG-based instruction schedulers.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(LoadsClustered, "Number of loads clustered together");

ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
  : ScheduleDAG(mf) {
}

/// Run - perform scheduling.
///
void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb,
                             MachineBasicBlock::iterator insertPos) {
  DAG = dag;
  ScheduleDAG::Run(bb, insertPos);
}

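/// Clone - Create a new SUnit for the same SDNode as Old, copying over its
/// scheduling properties, and mark the original SUnit as cloned. The scheduler
/// uses this when it must duplicate a node, for example to resolve physical
/// register interference.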
SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
  SUnit *SU = NewSUnit(Old->getNode());
  SU->OrigNode = Old->OrigNode;
  SU->Latency = Old->Latency;
  SU->isTwoAddress = Old->isTwoAddress;
  SU->isCommutable = Old->isCommutable;
  SU->hasPhysRegDefs = Old->hasPhysRegDefs;
  SU->hasPhysRegClobbers = Old->hasPhysRegClobbers;
  Old->isCloned = true;
  return SU;
}

/// CheckForPhysRegDependency - Check if the dependency between def and use of
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                      const TargetRegisterInfo *TRI,
                                      const TargetInstrInfo *TII,
                                      unsigned &PhysReg, int &Cost) {
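  // Only the value operand of a CopyToReg into a physical register can create
  // a physreg dependency here. CopyToReg's operands are (chain, register,
  // value), so the value is operand 2.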
  if (Op != 2 || User->getOpcode() != ISD::CopyToReg)
    return;

  unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return;

  unsigned ResNo = User->getOperand(2).getResNo();
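  // If the copied value is an implicit def of the defining machine node (its
  // result number is past the explicit defs), report the register and the
  // cost of copying out of its register class.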
  if (Def->isMachineOpcode()) {
    const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
    if (ResNo >= II.getNumDefs() &&
        II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
      PhysReg = Reg;
      const TargetRegisterClass *RC =
        TRI->getPhysicalRegisterRegClass(Reg, Def->getValueType(ResNo));
      Cost = RC->getCopyCost();
    }
  }
}

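/// AddFlags - Rebuild node N in place (via MorphNodeTo) with the same value
/// types and operands, optionally appending an MVT::Flag result and/or the
/// given incoming Flag operand. This is the mechanism used to glue clustered
/// loads together.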
static void AddFlags(SDNode *N, SDValue Flag, bool AddFlag,
                     SelectionDAG *DAG) {
  SmallVector<EVT, 4> VTs;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
    VTs.push_back(N->getValueType(i));
  if (AddFlag)
    VTs.push_back(MVT::Flag);
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    Ops.push_back(N->getOperand(i));
  if (Flag.getNode())
    Ops.push_back(Flag);
  SDVTList VTList = DAG->getVTList(&VTs[0], VTs.size());
  DAG->MorphNodeTo(N, N->getOpcode(), VTList, &Ops[0], Ops.size());
}

/// ClusterNeighboringLoads - Force nearby loads together by "flagging" them.
/// This function finds loads of the same base and different offsets. If the
/// offsets are not far apart (target specific), it adds MVT::Flag inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
void ScheduleDAGSDNodes::ClusterNeighboringLoads() {
  SmallPtrSet<SDNode*, 16> Visited;
  SmallVector<int64_t, 4> Offsets;
  DenseMap<long long, SDNode*> O2SMap;  // Map from offset to SDNode.
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    SDNode *Node = &*NI;
    if (!Node || !Node->isMachineOpcode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    if (!TID.mayLoad())
      continue;

    SDNode *Chain = 0;
    unsigned NumOps = Node->getNumOperands();
    if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
      Chain = Node->getOperand(NumOps-1).getNode();
    if (!Chain)
      continue;

    // Look for other loads of the same chain. Find loads that are loading from
    // the same base pointer and different offsets.
    Visited.clear();
    Offsets.clear();
    O2SMap.clear();
    bool Cluster = false;
    SDNode *Base = Node;
    int64_t BaseOffset;
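    // Walk the other users of this load's chain. Each one that TII identifies
    // as a load from the same base pointer (at a different offset) is recorded
    // in O2SMap keyed by its offset; Base is kept pointing at the
    // lowest-offset load seen so far.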
    for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
         I != E; ++I) {
      SDNode *User = *I;
      if (User == Node || !Visited.insert(User))
        continue;
      int64_t Offset1, Offset2;
      if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
          Offset1 == Offset2)
        // FIXME: Should be ok if the addresses are identical. But earlier
        // optimizations really should have eliminated one of the loads.
        continue;
      if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
        Offsets.push_back(Offset1);
      O2SMap.insert(std::make_pair(Offset2, User));
      Offsets.push_back(Offset2);
      if (Offset2 < Offset1) {
        Base = User;
        BaseOffset = Offset2;
      } else {
        BaseOffset = Offset1;
      }
      Cluster = true;
    }

    if (!Cluster)
      continue;

    // Sort them in increasing order.
    std::sort(Offsets.begin(), Offsets.end());

    // Check if the loads are close enough.
    SmallVector<SDNode*, 4> Loads;
    unsigned NumLoads = 0;
    int64_t BaseOff = Offsets[0];
    SDNode *BaseLoad = O2SMap[BaseOff];
    Loads.push_back(BaseLoad);
    for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
      int64_t Offset = Offsets[i];
      SDNode *Load = O2SMap[Offset];
      if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,
                                        NumLoads))
        break; // Stop right here. Ignore loads that are further away.
      Loads.push_back(Load);
      ++NumLoads;
    }

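    // NumLoads counts how many loads beyond the base load the target accepted;
    // if it accepted none, there is nothing to cluster.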
    if (NumLoads == 0)
      continue;

    // Cluster loads by adding MVT::Flag outputs and inputs. This also
    // ensures they are scheduled in order of increasing addresses.
    SDNode *Lead = Loads[0];
    AddFlags(Lead, SDValue(0,0), true, DAG);
    SDValue InFlag = SDValue(Lead, Lead->getNumValues()-1);
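    // Thread the flag through the remaining loads: each load consumes the
    // previous load's flag result, and every load except the last also gets a
    // flag result to feed the next one.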
    for (unsigned i = 1, e = Loads.size(); i != e; ++i) {
      bool OutFlag = i < e-1;
      SDNode *Load = Loads[i];
      AddFlags(Load, InFlag, OutFlag, DAG);
      if (OutFlag)
        InFlag = SDValue(Load, Load->getNumValues()-1);
      ++LoadsClustered;
    }
  }
}

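/// BuildSchedUnits - Create an SUnit for each schedulable node in the DAG,
/// grouping nodes connected by MVT::Flag values into a single SUnit.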
void ScheduleDAGSDNodes::BuildSchedUnits() {
  // During scheduling, the NodeId field of SDNode is used to map SDNodes
  // to their associated SUnits by holding SUnits table indices. A value
  // of -1 means the SDNode does not yet have an associated SUnit.
  unsigned NumNodes = 0;
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    NI->setNodeId(-1);
    ++NumNodes;
  }

  // Reserve entries in the vector for each of the SUnits we are creating.  This
  // ensures that reallocation of the vector won't happen, so SUnit*'s won't get
  // invalidated.
  // FIXME: Multiply by 2 because we may clone nodes during scheduling.
  // This is a temporary workaround.
  SUnits.reserve(NumNodes * 2);

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
      continue;

    // If this node has already been processed, stop now.
    if (NI->getNodeId() != -1) continue;

    SUnit *NodeSUnit = NewSUnit(NI);

    // See if anything is flagged to this node; if so, add those nodes to this
    // SUnit.  Nodes can have at most one flag input and one flag output.  Flags
    // are required to be the last operand and result of a node.

    // Scan up to find flagged preds.
    SDNode *N = NI;
    while (N->getNumOperands() &&
           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
      N = N->getOperand(N->getNumOperands()-1).getNode();
      assert(N->getNodeId() == -1 && "Node already inserted!");
      N->setNodeId(NodeSUnit->NodeNum);
    }

    // Scan down to find any flagged succs.
    N = NI;
    while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
      SDValue FlagVal(N, N->getNumValues()-1);

      // There are either zero or one users of the Flag result.
      bool HasFlagUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
           UI != E; ++UI)
        if (FlagVal.isOperandOf(*UI)) {
          HasFlagUse = true;
          assert(N->getNodeId() == -1 && "Node already inserted!");
          N->setNodeId(NodeSUnit->NodeNum);
          N = *UI;
          break;
        }
      if (!HasFlagUse) break;
    }

    // If there are flag operands involved, N is now the bottom-most node
    // of the sequence of nodes that are flagged together.
    // Update the SUnit.
    NodeSUnit->setNode(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NodeSUnit->NodeNum);

    // Assign the Latency field of NodeSUnit using target-provided information.
    if (UnitLatencies)
      NodeSUnit->Latency = 1;
    else
      ComputeLatency(NodeSUnit);
  }
}

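/// AddSchedEdges - For each SUnit, add scheduling edges for its operands:
/// chain operands become order edges and value operands become data edges,
/// annotated with latency and any physical register involved. It also records
/// per-SUnit flags such as isTwoAddress, isCommutable, and physical register
/// defs/clobbers.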
void ScheduleDAGSDNodes::AddSchedEdges() {
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Pass 2: add the preds, succs, etc.
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    SUnit *SU = &SUnits[su];
    SDNode *MainNode = SU->getNode();

    if (MainNode->isMachineOpcode()) {
      unsigned Opc = MainNode->getMachineOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
      for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
        if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
          SU->isTwoAddress = true;
          break;
        }
      }
      if (TID.isCommutable())
        SU->isCommutable = true;
    }

    // Find all predecessors and successors of the group.
    for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode()) {
      if (N->isMachineOpcode() &&
          TII->get(N->getMachineOpcode()).getImplicitDefs()) {
        SU->hasPhysRegClobbers = true;
        unsigned NumUsed = InstrEmitter::CountResults(N);
        while (NumUsed != 0 && !N->hasAnyUseOfValue(NumUsed - 1))
          --NumUsed;    // Skip over unused values at the end.
        if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
          SU->hasPhysRegDefs = true;
      }

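      // Add an edge from the SUnit producing each operand to this SUnit.
      // Chain operands (MVT::Other) become order dependencies; everything
      // else becomes a data dependency, possibly through a physical register.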
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        SDNode *OpN = N->getOperand(i).getNode();
        if (isPassiveNode(OpN)) continue;   // Not scheduled.
        SUnit *OpSU = &SUnits[OpN->getNodeId()];
        assert(OpSU && "Node has no SUnit!");
        if (OpSU == SU) continue;           // In the same group.

        EVT OpVT = N->getOperand(i).getValueType();
        assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
        bool isChain = OpVT == MVT::Other;

        unsigned PhysReg = 0;
        int Cost = 1;
        // Determine if this is a physical register dependency.
        CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
        assert((PhysReg == 0 || !isChain) &&
               "Chain dependence via physreg data?");
        // FIXME: See ScheduleDAGSDNodes::EmitCopyFromReg. For now, the
        // scheduler emits a copy from the physical register to a virtual
        // register unless it requires a cross-class copy (cost < 0). That
        // means we only treat an "expensive to copy" register dependency as a
        // physical register dependency. This may change in the future though.
        if (Cost >= 0)
          PhysReg = 0;

        const SDep& dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
                               OpSU->Latency, PhysReg);
        if (!isChain && !UnitLatencies) {
          ComputeOperandLatency(OpSU, SU, (SDep &)dep);
          ST.adjustSchedDependency(OpSU, SU, (SDep &)dep);
        }

        SU->addPred(dep);
      }
    }
  }
}

/// BuildSchedGraph - Build the SUnit graph from the selection DAG we are
/// given as input.  This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
/// nodes that are flagged together with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
  // Cluster loads from "near" addresses into combined SUnits.
  ClusterNeighboringLoads();
  // Populate the SUnits array.
  BuildSchedUnits();
  // Compute all the scheduling dependencies between nodes.
  AddSchedEdges();
}

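/// ComputeLatency - Assign SU->Latency from the target's instruction
/// itinerary data.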
void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node.  We use the sum of the latencies for
  // all nodes flagged together into this SUnit.
  SU->Latency = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode())
    if (N->isMachineOpcode()) {
      SU->Latency += InstrItins.
        getStageLatency(TII->get(N->getMachineOpcode()).getSchedClass());
    }
}

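/// dumpNode - Print the SUnit's main node followed by every node flagged to
/// it, for debugging.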
void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
  if (!SU->getNode()) {
    dbgs() << "PHYS REG COPY\n";
    return;
  }

  SU->getNode()->dump(DAG);
  dbgs() << "\n";
  SmallVector<SDNode *, 4> FlaggedNodes;
  for (SDNode *N = SU->getNode()->getFlaggedNode(); N; N = N->getFlaggedNode())
    FlaggedNodes.push_back(N);
  while (!FlaggedNodes.empty()) {
    dbgs() << "    ";
    FlaggedNodes.back()->dump(DAG);
    dbgs() << "\n";
    FlaggedNodes.pop_back();
  }
}

/// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGSDNodes::
EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;
  DenseMap<SUnit*, unsigned> CopyVRBaseMap;
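  // VRBaseMap records the virtual register assigned to each emitted SDValue;
  // CopyVRBaseMap does the same for SUnits that are pure physical register
  // copies (those with no SDNode).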
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    // For pre-regalloc scheduling, create instructions corresponding to the
    // SDNode and any flagged SDNodes and append them to the block.
    if (!SU->getNode()) {
      // Emit a copy.
      EmitPhysRegCopy(SU, CopyVRBaseMap);
      continue;
    }

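    // SU->getNode() is the bottom-most node of the flagged group. Walk up the
    // flag chain collecting its flagged predecessors, then emit them from the
    // top of the chain down before emitting the main node itself.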
    SmallVector<SDNode *, 4> FlaggedNodes;
    for (SDNode *N = SU->getNode()->getFlaggedNode(); N;
         N = N->getFlaggedNode())
      FlaggedNodes.push_back(N);
    while (!FlaggedNodes.empty()) {
      Emitter.EmitNode(FlaggedNodes.back(), SU->OrigNode != SU, SU->isCloned,
                       VRBaseMap, EM);
      FlaggedNodes.pop_back();
    }
    Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
                     VRBaseMap, EM);
  }

  BB = Emitter.getBlock();
  InsertPos = Emitter.getInsertPos();
  return BB;
}