ScheduleDAGSDNodes.cpp revision 221345
//===--- ScheduleDAGSDNodes.cpp - Implement the ScheduleDAGSDNodes class --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGSDNodes class, which is used by the
// SelectionDAG-based pre-register-allocation schedulers.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "SDNodeDbgValue.h"
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(LoadsClustered, "Number of loads clustered together");

// This allows a latency-based scheduler to notice high latency instructions
// without a target itinerary. The choice of number here has more to do with
// balancing scheduler heuristics than with the actual machine latency.
static cl::opt<int> HighLatencyCycles(
  "sched-high-latency-cycles", cl::Hidden, cl::init(10),
  cl::desc("Roughly estimate the number of cycles that 'long latency' "
           "instructions take for targets with no itinerary"));

ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
  : ScheduleDAG(mf),
    InstrItins(mf.getTarget().getInstrItineraryData()) {}

/// Run - perform scheduling.
///
void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb,
                             MachineBasicBlock::iterator insertPos) {
  DAG = dag;
  ScheduleDAG::Run(bb, insertPos);
}

/// NewSUnit - Creates a new SUnit and returns a pointer to it.
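/// The scheduling preference of the new unit is queried from the target's
/// TargetLowering; null nodes and IMPLICIT_DEF get Sched::None.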
///
SUnit *ScheduleDAGSDNodes::NewSUnit(SDNode *N) {
#ifndef NDEBUG
  const SUnit *Addr = 0;
  if (!SUnits.empty())
    Addr = &SUnits[0];
#endif
  SUnits.push_back(SUnit(N, (unsigned)SUnits.size()));
  assert((Addr == 0 || Addr == &SUnits[0]) &&
         "SUnits std::vector reallocated on the fly!");
  SUnits.back().OrigNode = &SUnits.back();
  SUnit *SU = &SUnits.back();
  const TargetLowering &TLI = DAG->getTargetLoweringInfo();
  if (!N ||
      (N->isMachineOpcode() &&
       N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF))
    SU->SchedulingPref = Sched::None;
  else
    SU->SchedulingPref = TLI.getSchedulingPreference(N);
  return SU;
}

SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
  SUnit *SU = NewSUnit(Old->getNode());
  SU->OrigNode = Old->OrigNode;
  SU->Latency = Old->Latency;
  SU->isVRegCycle = Old->isVRegCycle;
  SU->isCall = Old->isCall;
  SU->isCallOp = Old->isCallOp;
  SU->isTwoAddress = Old->isTwoAddress;
  SU->isCommutable = Old->isCommutable;
  SU->hasPhysRegDefs = Old->hasPhysRegDefs;
  SU->hasPhysRegClobbers = Old->hasPhysRegClobbers;
  SU->isScheduleHigh = Old->isScheduleHigh;
  SU->isScheduleLow = Old->isScheduleLow;
  SU->SchedulingPref = Old->SchedulingPref;
  Old->isCloned = true;
  return SU;
}

/// CheckForPhysRegDependency - Check if the dependency between def and use of
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                      const TargetRegisterInfo *TRI,
                                      const TargetInstrInfo *TII,
                                      unsigned &PhysReg, int &Cost) {
  if (Op != 2 || User->getOpcode() != ISD::CopyToReg)
    return;

  unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return;

  unsigned ResNo = User->getOperand(2).getResNo();
  if (Def->isMachineOpcode()) {
    const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
    if (ResNo >= II.getNumDefs() &&
        II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
      PhysReg = Reg;
      const TargetRegisterClass *RC =
        TRI->getMinimalPhysRegClass(Reg, Def->getValueType(ResNo));
      Cost = RC->getCopyCost();
    }
  }
}

static void AddGlue(SDNode *N, SDValue Glue, bool AddGlue, SelectionDAG *DAG) {
  SmallVector<EVT, 4> VTs;
  SDNode *GlueDestNode = Glue.getNode();

  // Don't add glue from a node to itself.
  if (GlueDestNode == N) return;

  // Don't add glue to something which already has glue.
  if (N->getValueType(N->getNumValues() - 1) == MVT::Glue) return;

  for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
    VTs.push_back(N->getValueType(I));

  if (AddGlue)
    VTs.push_back(MVT::Glue);

  SmallVector<SDValue, 4> Ops;
  for (unsigned I = 0, E = N->getNumOperands(); I != E; ++I)
    Ops.push_back(N->getOperand(I));

  if (GlueDestNode)
    Ops.push_back(Glue);

  SDVTList VTList = DAG->getVTList(&VTs[0], VTs.size());
  MachineSDNode::mmo_iterator Begin = 0, End = 0;
  MachineSDNode *MN = dyn_cast<MachineSDNode>(N);

  // Store memory references.
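  // MorphNodeTo below may not preserve a MachineSDNode's memory operand list,
  // so remember the operands here and reattach them after the morph.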
  if (MN) {
    Begin = MN->memoperands_begin();
    End = MN->memoperands_end();
  }

  DAG->MorphNodeTo(N, N->getOpcode(), VTList, &Ops[0], Ops.size());

  // Reset the memory references.
  if (MN)
    MN->setMemRefs(Begin, End);
}

/// ClusterNeighboringLoads - Force nearby loads together by "gluing" them.
/// This function finds loads of the same base and different offsets. If the
/// offsets are not far apart (target specific), it adds MVT::Glue inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
  SDNode *Chain = 0;
  unsigned NumOps = Node->getNumOperands();
  if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
    Chain = Node->getOperand(NumOps-1).getNode();
  if (!Chain)
    return;

  // Look for other loads of the same chain. Find loads that are loading from
  // the same base pointer and different offsets.
  SmallPtrSet<SDNode*, 16> Visited;
  SmallVector<int64_t, 4> Offsets;
  DenseMap<long long, SDNode*> O2SMap;  // Map from offset to SDNode.
  bool Cluster = false;
  SDNode *Base = Node;
  for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
       I != E; ++I) {
    SDNode *User = *I;
    if (User == Node || !Visited.insert(User))
      continue;
    int64_t Offset1, Offset2;
    if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
        Offset1 == Offset2)
      // FIXME: Should be ok if the addresses are identical. But earlier
      // optimizations really should have eliminated one of the loads.
      continue;
    if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
      Offsets.push_back(Offset1);
    O2SMap.insert(std::make_pair(Offset2, User));
    Offsets.push_back(Offset2);
    if (Offset2 < Offset1)
      Base = User;
    Cluster = true;
  }

  if (!Cluster)
    return;

  // Sort them in increasing order.
  std::sort(Offsets.begin(), Offsets.end());

  // Check if the loads are close enough.
  SmallVector<SDNode*, 4> Loads;
  unsigned NumLoads = 0;
  int64_t BaseOff = Offsets[0];
  SDNode *BaseLoad = O2SMap[BaseOff];
  Loads.push_back(BaseLoad);
  for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
    int64_t Offset = Offsets[i];
    SDNode *Load = O2SMap[Offset];
    if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,NumLoads))
      break; // Stop right here. Ignore loads that are further away.
    Loads.push_back(Load);
    ++NumLoads;
  }

  if (NumLoads == 0)
    return;

  // Cluster loads by adding MVT::Glue outputs and inputs. This also
  // ensures they are scheduled in order of increasing addresses.
  SDNode *Lead = Loads[0];
  AddGlue(Lead, SDValue(0, 0), true, DAG);

  SDValue InGlue = SDValue(Lead, Lead->getNumValues() - 1);
  for (unsigned I = 1, E = Loads.size(); I != E; ++I) {
    bool OutGlue = I < E - 1;
    SDNode *Load = Loads[I];

    AddGlue(Load, InGlue, OutGlue, DAG);

    if (OutGlue)
      InGlue = SDValue(Load, Load->getNumValues() - 1);

    ++LoadsClustered;
  }
}

/// ClusterNodes - Cluster certain nodes which should be scheduled together.
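/// Currently the only clustering performed is gluing together neighboring
/// loads (see ClusterNeighboringLoads above).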
///
void ScheduleDAGSDNodes::ClusterNodes() {
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    SDNode *Node = &*NI;
    if (!Node || !Node->isMachineOpcode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    if (TID.mayLoad())
      // Cluster loads from "near" addresses into combined SUnits.
      ClusterNeighboringLoads(Node);
  }
}

void ScheduleDAGSDNodes::BuildSchedUnits() {
  // During scheduling, the NodeId field of SDNode is used to map SDNodes
  // to their associated SUnits by holding SUnits table indices. A value
  // of -1 means the SDNode does not yet have an associated SUnit.
  unsigned NumNodes = 0;
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    NI->setNodeId(-1);
    ++NumNodes;
  }

  // Reserve entries in the vector for each of the SUnits we are creating. This
  // ensures that reallocation of the vector won't happen, so SUnit*'s won't
  // get invalidated.
  // FIXME: Multiply by 2 because we may clone nodes during scheduling.
  // This is a temporary workaround.
  SUnits.reserve(NumNodes * 2);

  // Add all nodes in depth first order.
  SmallVector<SDNode*, 64> Worklist;
  SmallPtrSet<SDNode*, 64> Visited;
  Worklist.push_back(DAG->getRoot().getNode());
  Visited.insert(DAG->getRoot().getNode());

  SmallVector<SUnit*, 8> CallSUnits;
  while (!Worklist.empty()) {
    SDNode *NI = Worklist.pop_back_val();

    // Add all operands to the worklist unless they've already been added.
    for (unsigned i = 0, e = NI->getNumOperands(); i != e; ++i)
      if (Visited.insert(NI->getOperand(i).getNode()))
        Worklist.push_back(NI->getOperand(i).getNode());

    if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
      continue;

    // If this node has already been processed, stop now.
    if (NI->getNodeId() != -1) continue;

    SUnit *NodeSUnit = NewSUnit(NI);

    // See if anything is glued to this node, if so, add them to glued
    // nodes. Nodes can have at most one glue input and one glue output. Glue
    // is required to be the last operand and result of a node.

    // Scan up to find glued preds.
    SDNode *N = NI;
    while (N->getNumOperands() &&
           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue) {
      N = N->getOperand(N->getNumOperands()-1).getNode();
      assert(N->getNodeId() == -1 && "Node already inserted!");
      N->setNodeId(NodeSUnit->NodeNum);
      if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
        NodeSUnit->isCall = true;
    }

    // Scan down to find any glued succs.
    N = NI;
    while (N->getValueType(N->getNumValues()-1) == MVT::Glue) {
      SDValue GlueVal(N, N->getNumValues()-1);

      // There are either zero or one users of the Glue result.
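      // Follow the single glue use downward so the whole glued sequence ends
      // up in the same SUnit.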
      bool HasGlueUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
           UI != E; ++UI)
        if (GlueVal.isOperandOf(*UI)) {
          HasGlueUse = true;
          assert(N->getNodeId() == -1 && "Node already inserted!");
          N->setNodeId(NodeSUnit->NodeNum);
          N = *UI;
          if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
            NodeSUnit->isCall = true;
          break;
        }
      if (!HasGlueUse) break;
    }

    if (NodeSUnit->isCall)
      CallSUnits.push_back(NodeSUnit);

    // Schedule zero-latency TokenFactor below any nodes that may increase the
    // schedule height. Otherwise, ancestors of the TokenFactor may appear to
    // have false stalls.
    if (NI->getOpcode() == ISD::TokenFactor)
      NodeSUnit->isScheduleLow = true;

    // If there are glue operands involved, N is now the bottom-most node
    // of the sequence of nodes that are glued together.
    // Update the SUnit.
    NodeSUnit->setNode(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NodeSUnit->NodeNum);

    // Compute NumRegDefsLeft. This must be done before AddSchedEdges.
    InitNumRegDefsLeft(NodeSUnit);

    // Assign the Latency field of NodeSUnit using target-provided information.
    ComputeLatency(NodeSUnit);
  }

  // Find all call operands.
  while (!CallSUnits.empty()) {
    SUnit *SU = CallSUnits.pop_back_val();
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->getOpcode() != ISD::CopyToReg)
        continue;
      SDNode *SrcN = SUNode->getOperand(2).getNode();
      if (isPassiveNode(SrcN)) continue;   // Not scheduled.
      SUnit *SrcSU = &SUnits[SrcN->getNodeId()];
      SrcSU->isCallOp = true;
    }
  }
}

void ScheduleDAGSDNodes::AddSchedEdges() {
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Pass 2: add the preds, succs, etc.
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    SUnit *SU = &SUnits[su];
    SDNode *MainNode = SU->getNode();

    if (MainNode->isMachineOpcode()) {
      unsigned Opc = MainNode->getMachineOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
      for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
        if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
          SU->isTwoAddress = true;
          break;
        }
      }
      if (TID.isCommutable())
        SU->isCommutable = true;
    }

    // Find all predecessors and successors of the group.
    for (SDNode *N = SU->getNode(); N; N = N->getGluedNode()) {
      if (N->isMachineOpcode() &&
          TII->get(N->getMachineOpcode()).getImplicitDefs()) {
        SU->hasPhysRegClobbers = true;
        unsigned NumUsed = InstrEmitter::CountResults(N);
        while (NumUsed != 0 && !N->hasAnyUseOfValue(NumUsed - 1))
          --NumUsed;    // Skip over unused values at the end.
        if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
          SU->hasPhysRegDefs = true;
      }

      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        SDNode *OpN = N->getOperand(i).getNode();
        if (isPassiveNode(OpN)) continue;   // Not scheduled.
        SUnit *OpSU = &SUnits[OpN->getNodeId()];
        assert(OpSU && "Node has no SUnit!");
        if (OpSU == SU) continue;           // In the same group.
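        // OpN is scheduled in a different SUnit, so this operand becomes a
        // dependence edge from OpSU to SU.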
        EVT OpVT = N->getOperand(i).getValueType();
        assert(OpVT != MVT::Glue && "Glued nodes should be in same sunit!");
        bool isChain = OpVT == MVT::Other;

        unsigned PhysReg = 0;
        int Cost = 1;
        // Determine if this is a physical register dependency.
        CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
        assert((PhysReg == 0 || !isChain) &&
               "Chain dependence via physreg data?");
        // FIXME: See ScheduleDAGSDNodes::EmitCopyFromReg. For now, scheduler
        // emits a copy from the physical register to a virtual register unless
        // it requires a cross class copy (cost < 0). That means we are only
        // treating "expensive to copy" register dependency as physical register
        // dependency. This may change in the future though.
        if (Cost >= 0)
          PhysReg = 0;

        // If this is a ctrl dep, latency is 1.
        unsigned OpLatency = isChain ? 1 : OpSU->Latency;
        // Special-case TokenFactor chains as zero-latency.
        if (isChain && OpN->getOpcode() == ISD::TokenFactor)
          OpLatency = 0;

        const SDep &dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
                               OpLatency, PhysReg);
        if (!isChain && !UnitLatencies) {
          ComputeOperandLatency(OpN, N, i, const_cast<SDep &>(dep));
          ST.adjustSchedDependency(OpSU, SU, const_cast<SDep &>(dep));
        }

        if (!SU->addPred(dep) && !dep.isCtrl() && OpSU->NumRegDefsLeft > 1) {
          // Multiple register uses are combined in the same SUnit. For example,
          // we could have a set of glued nodes with all their defs consumed by
          // another set of glued nodes. Register pressure tracking sees this as
          // a single use, so to keep pressure balanced we reduce the defs.
          //
          // We can't tell (without more book-keeping) if this results from
          // glued nodes or duplicate operands. As long as we don't reduce
          // NumRegDefsLeft to zero, we handle the common cases well.
          --OpSU->NumRegDefsLeft;
        }
      }
    }
  }
}

/// BuildSchedGraph - Build the SUnit graph from the selection dag that we
/// are given as input. This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
/// glued together nodes with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
  // Cluster certain nodes which should be scheduled together.
  ClusterNodes();
  // Populate the SUnits array.
  BuildSchedUnits();
  // Compute all the scheduling dependencies between nodes.
  AddSchedEdges();
}

// Initialize NumNodeDefs for the current Node's opcode.
void ScheduleDAGSDNodes::RegDefIter::InitNodeNumDefs() {
  // Check for phys reg copy.
  if (!Node)
    return;

  if (!Node->isMachineOpcode()) {
    if (Node->getOpcode() == ISD::CopyFromReg)
      NodeNumDefs = 1;
    else
      NodeNumDefs = 0;
    return;
  }
  unsigned POpc = Node->getMachineOpcode();
  if (POpc == TargetOpcode::IMPLICIT_DEF) {
    // No register need be allocated for this.
    NodeNumDefs = 0;
    return;
  }
  unsigned NRegDefs = SchedDAG->TII->get(Node->getMachineOpcode()).getNumDefs();
  // Some instructions define regs that are not represented in the selection
  // DAG (e.g. unused flags). See tMOVi8. Make sure we don't access past
  // NumValues.
  NodeNumDefs = std::min(Node->getNumValues(), NRegDefs);
  DefIdx = 0;
}

// Construct a RegDefIter for this SUnit and find the first valid value.
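// The iterator visits each register value defined by the nodes glued into
// this SUnit, skipping values that have no uses.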
ScheduleDAGSDNodes::RegDefIter::RegDefIter(const SUnit *SU,
                                           const ScheduleDAGSDNodes *SD)
  : SchedDAG(SD), Node(SU->getNode()), DefIdx(0), NodeNumDefs(0) {
  InitNodeNumDefs();
  Advance();
}

// Advance to the next valid value defined by the SUnit.
void ScheduleDAGSDNodes::RegDefIter::Advance() {
  for (;Node;) { // Visit all glued nodes.
    for (;DefIdx < NodeNumDefs; ++DefIdx) {
      if (!Node->hasAnyUseOfValue(DefIdx))
        continue;
      if (Node->isMachineOpcode() &&
          Node->getMachineOpcode() == TargetOpcode::EXTRACT_SUBREG) {
        // Propagate the incoming (full-register) type. I doubt it's needed.
        ValueType = Node->getOperand(0).getValueType();
      }
      else {
        ValueType = Node->getValueType(DefIdx);
      }
      ++DefIdx;
      return; // Found a normal regdef.
    }
    Node = Node->getGluedNode();
    if (Node == NULL) {
      return; // No values left to visit.
    }
    InitNodeNumDefs();
  }
}

void ScheduleDAGSDNodes::InitNumRegDefsLeft(SUnit *SU) {
  assert(SU->NumRegDefsLeft == 0 && "expect a new node");
  for (RegDefIter I(SU, this); I.IsValid(); I.Advance()) {
    assert(SU->NumRegDefsLeft < USHRT_MAX && "overflow is ok but unexpected");
    ++SU->NumRegDefsLeft;
  }
}

void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
  SDNode *N = SU->getNode();

  // TokenFactor operands are considered zero latency, and some schedulers
  // (e.g. Top-Down list) may rely on the fact that operand latency is nonzero
  // whenever node latency is nonzero.
  if (N && N->getOpcode() == ISD::TokenFactor) {
    SU->Latency = 0;
    return;
  }

  // Check to see if the scheduler cares about latencies.
  if (ForceUnitLatencies()) {
    SU->Latency = 1;
    return;
  }

  if (!InstrItins || InstrItins->isEmpty()) {
    if (N && N->isMachineOpcode() &&
        TII->isHighLatencyDef(N->getMachineOpcode()))
      SU->Latency = HighLatencyCycles;
    else
      SU->Latency = 1;
    return;
  }

  // Compute the latency for the node. We use the sum of the latencies for
  // all nodes glued together into this SUnit.
  SU->Latency = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getGluedNode())
    if (N->isMachineOpcode())
      SU->Latency += TII->getInstrLatency(InstrItins, N);
}

void ScheduleDAGSDNodes::ComputeOperandLatency(SDNode *Def, SDNode *Use,
                                               unsigned OpIdx, SDep& dep) const{
  // Check to see if the scheduler cares about latencies.
  if (ForceUnitLatencies())
    return;

  if (dep.getKind() != SDep::Data)
    return;

  unsigned DefIdx = Use->getOperand(OpIdx).getResNo();
  if (Use->isMachineOpcode())
    // Adjust the use operand index by the number of defs.
    OpIdx += TII->get(Use->getMachineOpcode()).getNumDefs();
  int Latency = TII->getOperandLatency(InstrItins, Def, DefIdx, Use, OpIdx);
  if (Latency > 1 && Use->getOpcode() == ISD::CopyToReg &&
      !BB->succ_empty()) {
    unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      // This copy is a liveout value. It is likely coalesced, so reduce the
      // latency so as not to penalize the def.
      // FIXME: need target specific adjustment here?
      Latency = (Latency > 1) ? Latency - 1 : 1;
  }
  if (Latency >= 0)
    dep.setLatency(Latency);
}

void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
  if (!SU->getNode()) {
    dbgs() << "PHYS REG COPY\n";
    return;
  }

  SU->getNode()->dump(DAG);
  dbgs() << "\n";
  SmallVector<SDNode *, 4> GluedNodes;
  for (SDNode *N = SU->getNode()->getGluedNode(); N; N = N->getGluedNode())
    GluedNodes.push_back(N);
  while (!GluedNodes.empty()) {
    dbgs() << "    ";
    GluedNodes.back()->dump(DAG);
    dbgs() << "\n";
    GluedNodes.pop_back();
  }
}

namespace {
  struct OrderSorter {
    bool operator()(const std::pair<unsigned, MachineInstr*> &A,
                    const std::pair<unsigned, MachineInstr*> &B) {
      return A.first < B.first;
    }
  };
}

/// ProcessSDDbgValues - Process SDDbgValues associated with this node.
static void ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG,
                               InstrEmitter &Emitter,
                    SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
                               DenseMap<SDValue, unsigned> &VRBaseMap,
                               unsigned Order) {
  if (!N->getHasDebugValue())
    return;

  // Opportunistically insert immediate dbg_value uses, i.e. those with source
  // order number right after N.
  MachineBasicBlock *BB = Emitter.getBlock();
  MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
  SmallVector<SDDbgValue*,2> &DVs = DAG->GetDbgValues(N);
  for (unsigned i = 0, e = DVs.size(); i != e; ++i) {
    if (DVs[i]->isInvalidated())
      continue;
    unsigned DVOrder = DVs[i]->getOrder();
    if (!Order || DVOrder == ++Order) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(DVs[i], VRBaseMap);
      if (DbgMI) {
        Orders.push_back(std::make_pair(DVOrder, DbgMI));
        BB->insert(InsertPos, DbgMI);
      }
      DVs[i]->setIsInvalidated();
    }
  }
}

// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
                              InstrEmitter &Emitter,
                              DenseMap<SDValue, unsigned> &VRBaseMap,
                    SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
                              SmallSet<unsigned, 8> &Seen) {
  unsigned Order = DAG->GetOrdering(N);
  if (!Order || !Seen.insert(Order)) {
    // Process any valid SDDbgValues even if node does not have any order
    // assigned.
    ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, 0);
    return;
  }

  MachineBasicBlock *BB = Emitter.getBlock();
  if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI()) {
    // Did not insert any instruction.
    Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
    return;
  }

  Orders.push_back(std::make_pair(Order, prior(Emitter.getInsertPos())));
  ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, Order);
}


/// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;
  DenseMap<SUnit*, unsigned> CopyVRBaseMap;
  SmallVector<std::pair<unsigned, MachineInstr*>, 32> Orders;
  SmallSet<unsigned, 8> Seen;
  bool HasDbg = DAG->hasDebugValues();

  // If this is the first BB, emit byval parameter dbg_value's.
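  // The DAG keeps dbg_value descriptions for byval arguments in a separate
  // list; they belong at the very start of the entry block, before any
  // scheduled instructions.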
  if (HasDbg && BB->getParent()->begin() == MachineFunction::iterator(BB)) {
    SDDbgInfo::DbgIterator PDI = DAG->ByvalParmDbgBegin();
    SDDbgInfo::DbgIterator PDE = DAG->ByvalParmDbgEnd();
    for (; PDI != PDE; ++PDI) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(*PDI, VRBaseMap);
      if (DbgMI)
        BB->insert(InsertPos, DbgMI);
    }
  }

  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    // For pre-regalloc scheduling, create instructions corresponding to the
    // SDNode and any glued SDNodes and append them to the block.
    if (!SU->getNode()) {
      // Emit a copy.
      EmitPhysRegCopy(SU, CopyVRBaseMap);
      continue;
    }

    SmallVector<SDNode *, 4> GluedNodes;
    for (SDNode *N = SU->getNode()->getGluedNode(); N;
         N = N->getGluedNode())
      GluedNodes.push_back(N);
    while (!GluedNodes.empty()) {
      SDNode *N = GluedNodes.back();
      Emitter.EmitNode(GluedNodes.back(), SU->OrigNode != SU, SU->isCloned,
                       VRBaseMap);
      // Remember the source order of the inserted instruction.
      if (HasDbg)
        ProcessSourceNode(N, DAG, Emitter, VRBaseMap, Orders, Seen);
      GluedNodes.pop_back();
    }
    Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
                     VRBaseMap);
    // Remember the source order of the inserted instruction.
    if (HasDbg)
      ProcessSourceNode(SU->getNode(), DAG, Emitter, VRBaseMap, Orders,
                        Seen);
  }

  // Insert all the dbg_values which have not already been inserted in source
  // order sequence.
  if (HasDbg) {
    MachineBasicBlock::iterator BBBegin = BB->getFirstNonPHI();

    // Sort the source order instructions and use the order to insert debug
    // values.
    std::sort(Orders.begin(), Orders.end(), OrderSorter());

    SDDbgInfo::DbgIterator DI = DAG->DbgBegin();
    SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
    // Now emit the rest according to source order.
    unsigned LastOrder = 0;
    for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
      unsigned Order = Orders[i].first;
      MachineInstr *MI = Orders[i].second;
      // Insert all SDDbgValue's whose order(s) are before "Order".
      if (!MI)
        continue;
      for (; DI != DE &&
             (*DI)->getOrder() >= LastOrder && (*DI)->getOrder() < Order; ++DI) {
        if ((*DI)->isInvalidated())
          continue;
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI) {
          if (!LastOrder)
            // Insert to start of the BB (after PHIs).
            BB->insert(BBBegin, DbgMI);
          else {
            // Insert at the instruction, which may be in a different
            // block, if the block was split by a custom inserter.
            MachineBasicBlock::iterator Pos = MI;
            MI->getParent()->insert(llvm::next(Pos), DbgMI);
          }
        }
      }
      LastOrder = Order;
    }
    // Add trailing DbgValue's before the terminator. FIXME: May want to add
    // some of them before one or more conditional branches?
    while (DI != DE) {
      MachineBasicBlock *InsertBB = Emitter.getBlock();
      MachineBasicBlock::iterator Pos = Emitter.getBlock()->getFirstTerminator();
      if (!(*DI)->isInvalidated()) {
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI)
          InsertBB->insert(Pos, DbgMI);
      }
      ++DI;
    }
  }

  BB = Emitter.getBlock();
  InsertPos = Emitter.getInsertPos();
  return BB;
}