1193323Sed//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===// 2193323Sed// 3353358Sdim// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4353358Sdim// See https://llvm.org/LICENSE.txt for license information. 5353358Sdim// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6193323Sed// 7193323Sed//===----------------------------------------------------------------------===// 8193323Sed// 9321369Sdim/// \file This implements the ScheduleDAGInstrs class, which implements 10321369Sdim/// re-scheduling of MachineInstrs. 11193323Sed// 12193323Sed//===----------------------------------------------------------------------===// 13193323Sed 14249423Sdim#include "llvm/CodeGen/ScheduleDAGInstrs.h" 15296417Sdim#include "llvm/ADT/IntEqClasses.h" 16321369Sdim#include "llvm/ADT/MapVector.h" 17249423Sdim#include "llvm/ADT/SmallPtrSet.h" 18321369Sdim#include "llvm/ADT/SmallVector.h" 19321369Sdim#include "llvm/ADT/SparseSet.h" 20321369Sdim#include "llvm/ADT/iterator_range.h" 21218893Sdim#include "llvm/Analysis/ValueTracking.h" 22327952Sdim#include "llvm/CodeGen/LiveIntervals.h" 23321369Sdim#include "llvm/CodeGen/LivePhysRegs.h" 24321369Sdim#include "llvm/CodeGen/MachineBasicBlock.h" 25288943Sdim#include "llvm/CodeGen/MachineFrameInfo.h" 26321369Sdim#include "llvm/CodeGen/MachineFunction.h" 27321369Sdim#include "llvm/CodeGen/MachineInstr.h" 28321369Sdim#include "llvm/CodeGen/MachineInstrBundle.h" 29198090Srdivacky#include "llvm/CodeGen/MachineMemOperand.h" 30321369Sdim#include "llvm/CodeGen/MachineOperand.h" 31193323Sed#include "llvm/CodeGen/MachineRegisterInfo.h" 32193323Sed#include "llvm/CodeGen/PseudoSourceValue.h" 33239462Sdim#include "llvm/CodeGen/RegisterPressure.h" 34321369Sdim#include "llvm/CodeGen/ScheduleDAG.h" 35249423Sdim#include "llvm/CodeGen/ScheduleDFS.h" 36321369Sdim#include "llvm/CodeGen/SlotIndexes.h" 37327952Sdim#include "llvm/CodeGen/TargetRegisterInfo.h" 38327952Sdim#include "llvm/CodeGen/TargetSubtargetInfo.h" 39341825Sdim#include "llvm/Config/llvm-config.h" 40321369Sdim#include "llvm/IR/Constants.h" 41309124Sdim#include "llvm/IR/Function.h" 42321369Sdim#include "llvm/IR/Instruction.h" 43321369Sdim#include "llvm/IR/Instructions.h" 44321369Sdim#include "llvm/IR/Operator.h" 45309124Sdim#include "llvm/IR/Type.h" 46321369Sdim#include "llvm/IR/Value.h" 47321369Sdim#include "llvm/MC/LaneBitmask.h" 48321369Sdim#include "llvm/MC/MCRegisterInfo.h" 49321369Sdim#include "llvm/Support/Casting.h" 50239462Sdim#include "llvm/Support/CommandLine.h" 51321369Sdim#include "llvm/Support/Compiler.h" 52193323Sed#include "llvm/Support/Debug.h" 53321369Sdim#include "llvm/Support/ErrorHandling.h" 54243830Sdim#include "llvm/Support/Format.h" 55193323Sed#include "llvm/Support/raw_ostream.h" 56321369Sdim#include <algorithm> 57321369Sdim#include <cassert> 58321369Sdim#include <iterator> 59321369Sdim#include <string> 60321369Sdim#include <utility> 61321369Sdim#include <vector> 62261991Sdim 63193323Sedusing namespace llvm; 64193323Sed 65321369Sdim#define DEBUG_TYPE "machine-scheduler" 66276479Sdim 67239462Sdimstatic cl::opt<bool> EnableAASchedMI("enable-aa-sched-mi", cl::Hidden, 68239462Sdim cl::ZeroOrMore, cl::init(false), 69280031Sdim cl::desc("Enable use of AA during MI DAG construction")); 70239462Sdim 71276479Sdimstatic cl::opt<bool> UseTBAA("use-tbaa-in-sched-mi", cl::Hidden, 72280031Sdim cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction")); 73276479Sdim 74309124Sdim// Note: the two options below might be used in tuning compile time vs 75309124Sdim// output quality. Setting HugeRegion so large that it will never be 76309124Sdim// reached means best-effort, but may be slow. 77309124Sdim 78309124Sdim// When Stores and Loads maps (or NonAliasStores and NonAliasLoads) 79309124Sdim// together hold this many SUs, a reduction of maps will be done. 80309124Sdimstatic cl::opt<unsigned> HugeRegion("dag-maps-huge-region", cl::Hidden, 81309124Sdim cl::init(1000), cl::desc("The limit to use while constructing the DAG " 82309124Sdim "prior to scheduling, at which point a trade-off " 83309124Sdim "is made to avoid excessive compile time.")); 84309124Sdim 85309124Sdimstatic cl::opt<unsigned> ReductionSize( 86309124Sdim "dag-maps-reduction-size", cl::Hidden, 87309124Sdim cl::desc("A huge scheduling region will have maps reduced by this many " 88309124Sdim "nodes at a time. Defaults to HugeRegion / 2.")); 89309124Sdim 90309124Sdimstatic unsigned getReductionSize() { 91309124Sdim // Always reduce a huge region with half of the elements, except 92309124Sdim // when user sets this number explicitly. 93309124Sdim if (ReductionSize.getNumOccurrences() == 0) 94309124Sdim return HugeRegion / 2; 95309124Sdim return ReductionSize; 96309124Sdim} 97309124Sdim 98309124Sdimstatic void dumpSUList(ScheduleDAGInstrs::SUList &L) { 99309124Sdim#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 100309124Sdim dbgs() << "{ "; 101314564Sdim for (const SUnit *su : L) { 102309124Sdim dbgs() << "SU(" << su->NodeNum << ")"; 103309124Sdim if (su != L.back()) 104309124Sdim dbgs() << ", "; 105309124Sdim } 106309124Sdim dbgs() << "}\n"; 107309124Sdim#endif 108309124Sdim} 109309124Sdim 110193323SedScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf, 111280031Sdim const MachineLoopInfo *mli, 112296417Sdim bool RemoveKillFlags) 113296417Sdim : ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()), 114321369Sdim RemoveKillFlags(RemoveKillFlags), 115309124Sdim UnknownValue(UndefValue::get( 116353358Sdim Type::getVoidTy(mf.getFunction().getContext()))), Topo(SUnits, &ExitSU) { 117223017Sdim DbgValues.clear(); 118243830Sdim 119288943Sdim const TargetSubtargetInfo &ST = mf.getSubtarget(); 120341825Sdim SchedModel.init(&ST); 121198396Srdivacky} 122193323Sed 123327952Sdim/// If this machine instr has memory reference information and it can be 124327952Sdim/// tracked to a normal reference to a known object, return the Value 125327952Sdim/// for that object. This function returns false the memory location is 126327952Sdim/// unknown or may alias anything. 127327952Sdimstatic bool getUnderlyingObjectsForInstr(const MachineInstr *MI, 128314564Sdim const MachineFrameInfo &MFI, 129288943Sdim UnderlyingObjectsVector &Objects, 130288943Sdim const DataLayout &DL) { 131309124Sdim auto allMMOsOkay = [&]() { 132309124Sdim for (const MachineMemOperand *MMO : MI->memoperands()) { 133353358Sdim // TODO: Figure out whether isAtomic is really necessary (see D57601). 134353358Sdim if (MMO->isVolatile() || MMO->isAtomic()) 135309124Sdim return false; 136193323Sed 137309124Sdim if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) { 138309124Sdim // Function that contain tail calls don't have unique PseudoSourceValue 139309124Sdim // objects. Two PseudoSourceValues might refer to the same or 140309124Sdim // overlapping locations. The client code calling this function assumes 141309124Sdim // this is not the case. So return a conservative answer of no known 142309124Sdim // object. 143314564Sdim if (MFI.hasTailCall()) 144309124Sdim return false; 145288943Sdim 146309124Sdim // For now, ignore PseudoSourceValues which may alias LLVM IR values 147309124Sdim // because the code that uses this function has no way to cope with 148309124Sdim // such aliases. 149314564Sdim if (PSV->isAliased(&MFI)) 150309124Sdim return false; 151276479Sdim 152314564Sdim bool MayAlias = PSV->mayAlias(&MFI); 153309124Sdim Objects.push_back(UnderlyingObjectsVector::value_type(PSV, MayAlias)); 154309124Sdim } else if (const Value *V = MMO->getValue()) { 155309124Sdim SmallVector<Value *, 4> Objs; 156327952Sdim if (!getUnderlyingObjectsForCodeGen(V, Objs, DL)) 157327952Sdim return false; 158193323Sed 159309124Sdim for (Value *V : Objs) { 160322320Sdim assert(isIdentifiedObject(V)); 161309124Sdim Objects.push_back(UnderlyingObjectsVector::value_type(V, true)); 162309124Sdim } 163309124Sdim } else 164309124Sdim return false; 165249423Sdim } 166309124Sdim return true; 167309124Sdim }; 168249423Sdim 169327952Sdim if (!allMMOsOkay()) { 170309124Sdim Objects.clear(); 171327952Sdim return false; 172327952Sdim } 173327952Sdim 174327952Sdim return true; 175193323Sed} 176193323Sed 177239462Sdimvoid ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) { 178239462Sdim BB = bb; 179193323Sed} 180193323Sed 181234353Sdimvoid ScheduleDAGInstrs::finishBlock() { 182239462Sdim // Subclasses should no longer refer to the old block. 183276479Sdim BB = nullptr; 184234353Sdim} 185234353Sdim 186234353Sdimvoid ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb, 187234353Sdim MachineBasicBlock::iterator begin, 188234353Sdim MachineBasicBlock::iterator end, 189261991Sdim unsigned regioninstrs) { 190239462Sdim assert(bb == BB && "startBlock should set BB"); 191234353Sdim RegionBegin = begin; 192234353Sdim RegionEnd = end; 193261991Sdim NumRegionInstrs = regioninstrs; 194234353Sdim} 195234353Sdim 196234353Sdimvoid ScheduleDAGInstrs::exitRegion() { 197234353Sdim // Nothing to do. 198234353Sdim} 199234353Sdim 200234353Sdimvoid ScheduleDAGInstrs::addSchedBarrierDeps() { 201276479Sdim MachineInstr *ExitMI = RegionEnd != BB->end() ? &*RegionEnd : nullptr; 202218893Sdim ExitSU.setInstr(ExitMI); 203314564Sdim // Add dependencies on the defs and uses of the instruction. 204314564Sdim if (ExitMI) { 205314564Sdim for (const MachineOperand &MO : ExitMI->operands()) { 206218893Sdim if (!MO.isReg() || MO.isDef()) continue; 207360784Sdim Register Reg = MO.getReg(); 208360784Sdim if (Register::isPhysicalRegister(Reg)) { 209249423Sdim Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg)); 210360784Sdim } else if (Register::isVirtualRegister(Reg) && MO.readsReg()) { 211314564Sdim addVRegUseDeps(&ExitSU, ExitMI->getOperandNo(&MO)); 212314564Sdim } 213218893Sdim } 214314564Sdim } 215314564Sdim if (!ExitMI || (!ExitMI->isCall() && !ExitMI->isBarrier())) { 216218893Sdim // For others, e.g. fallthrough, conditional branch, assume the exit 217218893Sdim // uses all the registers that are livein to the successor blocks. 218314564Sdim for (const MachineBasicBlock *Succ : BB->successors()) { 219314564Sdim for (const auto &LI : Succ->liveins()) { 220296417Sdim if (!Uses.contains(LI.PhysReg)) 221296417Sdim Uses.insert(PhysRegSUOper(&ExitSU, -1, LI.PhysReg)); 222218893Sdim } 223314564Sdim } 224218893Sdim } 225218893Sdim} 226218893Sdim 227321369Sdim/// MO is an operand of SU's instruction that defines a physical register. Adds 228234353Sdim/// data dependencies from SU to any uses of the physical register. 229243830Sdimvoid ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) { 230243830Sdim const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx); 231234353Sdim assert(MO.isDef() && "expect physreg def"); 232234353Sdim 233234353Sdim // Ask the target if address-backscheduling is desirable, and if so how much. 234288943Sdim const TargetSubtargetInfo &ST = MF.getSubtarget(); 235234353Sdim 236344779Sdim // Only use any non-zero latency for real defs/uses, in contrast to 237344779Sdim // "fake" operands added by regalloc. 238344779Sdim const MCInstrDesc *DefMIDesc = &SU->getInstr()->getDesc(); 239344779Sdim bool ImplicitPseudoDef = (OperIdx >= DefMIDesc->getNumOperands() && 240344779Sdim !DefMIDesc->hasImplicitDefOfPhysReg(MO.getReg())); 241239462Sdim for (MCRegAliasIterator Alias(MO.getReg(), TRI, true); 242239462Sdim Alias.isValid(); ++Alias) { 243234353Sdim if (!Uses.contains(*Alias)) 244234353Sdim continue; 245249423Sdim for (Reg2SUnitsMap::iterator I = Uses.find(*Alias); I != Uses.end(); ++I) { 246249423Sdim SUnit *UseSU = I->SU; 247234353Sdim if (UseSU == SU) 248234353Sdim continue; 249239462Sdim 250243830Sdim // Adjust the dependence latency using operand def/use information, 251243830Sdim // then allow the target to perform its own adjustments. 252249423Sdim int UseOp = I->OpIdx; 253276479Sdim MachineInstr *RegUse = nullptr; 254249423Sdim SDep Dep; 255249423Sdim if (UseOp < 0) 256249423Sdim Dep = SDep(SU, SDep::Artificial); 257249423Sdim else { 258251662Sdim // Set the hasPhysRegDefs only for physreg defs that have a use within 259251662Sdim // the scheduling region. 260251662Sdim SU->hasPhysRegDefs = true; 261249423Sdim Dep = SDep(SU, SDep::Data, *Alias); 262249423Sdim RegUse = UseSU->getInstr(); 263249423Sdim } 264344779Sdim const MCInstrDesc *UseMIDesc = 265344779Sdim (RegUse ? &UseSU->getInstr()->getDesc() : nullptr); 266344779Sdim bool ImplicitPseudoUse = 267344779Sdim (UseMIDesc && UseOp >= ((int)UseMIDesc->getNumOperands()) && 268344779Sdim !UseMIDesc->hasImplicitUseOfPhysReg(*Alias)); 269344779Sdim if (!ImplicitPseudoDef && !ImplicitPseudoUse) { 270344779Sdim Dep.setLatency(SchedModel.computeOperandLatency(SU->getInstr(), OperIdx, 271344779Sdim RegUse, UseOp)); 272344779Sdim ST.adjustSchedDependency(SU, UseSU, Dep); 273360784Sdim } else { 274344779Sdim Dep.setLatency(0); 275360784Sdim // FIXME: We could always let target to adjustSchedDependency(), and 276360784Sdim // remove this condition, but that currently asserts in Hexagon BE. 277360784Sdim if (SU->getInstr()->isBundle() || (RegUse && RegUse->isBundle())) 278360784Sdim ST.adjustSchedDependency(SU, UseSU, Dep); 279360784Sdim } 280243830Sdim 281249423Sdim UseSU->addPred(Dep); 282234353Sdim } 283234353Sdim } 284234353Sdim} 285234353Sdim 286341825Sdim/// Adds register dependencies (data, anti, and output) from this SUnit 287321369Sdim/// to following instructions in the same scheduling region that depend the 288321369Sdim/// physical register referenced at OperIdx. 289234353Sdimvoid ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) { 290276479Sdim MachineInstr *MI = SU->getInstr(); 291276479Sdim MachineOperand &MO = MI->getOperand(OperIdx); 292360784Sdim Register Reg = MO.getReg(); 293314564Sdim // We do not need to track any dependencies for constant registers. 294314564Sdim if (MRI.isConstantPhysReg(Reg)) 295314564Sdim return; 296234353Sdim 297234353Sdim // Optionally add output and anti dependencies. For anti 298234353Sdim // dependencies we use a latency of 0 because for a multi-issue 299234353Sdim // target we want to allow the defining instruction to issue 300234353Sdim // in the same cycle as the using instruction. 301234353Sdim // TODO: Using a latency of 1 here for output dependencies assumes 302234353Sdim // there's no cost for reusing registers. 303234353Sdim SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output; 304314564Sdim for (MCRegAliasIterator Alias(Reg, TRI, true); Alias.isValid(); ++Alias) { 305234353Sdim if (!Defs.contains(*Alias)) 306234353Sdim continue; 307249423Sdim for (Reg2SUnitsMap::iterator I = Defs.find(*Alias); I != Defs.end(); ++I) { 308249423Sdim SUnit *DefSU = I->SU; 309234353Sdim if (DefSU == &ExitSU) 310234353Sdim continue; 311234353Sdim if (DefSU != SU && 312234353Sdim (Kind != SDep::Output || !MO.isDead() || 313234353Sdim !DefSU->getInstr()->registerDefIsDead(*Alias))) { 314234353Sdim if (Kind == SDep::Anti) 315243830Sdim DefSU->addPred(SDep(SU, Kind, /*Reg=*/*Alias)); 316234353Sdim else { 317243830Sdim SDep Dep(SU, Kind, /*Reg=*/*Alias); 318261991Sdim Dep.setLatency( 319261991Sdim SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr())); 320243830Sdim DefSU->addPred(Dep); 321234353Sdim } 322234353Sdim } 323234353Sdim } 324234353Sdim } 325234353Sdim 326234353Sdim if (!MO.isDef()) { 327251662Sdim SU->hasPhysRegUses = true; 328234353Sdim // Either insert a new Reg2SUnits entry with an empty SUnits list, or 329234353Sdim // retrieve the existing SUnits list for this register's uses. 330234353Sdim // Push this SUnit on the use list. 331314564Sdim Uses.insert(PhysRegSUOper(SU, OperIdx, Reg)); 332276479Sdim if (RemoveKillFlags) 333276479Sdim MO.setIsKill(false); 334314564Sdim } else { 335243830Sdim addPhysRegDataDeps(SU, OperIdx); 336234353Sdim 337341825Sdim // Clear previous uses and defs of this register and its subergisters. 338341825Sdim for (MCSubRegIterator SubReg(Reg, TRI, true); SubReg.isValid(); ++SubReg) { 339341825Sdim if (Uses.contains(*SubReg)) 340341825Sdim Uses.eraseAll(*SubReg); 341341825Sdim if (!MO.isDead()) 342341825Sdim Defs.eraseAll(*SubReg); 343341825Sdim } 344341825Sdim if (MO.isDead() && SU->isCall) { 345249423Sdim // Calls will not be reordered because of chain dependencies (see 346249423Sdim // below). Since call operands are dead, calls may continue to be added 347249423Sdim // to the DefList making dependence checking quadratic in the size of 348249423Sdim // the block. Instead, we leave only one call at the back of the 349249423Sdim // DefList. 350249423Sdim Reg2SUnitsMap::RangePair P = Defs.equal_range(Reg); 351249423Sdim Reg2SUnitsMap::iterator B = P.first; 352249423Sdim Reg2SUnitsMap::iterator I = P.second; 353249423Sdim for (bool isBegin = I == B; !isBegin; /* empty */) { 354249423Sdim isBegin = (--I) == B; 355249423Sdim if (!I->SU->isCall) 356249423Sdim break; 357249423Sdim I = Defs.erase(I); 358249423Sdim } 359249423Sdim } 360234353Sdim 361234353Sdim // Defs are pushed in the order they are visited and never reordered. 362249423Sdim Defs.insert(PhysRegSUOper(SU, OperIdx, Reg)); 363234353Sdim } 364234353Sdim} 365234353Sdim 366296417SdimLaneBitmask ScheduleDAGInstrs::getLaneMaskForMO(const MachineOperand &MO) const 367296417Sdim{ 368360784Sdim Register Reg = MO.getReg(); 369296417Sdim // No point in tracking lanemasks if we don't have interesting subregisters. 370296417Sdim const TargetRegisterClass &RC = *MRI.getRegClass(Reg); 371296417Sdim if (!RC.HasDisjunctSubRegs) 372314564Sdim return LaneBitmask::getAll(); 373296417Sdim 374296417Sdim unsigned SubReg = MO.getSubReg(); 375296417Sdim if (SubReg == 0) 376296417Sdim return RC.getLaneMask(); 377296417Sdim return TRI->getSubRegIndexLaneMask(SubReg); 378296417Sdim} 379296417Sdim 380360784Sdimbool ScheduleDAGInstrs::deadDefHasNoUse(const MachineOperand &MO) { 381360784Sdim auto RegUse = CurrentVRegUses.find(MO.getReg()); 382360784Sdim if (RegUse == CurrentVRegUses.end()) 383360784Sdim return true; 384360784Sdim return (RegUse->LaneMask & getLaneMaskForMO(MO)).none(); 385360784Sdim} 386360784Sdim 387321369Sdim/// Adds register output and data dependencies from this SUnit to instructions 388321369Sdim/// that occur later in the same scheduling region if they read from or write to 389321369Sdim/// the virtual register defined at OperIdx. 390234353Sdim/// 391234353Sdim/// TODO: Hoist loop induction variable increments. This has to be 392234353Sdim/// reevaluated. Generally, IV scheduling should be done before coalescing. 393234353Sdimvoid ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) { 394296417Sdim MachineInstr *MI = SU->getInstr(); 395296417Sdim MachineOperand &MO = MI->getOperand(OperIdx); 396360784Sdim Register Reg = MO.getReg(); 397234353Sdim 398296417Sdim LaneBitmask DefLaneMask; 399296417Sdim LaneBitmask KillLaneMask; 400296417Sdim if (TrackLaneMasks) { 401296417Sdim bool IsKill = MO.getSubReg() == 0 || MO.isUndef(); 402296417Sdim DefLaneMask = getLaneMaskForMO(MO); 403296417Sdim // If we have a <read-undef> flag, none of the lane values comes from an 404296417Sdim // earlier instruction. 405314564Sdim KillLaneMask = IsKill ? LaneBitmask::getAll() : DefLaneMask; 406296417Sdim 407360784Sdim if (MO.getSubReg() != 0 && MO.isUndef()) { 408360784Sdim // There may be other subregister defs on the same instruction of the same 409360784Sdim // register in later operands. The lanes of other defs will now be live 410360784Sdim // after this instruction, so these should not be treated as killed by the 411360784Sdim // instruction even though they appear to be killed in this one operand. 412360784Sdim for (int I = OperIdx + 1, E = MI->getNumOperands(); I != E; ++I) { 413360784Sdim const MachineOperand &OtherMO = MI->getOperand(I); 414360784Sdim if (OtherMO.isReg() && OtherMO.isDef() && OtherMO.getReg() == Reg) 415360784Sdim KillLaneMask &= ~getLaneMaskForMO(OtherMO); 416360784Sdim } 417360784Sdim } 418360784Sdim 419296417Sdim // Clear undef flag, we'll re-add it later once we know which subregister 420296417Sdim // Def is first. 421296417Sdim MO.setIsUndef(false); 422296417Sdim } else { 423314564Sdim DefLaneMask = LaneBitmask::getAll(); 424314564Sdim KillLaneMask = LaneBitmask::getAll(); 425296417Sdim } 426296417Sdim 427296417Sdim if (MO.isDead()) { 428360784Sdim assert(deadDefHasNoUse(MO) && "Dead defs should have no uses"); 429296417Sdim } else { 430296417Sdim // Add data dependence to all uses we found so far. 431296417Sdim const TargetSubtargetInfo &ST = MF.getSubtarget(); 432296417Sdim for (VReg2SUnitOperIdxMultiMap::iterator I = CurrentVRegUses.find(Reg), 433296417Sdim E = CurrentVRegUses.end(); I != E; /*empty*/) { 434296417Sdim LaneBitmask LaneMask = I->LaneMask; 435296417Sdim // Ignore uses of other lanes. 436314564Sdim if ((LaneMask & KillLaneMask).none()) { 437296417Sdim ++I; 438296417Sdim continue; 439296417Sdim } 440296417Sdim 441314564Sdim if ((LaneMask & DefLaneMask).any()) { 442296417Sdim SUnit *UseSU = I->SU; 443296417Sdim MachineInstr *Use = UseSU->getInstr(); 444296417Sdim SDep Dep(SU, SDep::Data, Reg); 445296417Sdim Dep.setLatency(SchedModel.computeOperandLatency(MI, OperIdx, Use, 446296417Sdim I->OperandIndex)); 447296417Sdim ST.adjustSchedDependency(SU, UseSU, Dep); 448296417Sdim UseSU->addPred(Dep); 449296417Sdim } 450296417Sdim 451296417Sdim LaneMask &= ~KillLaneMask; 452296417Sdim // If we found a Def for all lanes of this use, remove it from the list. 453314564Sdim if (LaneMask.any()) { 454296417Sdim I->LaneMask = LaneMask; 455296417Sdim ++I; 456296417Sdim } else 457296417Sdim I = CurrentVRegUses.erase(I); 458296417Sdim } 459296417Sdim } 460296417Sdim 461296417Sdim // Shortcut: Singly defined vregs do not have output/anti dependencies. 462239462Sdim if (MRI.hasOneDef(Reg)) 463234353Sdim return; 464234353Sdim 465296417Sdim // Add output dependence to the next nearest defs of this vreg. 466234353Sdim // 467234353Sdim // Unless this definition is dead, the output dependence should be 468234353Sdim // transitively redundant with antidependencies from this definition's 469234353Sdim // uses. We're conservative for now until we have a way to guarantee the uses 470234353Sdim // are not eliminated sometime during scheduling. The output dependence edge 471234353Sdim // is also useful if output latency exceeds def-use latency. 472296417Sdim LaneBitmask LaneMask = DefLaneMask; 473296417Sdim for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg), 474296417Sdim CurrentVRegDefs.end())) { 475296417Sdim // Ignore defs for other lanes. 476314564Sdim if ((V2SU.LaneMask & LaneMask).none()) 477296417Sdim continue; 478296417Sdim // Add an output dependence. 479296417Sdim SUnit *DefSU = V2SU.SU; 480296417Sdim // Ignore additional defs of the same lanes in one instruction. This can 481296417Sdim // happen because lanemasks are shared for targets with too many 482296417Sdim // subregisters. We also use some representration tricks/hacks where we 483296417Sdim // add super-register defs/uses, to imply that although we only access parts 484296417Sdim // of the reg we care about the full one. 485296417Sdim if (DefSU == SU) 486296417Sdim continue; 487296417Sdim SDep Dep(SU, SDep::Output, Reg); 488296417Sdim Dep.setLatency( 489296417Sdim SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr())); 490296417Sdim DefSU->addPred(Dep); 491296417Sdim 492296417Sdim // Update current definition. This can get tricky if the def was about a 493296417Sdim // bigger lanemask before. We then have to shrink it and create a new 494296417Sdim // VReg2SUnit for the non-overlapping part. 495296417Sdim LaneBitmask OverlapMask = V2SU.LaneMask & LaneMask; 496296417Sdim LaneBitmask NonOverlapMask = V2SU.LaneMask & ~LaneMask; 497296417Sdim V2SU.SU = SU; 498296417Sdim V2SU.LaneMask = OverlapMask; 499314564Sdim if (NonOverlapMask.any()) 500309124Sdim CurrentVRegDefs.insert(VReg2SUnit(Reg, NonOverlapMask, DefSU)); 501234353Sdim } 502296417Sdim // If there was no CurrentVRegDefs entry for some lanes yet, create one. 503314564Sdim if (LaneMask.any()) 504296417Sdim CurrentVRegDefs.insert(VReg2SUnit(Reg, LaneMask, SU)); 505234353Sdim} 506234353Sdim 507341825Sdim/// Adds a register data dependency if the instruction that defines the 508321369Sdim/// virtual register used at OperIdx is mapped to an SUnit. Add a register 509321369Sdim/// antidependency from this SUnit to instructions that occur later in the same 510321369Sdim/// scheduling region if they write the virtual register. 511234353Sdim/// 512234353Sdim/// TODO: Handle ExitSU "uses" properly. 513234353Sdimvoid ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) { 514296417Sdim const MachineInstr *MI = SU->getInstr(); 515296417Sdim const MachineOperand &MO = MI->getOperand(OperIdx); 516360784Sdim Register Reg = MO.getReg(); 517234353Sdim 518296417Sdim // Remember the use. Data dependencies will be added when we find the def. 519314564Sdim LaneBitmask LaneMask = TrackLaneMasks ? getLaneMaskForMO(MO) 520314564Sdim : LaneBitmask::getAll(); 521296417Sdim CurrentVRegUses.insert(VReg2SUnitOperIdx(Reg, LaneMask, OperIdx, SU)); 522261991Sdim 523296417Sdim // Add antidependences to the following defs of the vreg. 524296417Sdim for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg), 525296417Sdim CurrentVRegDefs.end())) { 526296417Sdim // Ignore defs for unrelated lanes. 527296417Sdim LaneBitmask PrevDefLaneMask = V2SU.LaneMask; 528314564Sdim if ((PrevDefLaneMask & LaneMask).none()) 529296417Sdim continue; 530296417Sdim if (V2SU.SU == SU) 531296417Sdim continue; 532239462Sdim 533296417Sdim V2SU.SU->addPred(SDep(SU, SDep::Anti, Reg)); 534234353Sdim } 535234353Sdim} 536234353Sdim 537321369Sdim/// Returns true if MI is an instruction we are unable to reason about 538239462Sdim/// (like a call or something with unmodeled side effects). 539360784Sdimstatic inline bool isGlobalMemoryObject(AAResults *AA, MachineInstr *MI) { 540296417Sdim return MI->isCall() || MI->hasUnmodeledSideEffects() || 541314564Sdim (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad(AA)); 542239462Sdim} 543239462Sdim 544309124Sdimvoid ScheduleDAGInstrs::addChainDependency (SUnit *SUa, SUnit *SUb, 545309124Sdim unsigned Latency) { 546321369Sdim if (SUa->getInstr()->mayAlias(AAForDep, *SUb->getInstr(), UseTBAA)) { 547309124Sdim SDep Dep(SUa, SDep::MayAliasMem); 548309124Sdim Dep.setLatency(Latency); 549243830Sdim SUb->addPred(Dep); 550243830Sdim } 551239462Sdim} 552239462Sdim 553341825Sdim/// Creates an SUnit for each real instruction, numbered in top-down 554321369Sdim/// topological order. The instruction order A < B, implies that no edge exists 555321369Sdim/// from B to A. 556234353Sdim/// 557234353Sdim/// Map each real instruction to its SUnit. 558234353Sdim/// 559234353Sdim/// After initSUnits, the SUnits vector cannot be resized and the scheduler may 560234353Sdim/// hang onto SUnit pointers. We may relax this in the future by using SUnit IDs 561234353Sdim/// instead of pointers. 562234353Sdim/// 563234353Sdim/// MachineScheduler relies on initSUnits numbering the nodes by their order in 564234353Sdim/// the original instruction list. 565234353Sdimvoid ScheduleDAGInstrs::initSUnits() { 566234353Sdim // We'll be allocating one SUnit for each real instruction in the region, 567234353Sdim // which is contained within a basic block. 568261991Sdim SUnits.reserve(NumRegionInstrs); 569193323Sed 570321369Sdim for (MachineInstr &MI : make_range(RegionBegin, RegionEnd)) { 571341825Sdim if (MI.isDebugInstr()) 572234353Sdim continue; 573234353Sdim 574309124Sdim SUnit *SU = newSUnit(&MI); 575309124Sdim MISUnitMap[&MI] = SU; 576234353Sdim 577309124Sdim SU->isCall = MI.isCall(); 578309124Sdim SU->isCommutable = MI.isCommutable(); 579234353Sdim 580234353Sdim // Assign the Latency field of SU using target-provided information. 581243830Sdim SU->Latency = SchedModel.computeInstrLatency(SU->getInstr()); 582276479Sdim 583276479Sdim // If this SUnit uses a reserved or unbuffered resource, mark it as such. 584276479Sdim // 585276479Sdim // Reserved resources block an instruction from issuing and stall the 586276479Sdim // entire pipeline. These are identified by BufferSize=0. 587276479Sdim // 588276479Sdim // Unbuffered resources prevent execution of subsequent instructions that 589276479Sdim // require the same resources. This is used for in-order execution pipelines 590276479Sdim // within an out-of-order core. These are identified by BufferSize=1. 591276479Sdim if (SchedModel.hasInstrSchedModel()) { 592276479Sdim const MCSchedClassDesc *SC = getSchedClass(SU); 593314564Sdim for (const MCWriteProcResEntry &PRE : 594314564Sdim make_range(SchedModel.getWriteProcResBegin(SC), 595314564Sdim SchedModel.getWriteProcResEnd(SC))) { 596314564Sdim switch (SchedModel.getProcResource(PRE.ProcResourceIdx)->BufferSize) { 597276479Sdim case 0: 598276479Sdim SU->hasReservedResource = true; 599276479Sdim break; 600276479Sdim case 1: 601276479Sdim SU->isUnbuffered = true; 602276479Sdim break; 603276479Sdim default: 604276479Sdim break; 605276479Sdim } 606276479Sdim } 607276479Sdim } 608234353Sdim } 609234353Sdim} 610234353Sdim 611309124Sdimclass ScheduleDAGInstrs::Value2SUsMap : public MapVector<ValueType, SUList> { 612309124Sdim /// Current total number of SUs in map. 613321369Sdim unsigned NumNodes = 0; 614309124Sdim 615309124Sdim /// 1 for loads, 0 for stores. (see comment in SUList) 616309124Sdim unsigned TrueMemOrderLatency; 617321369Sdim 618309124Sdimpublic: 619321369Sdim Value2SUsMap(unsigned lat = 0) : TrueMemOrderLatency(lat) {} 620309124Sdim 621309124Sdim /// To keep NumNodes up to date, insert() is used instead of 622309124Sdim /// this operator w/ push_back(). 623309124Sdim ValueType &operator[](const SUList &Key) { 624309124Sdim llvm_unreachable("Don't use. Use insert() instead."); }; 625309124Sdim 626321369Sdim /// Adds SU to the SUList of V. If Map grows huge, reduce its size by calling 627321369Sdim /// reduce(). 628309124Sdim void inline insert(SUnit *SU, ValueType V) { 629309124Sdim MapVector::operator[](V).push_back(SU); 630309124Sdim NumNodes++; 631309124Sdim } 632309124Sdim 633309124Sdim /// Clears the list of SUs mapped to V. 634309124Sdim void inline clearList(ValueType V) { 635309124Sdim iterator Itr = find(V); 636309124Sdim if (Itr != end()) { 637321369Sdim assert(NumNodes >= Itr->second.size()); 638309124Sdim NumNodes -= Itr->second.size(); 639309124Sdim 640309124Sdim Itr->second.clear(); 641309124Sdim } 642309124Sdim } 643309124Sdim 644309124Sdim /// Clears map from all contents. 645309124Sdim void clear() { 646309124Sdim MapVector<ValueType, SUList>::clear(); 647309124Sdim NumNodes = 0; 648309124Sdim } 649309124Sdim 650309124Sdim unsigned inline size() const { return NumNodes; } 651309124Sdim 652321369Sdim /// Counts the number of SUs in this map after a reduction. 653321369Sdim void reComputeSize() { 654309124Sdim NumNodes = 0; 655309124Sdim for (auto &I : *this) 656309124Sdim NumNodes += I.second.size(); 657309124Sdim } 658309124Sdim 659309124Sdim unsigned inline getTrueMemOrderLatency() const { 660309124Sdim return TrueMemOrderLatency; 661309124Sdim } 662309124Sdim 663309124Sdim void dump(); 664309124Sdim}; 665309124Sdim 666309124Sdimvoid ScheduleDAGInstrs::addChainDependencies(SUnit *SU, 667309124Sdim Value2SUsMap &Val2SUsMap) { 668309124Sdim for (auto &I : Val2SUsMap) 669309124Sdim addChainDependencies(SU, I.second, 670309124Sdim Val2SUsMap.getTrueMemOrderLatency()); 671309124Sdim} 672309124Sdim 673309124Sdimvoid ScheduleDAGInstrs::addChainDependencies(SUnit *SU, 674309124Sdim Value2SUsMap &Val2SUsMap, 675309124Sdim ValueType V) { 676309124Sdim Value2SUsMap::iterator Itr = Val2SUsMap.find(V); 677309124Sdim if (Itr != Val2SUsMap.end()) 678309124Sdim addChainDependencies(SU, Itr->second, 679309124Sdim Val2SUsMap.getTrueMemOrderLatency()); 680309124Sdim} 681309124Sdim 682309124Sdimvoid ScheduleDAGInstrs::addBarrierChain(Value2SUsMap &map) { 683321369Sdim assert(BarrierChain != nullptr); 684309124Sdim 685309124Sdim for (auto &I : map) { 686309124Sdim SUList &sus = I.second; 687309124Sdim for (auto *SU : sus) 688309124Sdim SU->addPredBarrier(BarrierChain); 689309124Sdim } 690309124Sdim map.clear(); 691309124Sdim} 692309124Sdim 693309124Sdimvoid ScheduleDAGInstrs::insertBarrierChain(Value2SUsMap &map) { 694321369Sdim assert(BarrierChain != nullptr); 695309124Sdim 696309124Sdim // Go through all lists of SUs. 697309124Sdim for (Value2SUsMap::iterator I = map.begin(), EE = map.end(); I != EE;) { 698309124Sdim Value2SUsMap::iterator CurrItr = I++; 699309124Sdim SUList &sus = CurrItr->second; 700309124Sdim SUList::iterator SUItr = sus.begin(), SUEE = sus.end(); 701309124Sdim for (; SUItr != SUEE; ++SUItr) { 702309124Sdim // Stop on BarrierChain or any instruction above it. 703309124Sdim if ((*SUItr)->NodeNum <= BarrierChain->NodeNum) 704309124Sdim break; 705309124Sdim 706309124Sdim (*SUItr)->addPredBarrier(BarrierChain); 707309124Sdim } 708309124Sdim 709309124Sdim // Remove also the BarrierChain from list if present. 710309124Sdim if (SUItr != SUEE && *SUItr == BarrierChain) 711309124Sdim SUItr++; 712309124Sdim 713309124Sdim // Remove all SUs that are now successors of BarrierChain. 714309124Sdim if (SUItr != sus.begin()) 715309124Sdim sus.erase(sus.begin(), SUItr); 716309124Sdim } 717309124Sdim 718309124Sdim // Remove all entries with empty su lists. 719309124Sdim map.remove_if([&](std::pair<ValueType, SUList> &mapEntry) { 720309124Sdim return (mapEntry.second.empty()); }); 721309124Sdim 722309124Sdim // Recompute the size of the map (NumNodes). 723309124Sdim map.reComputeSize(); 724309124Sdim} 725309124Sdim 726360784Sdimvoid ScheduleDAGInstrs::buildSchedGraph(AAResults *AA, 727261991Sdim RegPressureTracker *RPTracker, 728296417Sdim PressureDiffs *PDiffs, 729309124Sdim LiveIntervals *LIS, 730296417Sdim bool TrackLaneMasks) { 731288943Sdim const TargetSubtargetInfo &ST = MF.getSubtarget(); 732261991Sdim bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI 733261991Sdim : ST.useAA(); 734309124Sdim AAForDep = UseAA ? AA : nullptr; 735261991Sdim 736309124Sdim BarrierChain = nullptr; 737309124Sdim 738296417Sdim this->TrackLaneMasks = TrackLaneMasks; 739261991Sdim MISUnitMap.clear(); 740261991Sdim ScheduleDAG::clearDAG(); 741261991Sdim 742234353Sdim // Create an SUnit for each real instruction. 743234353Sdim initSUnits(); 744234353Sdim 745261991Sdim if (PDiffs) 746261991Sdim PDiffs->init(SUnits.size()); 747261991Sdim 748309124Sdim // We build scheduling units by walking a block's instruction list 749309124Sdim // from bottom to top. 750193323Sed 751309124Sdim // Each MIs' memory operand(s) is analyzed to a list of underlying 752309124Sdim // objects. The SU is then inserted in the SUList(s) mapped from the 753309124Sdim // Value(s). Each Value thus gets mapped to lists of SUs depending 754309124Sdim // on it, stores and loads kept separately. Two SUs are trivially 755309124Sdim // non-aliasing if they both depend on only identified Values and do 756309124Sdim // not share any common Value. 757309124Sdim Value2SUsMap Stores, Loads(1 /*TrueMemOrderLatency*/); 758193323Sed 759309124Sdim // Certain memory accesses are known to not alias any SU in Stores 760309124Sdim // or Loads, and have therefore their own 'NonAlias' 761309124Sdim // domain. E.g. spill / reload instructions never alias LLVM I/R 762309124Sdim // Values. It would be nice to assume that this type of memory 763309124Sdim // accesses always have a proper memory operand modelling, and are 764309124Sdim // therefore never unanalyzable, but this is conservatively not 765309124Sdim // done. 766309124Sdim Value2SUsMap NonAliasStores, NonAliasLoads(1 /*TrueMemOrderLatency*/); 767193323Sed 768353358Sdim // Track all instructions that may raise floating-point exceptions. 769353358Sdim // These do not depend on one other (or normal loads or stores), but 770353358Sdim // must not be rescheduled across global barriers. Note that we don't 771353358Sdim // really need a "map" here since we don't track those MIs by value; 772353358Sdim // using the same Value2SUsMap data type here is simply a matter of 773353358Sdim // convenience. 774353358Sdim Value2SUsMap FPExceptions; 775353358Sdim 776205218Srdivacky // Remove any stale debug info; sometimes BuildSchedGraph is called again 777205218Srdivacky // without emitting the info from the previous call. 778223017Sdim DbgValues.clear(); 779276479Sdim FirstDbgValue = nullptr; 780205218Srdivacky 781234353Sdim assert(Defs.empty() && Uses.empty() && 782234353Sdim "Only BuildGraph should update Defs/Uses"); 783249423Sdim Defs.setUniverse(TRI->getNumRegs()); 784249423Sdim Uses.setUniverse(TRI->getNumRegs()); 785234353Sdim 786296417Sdim assert(CurrentVRegDefs.empty() && "nobody else should use CurrentVRegDefs"); 787296417Sdim assert(CurrentVRegUses.empty() && "nobody else should use CurrentVRegUses"); 788296417Sdim unsigned NumVirtRegs = MRI.getNumVirtRegs(); 789296417Sdim CurrentVRegDefs.setUniverse(NumVirtRegs); 790296417Sdim CurrentVRegUses.setUniverse(NumVirtRegs); 791296417Sdim 792218893Sdim // Model data dependencies between instructions being scheduled and the 793218893Sdim // ExitSU. 794234353Sdim addSchedBarrierDeps(); 795218893Sdim 796193323Sed // Walk the list of instructions, from bottom moving up. 797276479Sdim MachineInstr *DbgMI = nullptr; 798234353Sdim for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin; 799193323Sed MII != MIE; --MII) { 800309124Sdim MachineInstr &MI = *std::prev(MII); 801309124Sdim if (DbgMI) { 802309124Sdim DbgValues.push_back(std::make_pair(DbgMI, &MI)); 803276479Sdim DbgMI = nullptr; 804223017Sdim } 805223017Sdim 806309124Sdim if (MI.isDebugValue()) { 807309124Sdim DbgMI = &MI; 808205218Srdivacky continue; 809205218Srdivacky } 810341825Sdim if (MI.isDebugLabel()) 811341825Sdim continue; 812341825Sdim 813309124Sdim SUnit *SU = MISUnitMap[&MI]; 814261991Sdim assert(SU && "No SUnit mapped to this MI"); 815261991Sdim 816239462Sdim if (RPTracker) { 817296417Sdim RegisterOperands RegOpers; 818309124Sdim RegOpers.collect(MI, *TRI, MRI, TrackLaneMasks, false); 819309124Sdim if (TrackLaneMasks) { 820309124Sdim SlotIndex SlotIdx = LIS->getInstructionIndex(MI); 821309124Sdim RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx); 822309124Sdim } 823296417Sdim if (PDiffs != nullptr) 824296417Sdim PDiffs->addInstruction(SU->NodeNum, RegOpers, MRI); 825296417Sdim 826327952Sdim if (RPTracker->getPos() == RegionEnd || &*RPTracker->getPos() != &MI) 827327952Sdim RPTracker->recedeSkipDebugValues(); 828309124Sdim assert(&*RPTracker->getPos() == &MI && "RPTracker in sync"); 829296417Sdim RPTracker->recede(RegOpers); 830239462Sdim } 831223017Sdim 832276479Sdim assert( 833309124Sdim (CanHandleTerminators || (!MI.isTerminator() && !MI.isPosition())) && 834276479Sdim "Cannot schedule terminators or labels!"); 835193323Sed 836193323Sed // Add register-based dependencies (data, anti, and output). 837309124Sdim // For some instructions (calls, returns, inline-asm, etc.) there can 838309124Sdim // be explicit uses and implicit defs, in which case the use will appear 839309124Sdim // on the operand list before the def. Do two passes over the operand 840309124Sdim // list to make sure that defs are processed before any uses. 841249423Sdim bool HasVRegDef = false; 842309124Sdim for (unsigned j = 0, n = MI.getNumOperands(); j != n; ++j) { 843309124Sdim const MachineOperand &MO = MI.getOperand(j); 844309124Sdim if (!MO.isReg() || !MO.isDef()) 845309124Sdim continue; 846360784Sdim Register Reg = MO.getReg(); 847360784Sdim if (Register::isPhysicalRegister(Reg)) { 848234353Sdim addPhysRegDeps(SU, j); 849360784Sdim } else if (Register::isVirtualRegister(Reg)) { 850309124Sdim HasVRegDef = true; 851309124Sdim addVRegDefDeps(SU, j); 852193323Sed } 853193323Sed } 854309124Sdim // Now process all uses. 855309124Sdim for (unsigned j = 0, n = MI.getNumOperands(); j != n; ++j) { 856309124Sdim const MachineOperand &MO = MI.getOperand(j); 857309124Sdim // Only look at use operands. 858309124Sdim // We do not need to check for MO.readsReg() here because subsequent 859309124Sdim // subregister defs will get output dependence edges and need no 860309124Sdim // additional use dependencies. 861309124Sdim if (!MO.isReg() || !MO.isUse()) 862309124Sdim continue; 863360784Sdim Register Reg = MO.getReg(); 864360784Sdim if (Register::isPhysicalRegister(Reg)) { 865309124Sdim addPhysRegDeps(SU, j); 866360784Sdim } else if (Register::isVirtualRegister(Reg) && MO.readsReg()) { 867309124Sdim addVRegUseDeps(SU, j); 868314564Sdim } 869309124Sdim } 870309124Sdim 871249423Sdim // If we haven't seen any uses in this scheduling region, create a 872249423Sdim // dependence edge to ExitSU to model the live-out latency. This is required 873249423Sdim // for vreg defs with no in-region use, and prefetches with no vreg def. 874249423Sdim // 875249423Sdim // FIXME: NumDataSuccs would be more precise than NumSuccs here. This 876249423Sdim // check currently relies on being called before adding chain deps. 877309124Sdim if (SU->NumSuccs == 0 && SU->Latency > 1 && (HasVRegDef || MI.mayLoad())) { 878249423Sdim SDep Dep(SU, SDep::Artificial); 879249423Sdim Dep.setLatency(SU->Latency - 1); 880249423Sdim ExitSU.addPred(Dep); 881249423Sdim } 882193323Sed 883309124Sdim // Add memory dependencies (Note: isStoreToStackSlot and 884309124Sdim // isLoadFromStackSLot are not usable after stack slots are lowered to 885309124Sdim // actual addresses). 886309124Sdim 887309124Sdim // This is a barrier event that acts as a pivotal node in the DAG. 888309124Sdim if (isGlobalMemoryObject(AA, &MI)) { 889309124Sdim 890309124Sdim // Become the barrier chain. 891199481Srdivacky if (BarrierChain) 892309124Sdim BarrierChain->addPredBarrier(SU); 893199481Srdivacky BarrierChain = SU; 894199481Srdivacky 895341825Sdim LLVM_DEBUG(dbgs() << "Global memory object and new barrier chain: SU(" 896341825Sdim << BarrierChain->NodeNum << ").\n";); 897280031Sdim 898309124Sdim // Add dependencies against everything below it and clear maps. 899309124Sdim addBarrierChain(Stores); 900309124Sdim addBarrierChain(Loads); 901309124Sdim addBarrierChain(NonAliasStores); 902309124Sdim addBarrierChain(NonAliasLoads); 903353358Sdim addBarrierChain(FPExceptions); 904249423Sdim 905309124Sdim continue; 906309124Sdim } 907249423Sdim 908353358Sdim // Instructions that may raise FP exceptions may not be moved 909353358Sdim // across any global barriers. 910353358Sdim if (MI.mayRaiseFPException()) { 911353358Sdim if (BarrierChain) 912353358Sdim BarrierChain->addPredBarrier(SU); 913353358Sdim 914353358Sdim FPExceptions.insert(SU, UnknownValue); 915353358Sdim 916353358Sdim if (FPExceptions.size() >= HugeRegion) { 917353358Sdim LLVM_DEBUG(dbgs() << "Reducing FPExceptions map.\n";); 918353358Sdim Value2SUsMap empty; 919353358Sdim reduceHugeMemNodeMaps(FPExceptions, empty, getReductionSize()); 920353358Sdim } 921353358Sdim } 922353358Sdim 923309124Sdim // If it's not a store or a variant load, we're done. 924314564Sdim if (!MI.mayStore() && 925314564Sdim !(MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))) 926309124Sdim continue; 927249423Sdim 928309124Sdim // Always add dependecy edge to BarrierChain if present. 929309124Sdim if (BarrierChain) 930309124Sdim BarrierChain->addPredBarrier(SU); 931276479Sdim 932309124Sdim // Find the underlying objects for MI. The Objs vector is either 933309124Sdim // empty, or filled with the Values of memory locations which this 934327952Sdim // SU depends on. 935309124Sdim UnderlyingObjectsVector Objs; 936327952Sdim bool ObjsFound = getUnderlyingObjectsForInstr(&MI, MFI, Objs, 937327952Sdim MF.getDataLayout()); 938309124Sdim 939309124Sdim if (MI.mayStore()) { 940327952Sdim if (!ObjsFound) { 941309124Sdim // An unknown store depends on all stores and loads. 942309124Sdim addChainDependencies(SU, Stores); 943309124Sdim addChainDependencies(SU, NonAliasStores); 944309124Sdim addChainDependencies(SU, Loads); 945309124Sdim addChainDependencies(SU, NonAliasLoads); 946309124Sdim 947309124Sdim // Map this store to 'UnknownValue'. 948309124Sdim Stores.insert(SU, UnknownValue); 949198953Srdivacky } else { 950309124Sdim // Add precise dependencies against all previously seen memory 951309124Sdim // accesses mapped to the same Value(s). 952309124Sdim for (const UnderlyingObject &UnderlObj : Objs) { 953309124Sdim ValueType V = UnderlObj.getValue(); 954309124Sdim bool ThisMayAlias = UnderlObj.mayAlias(); 955249423Sdim 956309124Sdim // Add dependencies to previous stores and loads mapped to V. 957309124Sdim addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V); 958309124Sdim addChainDependencies(SU, (ThisMayAlias ? Loads : NonAliasLoads), V); 959309124Sdim } 960309124Sdim // Update the store map after all chains have been added to avoid adding 961309124Sdim // self-loop edge if multiple underlying objects are present. 962309124Sdim for (const UnderlyingObject &UnderlObj : Objs) { 963309124Sdim ValueType V = UnderlObj.getValue(); 964309124Sdim bool ThisMayAlias = UnderlObj.mayAlias(); 965223017Sdim 966309124Sdim // Map this store to V. 967309124Sdim (ThisMayAlias ? Stores : NonAliasStores).insert(SU, V); 968198892Srdivacky } 969309124Sdim // The store may have dependencies to unanalyzable loads and 970309124Sdim // stores. 971309124Sdim addChainDependencies(SU, Loads, UnknownValue); 972309124Sdim addChainDependencies(SU, Stores, UnknownValue); 973309124Sdim } 974309124Sdim } else { // SU is a load. 975327952Sdim if (!ObjsFound) { 976309124Sdim // An unknown load depends on all stores. 977309124Sdim addChainDependencies(SU, Stores); 978309124Sdim addChainDependencies(SU, NonAliasStores); 979249423Sdim 980309124Sdim Loads.insert(SU, UnknownValue); 981309124Sdim } else { 982309124Sdim for (const UnderlyingObject &UnderlObj : Objs) { 983309124Sdim ValueType V = UnderlObj.getValue(); 984309124Sdim bool ThisMayAlias = UnderlObj.mayAlias(); 985249423Sdim 986309124Sdim // Add precise dependencies against all previously seen stores 987309124Sdim // mapping to the same Value(s). 988309124Sdim addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V); 989249423Sdim 990309124Sdim // Map this load to V. 991309124Sdim (ThisMayAlias ? Loads : NonAliasLoads).insert(SU, V); 992249423Sdim } 993309124Sdim // The load may have dependencies to unanalyzable stores. 994309124Sdim addChainDependencies(SU, Stores, UnknownValue); 995223017Sdim } 996193323Sed } 997309124Sdim 998309124Sdim // Reduce maps if they grow huge. 999309124Sdim if (Stores.size() + Loads.size() >= HugeRegion) { 1000341825Sdim LLVM_DEBUG(dbgs() << "Reducing Stores and Loads maps.\n";); 1001309124Sdim reduceHugeMemNodeMaps(Stores, Loads, getReductionSize()); 1002309124Sdim } 1003309124Sdim if (NonAliasStores.size() + NonAliasLoads.size() >= HugeRegion) { 1004341825Sdim LLVM_DEBUG( 1005341825Sdim dbgs() << "Reducing NonAliasStores and NonAliasLoads maps.\n";); 1006309124Sdim reduceHugeMemNodeMaps(NonAliasStores, NonAliasLoads, getReductionSize()); 1007309124Sdim } 1008193323Sed } 1009309124Sdim 1010249423Sdim if (DbgMI) 1011249423Sdim FirstDbgValue = DbgMI; 1012193323Sed 1013234353Sdim Defs.clear(); 1014234353Sdim Uses.clear(); 1015296417Sdim CurrentVRegDefs.clear(); 1016296417Sdim CurrentVRegUses.clear(); 1017353358Sdim 1018353358Sdim Topo.MarkDirty(); 1019193323Sed} 1020193323Sed 1021309124Sdimraw_ostream &llvm::operator<<(raw_ostream &OS, const PseudoSourceValue* PSV) { 1022309124Sdim PSV->printCustom(OS); 1023309124Sdim return OS; 1024309124Sdim} 1025309124Sdim 1026309124Sdimvoid ScheduleDAGInstrs::Value2SUsMap::dump() { 1027309124Sdim for (auto &Itr : *this) { 1028309124Sdim if (Itr.first.is<const Value*>()) { 1029309124Sdim const Value *V = Itr.first.get<const Value*>(); 1030309124Sdim if (isa<UndefValue>(V)) 1031309124Sdim dbgs() << "Unknown"; 1032309124Sdim else 1033309124Sdim V->printAsOperand(dbgs()); 1034309124Sdim } 1035309124Sdim else if (Itr.first.is<const PseudoSourceValue*>()) 1036309124Sdim dbgs() << Itr.first.get<const PseudoSourceValue*>(); 1037309124Sdim else 1038309124Sdim llvm_unreachable("Unknown Value type."); 1039309124Sdim 1040309124Sdim dbgs() << " : "; 1041309124Sdim dumpSUList(Itr.second); 1042309124Sdim } 1043309124Sdim} 1044309124Sdim 1045309124Sdimvoid ScheduleDAGInstrs::reduceHugeMemNodeMaps(Value2SUsMap &stores, 1046309124Sdim Value2SUsMap &loads, unsigned N) { 1047341825Sdim LLVM_DEBUG(dbgs() << "Before reduction:\nStoring SUnits:\n"; stores.dump(); 1048341825Sdim dbgs() << "Loading SUnits:\n"; loads.dump()); 1049309124Sdim 1050309124Sdim // Insert all SU's NodeNums into a vector and sort it. 1051309124Sdim std::vector<unsigned> NodeNums; 1052309124Sdim NodeNums.reserve(stores.size() + loads.size()); 1053309124Sdim for (auto &I : stores) 1054309124Sdim for (auto *SU : I.second) 1055309124Sdim NodeNums.push_back(SU->NodeNum); 1056309124Sdim for (auto &I : loads) 1057309124Sdim for (auto *SU : I.second) 1058309124Sdim NodeNums.push_back(SU->NodeNum); 1059344779Sdim llvm::sort(NodeNums); 1060309124Sdim 1061309124Sdim // The N last elements in NodeNums will be removed, and the SU with 1062309124Sdim // the lowest NodeNum of them will become the new BarrierChain to 1063309124Sdim // let the not yet seen SUs have a dependency to the removed SUs. 1064321369Sdim assert(N <= NodeNums.size()); 1065309124Sdim SUnit *newBarrierChain = &SUnits[*(NodeNums.end() - N)]; 1066309124Sdim if (BarrierChain) { 1067309124Sdim // The aliasing and non-aliasing maps reduce independently of each 1068309124Sdim // other, but share a common BarrierChain. Check if the 1069309124Sdim // newBarrierChain is above the former one. If it is not, it may 1070309124Sdim // introduce a loop to use newBarrierChain, so keep the old one. 1071309124Sdim if (newBarrierChain->NodeNum < BarrierChain->NodeNum) { 1072309124Sdim BarrierChain->addPredBarrier(newBarrierChain); 1073309124Sdim BarrierChain = newBarrierChain; 1074341825Sdim LLVM_DEBUG(dbgs() << "Inserting new barrier chain: SU(" 1075341825Sdim << BarrierChain->NodeNum << ").\n";); 1076309124Sdim } 1077309124Sdim else 1078341825Sdim LLVM_DEBUG(dbgs() << "Keeping old barrier chain: SU(" 1079341825Sdim << BarrierChain->NodeNum << ").\n";); 1080309124Sdim } 1081309124Sdim else 1082309124Sdim BarrierChain = newBarrierChain; 1083309124Sdim 1084309124Sdim insertBarrierChain(stores); 1085309124Sdim insertBarrierChain(loads); 1086309124Sdim 1087341825Sdim LLVM_DEBUG(dbgs() << "After reduction:\nStoring SUnits:\n"; stores.dump(); 1088341825Sdim dbgs() << "Loading SUnits:\n"; loads.dump()); 1089309124Sdim} 1090309124Sdim 1091321369Sdimstatic void toggleKills(const MachineRegisterInfo &MRI, LivePhysRegs &LiveRegs, 1092321369Sdim MachineInstr &MI, bool addToLiveRegs) { 1093321369Sdim for (MachineOperand &MO : MI.operands()) { 1094321369Sdim if (!MO.isReg() || !MO.readsReg()) 1095321369Sdim continue; 1096360784Sdim Register Reg = MO.getReg(); 1097321369Sdim if (!Reg) 1098321369Sdim continue; 1099276479Sdim 1100321369Sdim // Things that are available after the instruction are killed by it. 1101321369Sdim bool IsKill = LiveRegs.available(MRI, Reg); 1102321369Sdim MO.setIsKill(IsKill); 1103321369Sdim if (addToLiveRegs) 1104321369Sdim LiveRegs.addReg(Reg); 1105276479Sdim } 1106276479Sdim} 1107276479Sdim 1108321369Sdimvoid ScheduleDAGInstrs::fixupKills(MachineBasicBlock &MBB) { 1109341825Sdim LLVM_DEBUG(dbgs() << "Fixup kills for " << printMBBReference(MBB) << '\n'); 1110288943Sdim 1111321369Sdim LiveRegs.init(*TRI); 1112321369Sdim LiveRegs.addLiveOuts(MBB); 1113288943Sdim 1114276479Sdim // Examine block from end to start... 1115321369Sdim for (MachineInstr &MI : make_range(MBB.rbegin(), MBB.rend())) { 1116341825Sdim if (MI.isDebugInstr()) 1117276479Sdim continue; 1118276479Sdim 1119276479Sdim // Update liveness. Registers that are defed but not used in this 1120276479Sdim // instruction are now dead. Mark register and all subregs as they 1121276479Sdim // are completely defined. 1122321369Sdim for (ConstMIBundleOperands O(MI); O.isValid(); ++O) { 1123321369Sdim const MachineOperand &MO = *O; 1124321369Sdim if (MO.isReg()) { 1125321369Sdim if (!MO.isDef()) 1126321369Sdim continue; 1127360784Sdim Register Reg = MO.getReg(); 1128321369Sdim if (!Reg) 1129321369Sdim continue; 1130321369Sdim LiveRegs.removeReg(Reg); 1131321369Sdim } else if (MO.isRegMask()) { 1132321369Sdim LiveRegs.removeRegsInMask(MO); 1133321369Sdim } 1134276479Sdim } 1135276479Sdim 1136321369Sdim // If there is a bundle header fix it up first. 1137321369Sdim if (!MI.isBundled()) { 1138321369Sdim toggleKills(MRI, LiveRegs, MI, true); 1139321369Sdim } else { 1140353358Sdim MachineBasicBlock::instr_iterator Bundle = MI.getIterator(); 1141353358Sdim if (MI.isBundle()) 1142321369Sdim toggleKills(MRI, LiveRegs, MI, false); 1143353358Sdim 1144321369Sdim // Some targets make the (questionable) assumtion that the instructions 1145321369Sdim // inside the bundle are ordered and consequently only the last use of 1146321369Sdim // a register inside the bundle can kill it. 1147353358Sdim MachineBasicBlock::instr_iterator I = std::next(Bundle); 1148321369Sdim while (I->isBundledWithSucc()) 1149321369Sdim ++I; 1150321369Sdim do { 1151341825Sdim if (!I->isDebugInstr()) 1152321369Sdim toggleKills(MRI, LiveRegs, *I, true); 1153321369Sdim --I; 1154353358Sdim } while (I != Bundle); 1155276479Sdim } 1156276479Sdim } 1157276479Sdim} 1158276479Sdim 1159344779Sdimvoid ScheduleDAGInstrs::dumpNode(const SUnit &SU) const { 1160243830Sdim#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1161344779Sdim dumpNodeName(SU); 1162344779Sdim dbgs() << ": "; 1163344779Sdim SU.getInstr()->dump(); 1164243830Sdim#endif 1165193323Sed} 1166193323Sed 1167344779Sdimvoid ScheduleDAGInstrs::dump() const { 1168344779Sdim#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1169344779Sdim if (EntrySU.getInstr() != nullptr) 1170344779Sdim dumpNodeAll(EntrySU); 1171344779Sdim for (const SUnit &SU : SUnits) 1172344779Sdim dumpNodeAll(SU); 1173344779Sdim if (ExitSU.getInstr() != nullptr) 1174344779Sdim dumpNodeAll(ExitSU); 1175344779Sdim#endif 1176344779Sdim} 1177344779Sdim 1178193323Sedstd::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const { 1179193323Sed std::string s; 1180193323Sed raw_string_ostream oss(s); 1181193323Sed if (SU == &EntrySU) 1182193323Sed oss << "<entry>"; 1183193323Sed else if (SU == &ExitSU) 1184193323Sed oss << "<exit>"; 1185193323Sed else 1186288943Sdim SU->getInstr()->print(oss, /*SkipOpers=*/true); 1187193323Sed return oss.str(); 1188193323Sed} 1189193323Sed 1190234353Sdim/// Return the basic block label. It is not necessarilly unique because a block 1191234353Sdim/// contains multiple scheduling regions. But it is fine for visualization. 1192234353Sdimstd::string ScheduleDAGInstrs::getDAGName() const { 1193234353Sdim return "dag." + BB->getFullName(); 1194193323Sed} 1195243830Sdim 1196353358Sdimbool ScheduleDAGInstrs::canAddEdge(SUnit *SuccSU, SUnit *PredSU) { 1197353358Sdim return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU); 1198353358Sdim} 1199353358Sdim 1200353358Sdimbool ScheduleDAGInstrs::addEdge(SUnit *SuccSU, const SDep &PredDep) { 1201353358Sdim if (SuccSU != &ExitSU) { 1202353358Sdim // Do not use WillCreateCycle, it assumes SD scheduling. 1203353358Sdim // If Pred is reachable from Succ, then the edge creates a cycle. 1204353358Sdim if (Topo.IsReachable(PredDep.getSUnit(), SuccSU)) 1205353358Sdim return false; 1206353358Sdim Topo.AddPredQueued(SuccSU, PredDep.getSUnit()); 1207353358Sdim } 1208353358Sdim SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial()); 1209353358Sdim // Return true regardless of whether a new edge needed to be inserted. 1210353358Sdim return true; 1211353358Sdim} 1212353358Sdim 1213249423Sdim//===----------------------------------------------------------------------===// 1214249423Sdim// SchedDFSResult Implementation 1215249423Sdim//===----------------------------------------------------------------------===// 1216249423Sdim 1217249423Sdimnamespace llvm { 1218321369Sdim 1219321369Sdim/// Internal state used to compute SchedDFSResult. 1220249423Sdimclass SchedDFSImpl { 1221249423Sdim SchedDFSResult &R; 1222249423Sdim 1223249423Sdim /// Join DAG nodes into equivalence classes by their subtree. 1224249423Sdim IntEqClasses SubtreeClasses; 1225249423Sdim /// List PredSU, SuccSU pairs that represent data edges between subtrees. 1226321369Sdim std::vector<std::pair<const SUnit *, const SUnit*>> ConnectionPairs; 1227249423Sdim 1228249423Sdim struct RootData { 1229249423Sdim unsigned NodeID; 1230321369Sdim unsigned ParentNodeID; ///< Parent node (member of the parent subtree). 1231321369Sdim unsigned SubInstrCount = 0; ///< Instr count in this tree only, not 1232321369Sdim /// children. 1233249423Sdim 1234249423Sdim RootData(unsigned id): NodeID(id), 1235321369Sdim ParentNodeID(SchedDFSResult::InvalidSubtreeID) {} 1236249423Sdim 1237249423Sdim unsigned getSparseSetIndex() const { return NodeID; } 1238249423Sdim }; 1239249423Sdim 1240249423Sdim SparseSet<RootData> RootSet; 1241249423Sdim 1242249423Sdimpublic: 1243249423Sdim SchedDFSImpl(SchedDFSResult &r): R(r), SubtreeClasses(R.DFSNodeData.size()) { 1244249423Sdim RootSet.setUniverse(R.DFSNodeData.size()); 1245249423Sdim } 1246249423Sdim 1247321369Sdim /// Returns true if this node been visited by the DFS traversal. 1248249423Sdim /// 1249249423Sdim /// During visitPostorderNode the Node's SubtreeID is assigned to the Node 1250249423Sdim /// ID. Later, SubtreeID is updated but remains valid. 1251249423Sdim bool isVisited(const SUnit *SU) const { 1252249423Sdim return R.DFSNodeData[SU->NodeNum].SubtreeID 1253249423Sdim != SchedDFSResult::InvalidSubtreeID; 1254249423Sdim } 1255249423Sdim 1256321369Sdim /// Initializes this node's instruction count. We don't need to flag the node 1257249423Sdim /// visited until visitPostorder because the DAG cannot have cycles. 1258249423Sdim void visitPreorder(const SUnit *SU) { 1259249423Sdim R.DFSNodeData[SU->NodeNum].InstrCount = 1260249423Sdim SU->getInstr()->isTransient() ? 0 : 1; 1261249423Sdim } 1262249423Sdim 1263249423Sdim /// Called once for each node after all predecessors are visited. Revisit this 1264249423Sdim /// node's predecessors and potentially join them now that we know the ILP of 1265249423Sdim /// the other predecessors. 1266249423Sdim void visitPostorderNode(const SUnit *SU) { 1267249423Sdim // Mark this node as the root of a subtree. It may be joined with its 1268249423Sdim // successors later. 1269249423Sdim R.DFSNodeData[SU->NodeNum].SubtreeID = SU->NodeNum; 1270249423Sdim RootData RData(SU->NodeNum); 1271249423Sdim RData.SubInstrCount = SU->getInstr()->isTransient() ? 0 : 1; 1272249423Sdim 1273249423Sdim // If any predecessors are still in their own subtree, they either cannot be 1274249423Sdim // joined or are large enough to remain separate. If this parent node's 1275249423Sdim // total instruction count is not greater than a child subtree by at least 1276249423Sdim // the subtree limit, then try to join it now since splitting subtrees is 1277249423Sdim // only useful if multiple high-pressure paths are possible. 1278249423Sdim unsigned InstrCount = R.DFSNodeData[SU->NodeNum].InstrCount; 1279314564Sdim for (const SDep &PredDep : SU->Preds) { 1280314564Sdim if (PredDep.getKind() != SDep::Data) 1281249423Sdim continue; 1282314564Sdim unsigned PredNum = PredDep.getSUnit()->NodeNum; 1283249423Sdim if ((InstrCount - R.DFSNodeData[PredNum].InstrCount) < R.SubtreeLimit) 1284314564Sdim joinPredSubtree(PredDep, SU, /*CheckLimit=*/false); 1285249423Sdim 1286249423Sdim // Either link or merge the TreeData entry from the child to the parent. 1287249423Sdim if (R.DFSNodeData[PredNum].SubtreeID == PredNum) { 1288249423Sdim // If the predecessor's parent is invalid, this is a tree edge and the 1289249423Sdim // current node is the parent. 1290249423Sdim if (RootSet[PredNum].ParentNodeID == SchedDFSResult::InvalidSubtreeID) 1291249423Sdim RootSet[PredNum].ParentNodeID = SU->NodeNum; 1292249423Sdim } 1293249423Sdim else if (RootSet.count(PredNum)) { 1294249423Sdim // The predecessor is not a root, but is still in the root set. This 1295249423Sdim // must be the new parent that it was just joined to. Note that 1296249423Sdim // RootSet[PredNum].ParentNodeID may either be invalid or may still be 1297249423Sdim // set to the original parent. 1298249423Sdim RData.SubInstrCount += RootSet[PredNum].SubInstrCount; 1299249423Sdim RootSet.erase(PredNum); 1300249423Sdim } 1301249423Sdim } 1302249423Sdim RootSet[SU->NodeNum] = RData; 1303249423Sdim } 1304249423Sdim 1305341825Sdim /// Called once for each tree edge after calling visitPostOrderNode on 1306321369Sdim /// the predecessor. Increment the parent node's instruction count and 1307249423Sdim /// preemptively join this subtree to its parent's if it is small enough. 1308249423Sdim void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) { 1309249423Sdim R.DFSNodeData[Succ->NodeNum].InstrCount 1310249423Sdim += R.DFSNodeData[PredDep.getSUnit()->NodeNum].InstrCount; 1311249423Sdim joinPredSubtree(PredDep, Succ); 1312249423Sdim } 1313249423Sdim 1314321369Sdim /// Adds a connection for cross edges. 1315249423Sdim void visitCrossEdge(const SDep &PredDep, const SUnit *Succ) { 1316249423Sdim ConnectionPairs.push_back(std::make_pair(PredDep.getSUnit(), Succ)); 1317249423Sdim } 1318249423Sdim 1319321369Sdim /// Sets each node's subtree ID to the representative ID and record 1320321369Sdim /// connections between trees. 1321249423Sdim void finalize() { 1322249423Sdim SubtreeClasses.compress(); 1323249423Sdim R.DFSTreeData.resize(SubtreeClasses.getNumClasses()); 1324249423Sdim assert(SubtreeClasses.getNumClasses() == RootSet.size() 1325249423Sdim && "number of roots should match trees"); 1326314564Sdim for (const RootData &Root : RootSet) { 1327314564Sdim unsigned TreeID = SubtreeClasses[Root.NodeID]; 1328314564Sdim if (Root.ParentNodeID != SchedDFSResult::InvalidSubtreeID) 1329314564Sdim R.DFSTreeData[TreeID].ParentTreeID = SubtreeClasses[Root.ParentNodeID]; 1330314564Sdim R.DFSTreeData[TreeID].SubInstrCount = Root.SubInstrCount; 1331249423Sdim // Note that SubInstrCount may be greater than InstrCount if we joined 1332249423Sdim // subtrees across a cross edge. InstrCount will be attributed to the 1333249423Sdim // original parent, while SubInstrCount will be attributed to the joined 1334249423Sdim // parent. 1335249423Sdim } 1336249423Sdim R.SubtreeConnections.resize(SubtreeClasses.getNumClasses()); 1337249423Sdim R.SubtreeConnectLevels.resize(SubtreeClasses.getNumClasses()); 1338341825Sdim LLVM_DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n"); 1339249423Sdim for (unsigned Idx = 0, End = R.DFSNodeData.size(); Idx != End; ++Idx) { 1340249423Sdim R.DFSNodeData[Idx].SubtreeID = SubtreeClasses[Idx]; 1341341825Sdim LLVM_DEBUG(dbgs() << " SU(" << Idx << ") in tree " 1342341825Sdim << R.DFSNodeData[Idx].SubtreeID << '\n'); 1343249423Sdim } 1344314564Sdim for (const std::pair<const SUnit*, const SUnit*> &P : ConnectionPairs) { 1345314564Sdim unsigned PredTree = SubtreeClasses[P.first->NodeNum]; 1346314564Sdim unsigned SuccTree = SubtreeClasses[P.second->NodeNum]; 1347249423Sdim if (PredTree == SuccTree) 1348249423Sdim continue; 1349314564Sdim unsigned Depth = P.first->getDepth(); 1350249423Sdim addConnection(PredTree, SuccTree, Depth); 1351249423Sdim addConnection(SuccTree, PredTree, Depth); 1352249423Sdim } 1353249423Sdim } 1354249423Sdim 1355249423Sdimprotected: 1356321369Sdim /// Joins the predecessor subtree with the successor that is its DFS parent. 1357321369Sdim /// Applies some heuristics before joining. 1358249423Sdim bool joinPredSubtree(const SDep &PredDep, const SUnit *Succ, 1359249423Sdim bool CheckLimit = true) { 1360249423Sdim assert(PredDep.getKind() == SDep::Data && "Subtrees are for data edges"); 1361249423Sdim 1362249423Sdim // Check if the predecessor is already joined. 1363249423Sdim const SUnit *PredSU = PredDep.getSUnit(); 1364249423Sdim unsigned PredNum = PredSU->NodeNum; 1365249423Sdim if (R.DFSNodeData[PredNum].SubtreeID != PredNum) 1366249423Sdim return false; 1367249423Sdim 1368249423Sdim // Four is the magic number of successors before a node is considered a 1369249423Sdim // pinch point. 1370249423Sdim unsigned NumDataSucs = 0; 1371314564Sdim for (const SDep &SuccDep : PredSU->Succs) { 1372314564Sdim if (SuccDep.getKind() == SDep::Data) { 1373249423Sdim if (++NumDataSucs >= 4) 1374249423Sdim return false; 1375249423Sdim } 1376249423Sdim } 1377249423Sdim if (CheckLimit && R.DFSNodeData[PredNum].InstrCount > R.SubtreeLimit) 1378249423Sdim return false; 1379249423Sdim R.DFSNodeData[PredNum].SubtreeID = Succ->NodeNum; 1380249423Sdim SubtreeClasses.join(Succ->NodeNum, PredNum); 1381249423Sdim return true; 1382249423Sdim } 1383249423Sdim 1384249423Sdim /// Called by finalize() to record a connection between trees. 1385249423Sdim void addConnection(unsigned FromTree, unsigned ToTree, unsigned Depth) { 1386249423Sdim if (!Depth) 1387249423Sdim return; 1388249423Sdim 1389249423Sdim do { 1390249423Sdim SmallVectorImpl<SchedDFSResult::Connection> &Connections = 1391249423Sdim R.SubtreeConnections[FromTree]; 1392314564Sdim for (SchedDFSResult::Connection &C : Connections) { 1393314564Sdim if (C.TreeID == ToTree) { 1394314564Sdim C.Level = std::max(C.Level, Depth); 1395249423Sdim return; 1396249423Sdim } 1397249423Sdim } 1398249423Sdim Connections.push_back(SchedDFSResult::Connection(ToTree, Depth)); 1399249423Sdim FromTree = R.DFSTreeData[FromTree].ParentTreeID; 1400249423Sdim } while (FromTree != SchedDFSResult::InvalidSubtreeID); 1401249423Sdim } 1402249423Sdim}; 1403249423Sdim 1404321369Sdim} // end namespace llvm 1405321369Sdim 1406243830Sdimnamespace { 1407321369Sdim 1408321369Sdim/// Manage the stack used by a reverse depth-first search over the DAG. 1409243830Sdimclass SchedDAGReverseDFS { 1410321369Sdim std::vector<std::pair<const SUnit *, SUnit::const_pred_iterator>> DFSStack; 1411321369Sdim 1412243830Sdimpublic: 1413243830Sdim bool isComplete() const { return DFSStack.empty(); } 1414243830Sdim 1415243830Sdim void follow(const SUnit *SU) { 1416243830Sdim DFSStack.push_back(std::make_pair(SU, SU->Preds.begin())); 1417243830Sdim } 1418243830Sdim void advance() { ++DFSStack.back().second; } 1419243830Sdim 1420249423Sdim const SDep *backtrack() { 1421249423Sdim DFSStack.pop_back(); 1422276479Sdim return DFSStack.empty() ? nullptr : std::prev(DFSStack.back().second); 1423249423Sdim } 1424243830Sdim 1425243830Sdim const SUnit *getCurr() const { return DFSStack.back().first; } 1426243830Sdim 1427243830Sdim SUnit::const_pred_iterator getPred() const { return DFSStack.back().second; } 1428243830Sdim 1429243830Sdim SUnit::const_pred_iterator getPredEnd() const { 1430243830Sdim return getCurr()->Preds.end(); 1431243830Sdim } 1432243830Sdim}; 1433243830Sdim 1434321369Sdim} // end anonymous namespace 1435321369Sdim 1436249423Sdimstatic bool hasDataSucc(const SUnit *SU) { 1437314564Sdim for (const SDep &SuccDep : SU->Succs) { 1438314564Sdim if (SuccDep.getKind() == SDep::Data && 1439314564Sdim !SuccDep.getSUnit()->isBoundaryNode()) 1440249423Sdim return true; 1441249423Sdim } 1442249423Sdim return false; 1443243830Sdim} 1444243830Sdim 1445321369Sdim/// Computes an ILP metric for all nodes in the subDAG reachable via depth-first 1446243830Sdim/// search from this root. 1447249423Sdimvoid SchedDFSResult::compute(ArrayRef<SUnit> SUnits) { 1448243830Sdim if (!IsBottomUp) 1449327952Sdim llvm_unreachable("Top-down ILP metric is unimplemented"); 1450243830Sdim 1451249423Sdim SchedDFSImpl Impl(*this); 1452314564Sdim for (const SUnit &SU : SUnits) { 1453314564Sdim if (Impl.isVisited(&SU) || hasDataSucc(&SU)) 1454249423Sdim continue; 1455249423Sdim 1456249423Sdim SchedDAGReverseDFS DFS; 1457314564Sdim Impl.visitPreorder(&SU); 1458314564Sdim DFS.follow(&SU); 1459321369Sdim while (true) { 1460249423Sdim // Traverse the leftmost path as far as possible. 1461249423Sdim while (DFS.getPred() != DFS.getPredEnd()) { 1462249423Sdim const SDep &PredDep = *DFS.getPred(); 1463249423Sdim DFS.advance(); 1464249423Sdim // Ignore non-data edges. 1465249423Sdim if (PredDep.getKind() != SDep::Data 1466249423Sdim || PredDep.getSUnit()->isBoundaryNode()) { 1467249423Sdim continue; 1468249423Sdim } 1469249423Sdim // An already visited edge is a cross edge, assuming an acyclic DAG. 1470249423Sdim if (Impl.isVisited(PredDep.getSUnit())) { 1471249423Sdim Impl.visitCrossEdge(PredDep, DFS.getCurr()); 1472249423Sdim continue; 1473249423Sdim } 1474249423Sdim Impl.visitPreorder(PredDep.getSUnit()); 1475249423Sdim DFS.follow(PredDep.getSUnit()); 1476249423Sdim } 1477249423Sdim // Visit the top of the stack in postorder and backtrack. 1478249423Sdim const SUnit *Child = DFS.getCurr(); 1479249423Sdim const SDep *PredDep = DFS.backtrack(); 1480249423Sdim Impl.visitPostorderNode(Child); 1481249423Sdim if (PredDep) 1482249423Sdim Impl.visitPostorderEdge(*PredDep, DFS.getCurr()); 1483249423Sdim if (DFS.isComplete()) 1484249423Sdim break; 1485243830Sdim } 1486243830Sdim } 1487249423Sdim Impl.finalize(); 1488243830Sdim} 1489243830Sdim 1490249423Sdim/// The root of the given SubtreeID was just scheduled. For all subtrees 1491249423Sdim/// connected to this tree, record the depth of the connection so that the 1492249423Sdim/// nearest connected subtrees can be prioritized. 1493249423Sdimvoid SchedDFSResult::scheduleTree(unsigned SubtreeID) { 1494314564Sdim for (const Connection &C : SubtreeConnections[SubtreeID]) { 1495314564Sdim SubtreeConnectLevels[C.TreeID] = 1496314564Sdim std::max(SubtreeConnectLevels[C.TreeID], C.Level); 1497341825Sdim LLVM_DEBUG(dbgs() << " Tree: " << C.TreeID << " @" 1498341825Sdim << SubtreeConnectLevels[C.TreeID] << '\n'); 1499249423Sdim } 1500249423Sdim} 1501249423Sdim 1502321369Sdim#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1503321369SdimLLVM_DUMP_METHOD void ILPValue::print(raw_ostream &OS) const { 1504249423Sdim OS << InstrCount << " / " << Length << " = "; 1505249423Sdim if (!Length) 1506243830Sdim OS << "BADILP"; 1507249423Sdim else 1508249423Sdim OS << format("%g", ((double)InstrCount / Length)); 1509243830Sdim} 1510243830Sdim 1511321369SdimLLVM_DUMP_METHOD void ILPValue::dump() const { 1512243830Sdim dbgs() << *this << '\n'; 1513243830Sdim} 1514243830Sdim 1515243830Sdimnamespace llvm { 1516243830Sdim 1517276479SdimLLVM_DUMP_METHOD 1518243830Sdimraw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) { 1519243830Sdim Val.print(OS); 1520243830Sdim return OS; 1521243830Sdim} 1522243830Sdim 1523321369Sdim} // end namespace llvm 1524321369Sdim 1525321369Sdim#endif 1526