1//===- CodeGenSchedule.cpp - Scheduling MachineModels ---------------------===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8// 9// This file defines structures to encapsulate the machine model as described in 10// the target description. 11// 12//===----------------------------------------------------------------------===// 13 14#include "CodeGenSchedule.h" 15#include "CodeGenInstruction.h" 16#include "CodeGenTarget.h" 17#include "llvm/ADT/MapVector.h" 18#include "llvm/ADT/STLExtras.h" 19#include "llvm/ADT/SmallPtrSet.h" 20#include "llvm/ADT/SmallSet.h" 21#include "llvm/ADT/SmallVector.h" 22#include "llvm/Support/Casting.h" 23#include "llvm/Support/Debug.h" 24#include "llvm/Support/Regex.h" 25#include "llvm/Support/raw_ostream.h" 26#include "llvm/TableGen/Error.h" 27#include <algorithm> 28#include <iterator> 29#include <utility> 30 31using namespace llvm; 32 33#define DEBUG_TYPE "subtarget-emitter" 34 35#ifndef NDEBUG 36static void dumpIdxVec(ArrayRef<unsigned> V) { 37 for (unsigned Idx : V) 38 dbgs() << Idx << ", "; 39} 40#endif 41 42namespace { 43 44// (instrs a, b, ...) Evaluate and union all arguments. Identical to AddOp. 45struct InstrsOp : public SetTheory::Operator { 46 void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts, 47 ArrayRef<SMLoc> Loc) override { 48 ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts, Loc); 49 } 50}; 51 52// (instregex "OpcPat",...) Find all instructions matching an opcode pattern. 53struct InstRegexOp : public SetTheory::Operator { 54 const CodeGenTarget &Target; 55 InstRegexOp(const CodeGenTarget &t): Target(t) {} 56 57 /// Remove any text inside of parentheses from S. 
58 static std::string removeParens(llvm::StringRef S) { 59 std::string Result; 60 unsigned Paren = 0; 61 // NB: We don't care about escaped parens here. 62 for (char C : S) { 63 switch (C) { 64 case '(': 65 ++Paren; 66 break; 67 case ')': 68 --Paren; 69 break; 70 default: 71 if (Paren == 0) 72 Result += C; 73 } 74 } 75 return Result; 76 } 77 78 void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts, 79 ArrayRef<SMLoc> Loc) override { 80 ArrayRef<const CodeGenInstruction *> Instructions = 81 Target.getInstructionsByEnumValue(); 82 83 unsigned NumGeneric = Target.getNumFixedInstructions(); 84 unsigned NumPseudos = Target.getNumPseudoInstructions(); 85 auto Generics = Instructions.slice(0, NumGeneric); 86 auto Pseudos = Instructions.slice(NumGeneric, NumPseudos); 87 auto NonPseudos = Instructions.slice(NumGeneric + NumPseudos); 88 89 for (Init *Arg : make_range(Expr->arg_begin(), Expr->arg_end())) { 90 StringInit *SI = dyn_cast<StringInit>(Arg); 91 if (!SI) 92 PrintFatalError(Loc, "instregex requires pattern string: " + 93 Expr->getAsString()); 94 StringRef Original = SI->getValue(); 95 96 // Extract a prefix that we can binary search on. 97 static const char RegexMetachars[] = "()^$|*+?.[]\\{}"; 98 auto FirstMeta = Original.find_first_of(RegexMetachars); 99 100 // Look for top-level | or ?. We cannot optimize them to binary search. 101 if (removeParens(Original).find_first_of("|?") != std::string::npos) 102 FirstMeta = 0; 103 104 Optional<Regex> Regexpr = None; 105 StringRef Prefix = Original.substr(0, FirstMeta); 106 StringRef PatStr = Original.substr(FirstMeta); 107 if (!PatStr.empty()) { 108 // For the rest use a python-style prefix match. 109 std::string pat = std::string(PatStr); 110 if (pat[0] != '^') { 111 pat.insert(0, "^("); 112 pat.insert(pat.end(), ')'); 113 } 114 Regexpr = Regex(pat); 115 } 116 117 int NumMatches = 0; 118 119 // The generic opcodes are unsorted, handle them manually. 
120 for (auto *Inst : Generics) { 121 StringRef InstName = Inst->TheDef->getName(); 122 if (InstName.startswith(Prefix) && 123 (!Regexpr || Regexpr->match(InstName.substr(Prefix.size())))) { 124 Elts.insert(Inst->TheDef); 125 NumMatches++; 126 } 127 } 128 129 // Target instructions are split into two ranges: pseudo instructions 130 // first, than non-pseudos. Each range is in lexicographical order 131 // sorted by name. Find the sub-ranges that start with our prefix. 132 struct Comp { 133 bool operator()(const CodeGenInstruction *LHS, StringRef RHS) { 134 return LHS->TheDef->getName() < RHS; 135 } 136 bool operator()(StringRef LHS, const CodeGenInstruction *RHS) { 137 return LHS < RHS->TheDef->getName() && 138 !RHS->TheDef->getName().startswith(LHS); 139 } 140 }; 141 auto Range1 = 142 std::equal_range(Pseudos.begin(), Pseudos.end(), Prefix, Comp()); 143 auto Range2 = std::equal_range(NonPseudos.begin(), NonPseudos.end(), 144 Prefix, Comp()); 145 146 // For these ranges we know that instruction names start with the prefix. 147 // Check if there's a regex that needs to be checked. 148 const auto HandleNonGeneric = [&](const CodeGenInstruction *Inst) { 149 StringRef InstName = Inst->TheDef->getName(); 150 if (!Regexpr || Regexpr->match(InstName.substr(Prefix.size()))) { 151 Elts.insert(Inst->TheDef); 152 NumMatches++; 153 } 154 }; 155 std::for_each(Range1.first, Range1.second, HandleNonGeneric); 156 std::for_each(Range2.first, Range2.second, HandleNonGeneric); 157 158 if (0 == NumMatches) 159 PrintFatalError(Loc, "instregex has no matches: " + Original); 160 } 161 } 162}; 163 164} // end anonymous namespace 165 166/// CodeGenModels ctor interprets machine model records and populates maps. 167CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK, 168 const CodeGenTarget &TGT): 169 Records(RK), Target(TGT) { 170 171 Sets.addFieldExpander("InstRW", "Instrs"); 172 173 // Allow Set evaluation to recognize the dags used in InstRW records: 174 // (instrs Op1, Op1...) 
175 Sets.addOperator("instrs", std::make_unique<InstrsOp>()); 176 Sets.addOperator("instregex", std::make_unique<InstRegexOp>(Target)); 177 178 // Instantiate a CodeGenProcModel for each SchedMachineModel with the values 179 // that are explicitly referenced in tablegen records. Resources associated 180 // with each processor will be derived later. Populate ProcModelMap with the 181 // CodeGenProcModel instances. 182 collectProcModels(); 183 184 // Instantiate a CodeGenSchedRW for each SchedReadWrite record explicitly 185 // defined, and populate SchedReads and SchedWrites vectors. Implicit 186 // SchedReadWrites that represent sequences derived from expanded variant will 187 // be inferred later. 188 collectSchedRW(); 189 190 // Instantiate a CodeGenSchedClass for each unique SchedRW signature directly 191 // required by an instruction definition, and populate SchedClassIdxMap. Set 192 // NumItineraryClasses to the number of explicit itinerary classes referenced 193 // by instructions. Set NumInstrSchedClasses to the number of itinerary 194 // classes plus any classes implied by instructions that derive from class 195 // Sched and provide SchedRW list. This does not infer any new classes from 196 // SchedVariant. 197 collectSchedClasses(); 198 199 // Find instruction itineraries for each processor. Sort and populate 200 // CodeGenProcModel::ItinDefList. (Cycle-to-cycle itineraries). This requires 201 // all itinerary classes to be discovered. 202 collectProcItins(); 203 204 // Find ItinRW records for each processor and itinerary class. 205 // (For per-operand resources mapped to itinerary classes). 206 collectProcItinRW(); 207 208 // Find UnsupportedFeatures records for each processor. 209 // (For per-operand resources mapped to itinerary classes). 210 collectProcUnsupportedFeatures(); 211 212 // Infer new SchedClasses from SchedVariant. 213 inferSchedClasses(); 214 215 // Populate each CodeGenProcModel's WriteResDefs, ReadAdvanceDefs, and 216 // ProcResourceDefs. 
217 LLVM_DEBUG( 218 dbgs() << "\n+++ RESOURCE DEFINITIONS (collectProcResources) +++\n"); 219 collectProcResources(); 220 221 // Collect optional processor description. 222 collectOptionalProcessorInfo(); 223 224 // Check MCInstPredicate definitions. 225 checkMCInstPredicates(); 226 227 // Check STIPredicate definitions. 228 checkSTIPredicates(); 229 230 // Find STIPredicate definitions for each processor model, and construct 231 // STIPredicateFunction objects. 232 collectSTIPredicates(); 233 234 checkCompleteness(); 235} 236 237void CodeGenSchedModels::checkSTIPredicates() const { 238 DenseMap<StringRef, const Record *> Declarations; 239 240 // There cannot be multiple declarations with the same name. 241 const RecVec Decls = Records.getAllDerivedDefinitions("STIPredicateDecl"); 242 for (const Record *R : Decls) { 243 StringRef Name = R->getValueAsString("Name"); 244 const auto It = Declarations.find(Name); 245 if (It == Declarations.end()) { 246 Declarations[Name] = R; 247 continue; 248 } 249 250 PrintError(R->getLoc(), "STIPredicate " + Name + " multiply declared."); 251 PrintNote(It->second->getLoc(), "Previous declaration was here."); 252 PrintFatalError(R->getLoc(), "Invalid STIPredicateDecl found."); 253 } 254 255 // Disallow InstructionEquivalenceClasses with an empty instruction list. 256 const RecVec Defs = 257 Records.getAllDerivedDefinitions("InstructionEquivalenceClass"); 258 for (const Record *R : Defs) { 259 RecVec Opcodes = R->getValueAsListOfDefs("Opcodes"); 260 if (Opcodes.empty()) { 261 PrintFatalError(R->getLoc(), "Invalid InstructionEquivalenceClass " 262 "defined with an empty opcode list."); 263 } 264 } 265} 266 267// Used by function `processSTIPredicate` to construct a mask of machine 268// instruction operands. 
269static APInt constructOperandMask(ArrayRef<int64_t> Indices) { 270 APInt OperandMask; 271 if (Indices.empty()) 272 return OperandMask; 273 274 int64_t MaxIndex = *std::max_element(Indices.begin(), Indices.end()); 275 assert(MaxIndex >= 0 && "Invalid negative indices in input!"); 276 OperandMask = OperandMask.zext(MaxIndex + 1); 277 for (const int64_t Index : Indices) { 278 assert(Index >= 0 && "Invalid negative indices!"); 279 OperandMask.setBit(Index); 280 } 281 282 return OperandMask; 283} 284 285static void 286processSTIPredicate(STIPredicateFunction &Fn, 287 const DenseMap<Record *, unsigned> &ProcModelMap) { 288 DenseMap<const Record *, unsigned> Opcode2Index; 289 using OpcodeMapPair = std::pair<const Record *, OpcodeInfo>; 290 std::vector<OpcodeMapPair> OpcodeMappings; 291 std::vector<std::pair<APInt, APInt>> OpcodeMasks; 292 293 DenseMap<const Record *, unsigned> Predicate2Index; 294 unsigned NumUniquePredicates = 0; 295 296 // Number unique predicates and opcodes used by InstructionEquivalenceClass 297 // definitions. Each unique opcode will be associated with an OpcodeInfo 298 // object. 299 for (const Record *Def : Fn.getDefinitions()) { 300 RecVec Classes = Def->getValueAsListOfDefs("Classes"); 301 for (const Record *EC : Classes) { 302 const Record *Pred = EC->getValueAsDef("Predicate"); 303 if (Predicate2Index.find(Pred) == Predicate2Index.end()) 304 Predicate2Index[Pred] = NumUniquePredicates++; 305 306 RecVec Opcodes = EC->getValueAsListOfDefs("Opcodes"); 307 for (const Record *Opcode : Opcodes) { 308 if (Opcode2Index.find(Opcode) == Opcode2Index.end()) { 309 Opcode2Index[Opcode] = OpcodeMappings.size(); 310 OpcodeMappings.emplace_back(Opcode, OpcodeInfo()); 311 } 312 } 313 } 314 } 315 316 // Initialize vector `OpcodeMasks` with default values. We want to keep track 317 // of which processors "use" which opcodes. We also want to be able to 318 // identify predicates that are used by different processors for a same 319 // opcode. 
320 // This information is used later on by this algorithm to sort OpcodeMapping 321 // elements based on their processor and predicate sets. 322 OpcodeMasks.resize(OpcodeMappings.size()); 323 APInt DefaultProcMask(ProcModelMap.size(), 0); 324 APInt DefaultPredMask(NumUniquePredicates, 0); 325 for (std::pair<APInt, APInt> &MaskPair : OpcodeMasks) 326 MaskPair = std::make_pair(DefaultProcMask, DefaultPredMask); 327 328 // Construct a OpcodeInfo object for every unique opcode declared by an 329 // InstructionEquivalenceClass definition. 330 for (const Record *Def : Fn.getDefinitions()) { 331 RecVec Classes = Def->getValueAsListOfDefs("Classes"); 332 const Record *SchedModel = Def->getValueAsDef("SchedModel"); 333 unsigned ProcIndex = ProcModelMap.find(SchedModel)->second; 334 APInt ProcMask(ProcModelMap.size(), 0); 335 ProcMask.setBit(ProcIndex); 336 337 for (const Record *EC : Classes) { 338 RecVec Opcodes = EC->getValueAsListOfDefs("Opcodes"); 339 340 std::vector<int64_t> OpIndices = 341 EC->getValueAsListOfInts("OperandIndices"); 342 APInt OperandMask = constructOperandMask(OpIndices); 343 344 const Record *Pred = EC->getValueAsDef("Predicate"); 345 APInt PredMask(NumUniquePredicates, 0); 346 PredMask.setBit(Predicate2Index[Pred]); 347 348 for (const Record *Opcode : Opcodes) { 349 unsigned OpcodeIdx = Opcode2Index[Opcode]; 350 if (OpcodeMasks[OpcodeIdx].first[ProcIndex]) { 351 std::string Message = 352 "Opcode " + Opcode->getName().str() + 353 " used by multiple InstructionEquivalenceClass definitions."; 354 PrintFatalError(EC->getLoc(), Message); 355 } 356 OpcodeMasks[OpcodeIdx].first |= ProcMask; 357 OpcodeMasks[OpcodeIdx].second |= PredMask; 358 OpcodeInfo &OI = OpcodeMappings[OpcodeIdx].second; 359 360 OI.addPredicateForProcModel(ProcMask, OperandMask, Pred); 361 } 362 } 363 } 364 365 // Sort OpcodeMappings elements based on their CPU and predicate masks. 366 // As a last resort, order elements by opcode identifier. 
367 llvm::sort(OpcodeMappings, 368 [&](const OpcodeMapPair &Lhs, const OpcodeMapPair &Rhs) { 369 unsigned LhsIdx = Opcode2Index[Lhs.first]; 370 unsigned RhsIdx = Opcode2Index[Rhs.first]; 371 const std::pair<APInt, APInt> &LhsMasks = OpcodeMasks[LhsIdx]; 372 const std::pair<APInt, APInt> &RhsMasks = OpcodeMasks[RhsIdx]; 373 374 auto LessThan = [](const APInt &Lhs, const APInt &Rhs) { 375 unsigned LhsCountPopulation = Lhs.countPopulation(); 376 unsigned RhsCountPopulation = Rhs.countPopulation(); 377 return ((LhsCountPopulation < RhsCountPopulation) || 378 ((LhsCountPopulation == RhsCountPopulation) && 379 (Lhs.countLeadingZeros() > Rhs.countLeadingZeros()))); 380 }; 381 382 if (LhsMasks.first != RhsMasks.first) 383 return LessThan(LhsMasks.first, RhsMasks.first); 384 385 if (LhsMasks.second != RhsMasks.second) 386 return LessThan(LhsMasks.second, RhsMasks.second); 387 388 return LhsIdx < RhsIdx; 389 }); 390 391 // Now construct opcode groups. Groups are used by the SubtargetEmitter when 392 // expanding the body of a STIPredicate function. In particular, each opcode 393 // group is expanded into a sequence of labels in a switch statement. 394 // It identifies opcodes for which different processors define same predicates 395 // and same opcode masks. 396 for (OpcodeMapPair &Info : OpcodeMappings) 397 Fn.addOpcode(Info.first, std::move(Info.second)); 398} 399 400void CodeGenSchedModels::collectSTIPredicates() { 401 // Map STIPredicateDecl records to elements of vector 402 // CodeGenSchedModels::STIPredicates. 
403 DenseMap<const Record *, unsigned> Decl2Index; 404 405 RecVec RV = Records.getAllDerivedDefinitions("STIPredicate"); 406 for (const Record *R : RV) { 407 const Record *Decl = R->getValueAsDef("Declaration"); 408 409 const auto It = Decl2Index.find(Decl); 410 if (It == Decl2Index.end()) { 411 Decl2Index[Decl] = STIPredicates.size(); 412 STIPredicateFunction Predicate(Decl); 413 Predicate.addDefinition(R); 414 STIPredicates.emplace_back(std::move(Predicate)); 415 continue; 416 } 417 418 STIPredicateFunction &PreviousDef = STIPredicates[It->second]; 419 PreviousDef.addDefinition(R); 420 } 421 422 for (STIPredicateFunction &Fn : STIPredicates) 423 processSTIPredicate(Fn, ProcModelMap); 424} 425 426void OpcodeInfo::addPredicateForProcModel(const llvm::APInt &CpuMask, 427 const llvm::APInt &OperandMask, 428 const Record *Predicate) { 429 auto It = llvm::find_if( 430 Predicates, [&OperandMask, &Predicate](const PredicateInfo &P) { 431 return P.Predicate == Predicate && P.OperandMask == OperandMask; 432 }); 433 if (It == Predicates.end()) { 434 Predicates.emplace_back(CpuMask, OperandMask, Predicate); 435 return; 436 } 437 It->ProcModelMask |= CpuMask; 438} 439 440void CodeGenSchedModels::checkMCInstPredicates() const { 441 RecVec MCPredicates = Records.getAllDerivedDefinitions("TIIPredicate"); 442 if (MCPredicates.empty()) 443 return; 444 445 // A target cannot have multiple TIIPredicate definitions with a same name. 
446 llvm::StringMap<const Record *> TIIPredicates(MCPredicates.size()); 447 for (const Record *TIIPred : MCPredicates) { 448 StringRef Name = TIIPred->getValueAsString("FunctionName"); 449 StringMap<const Record *>::const_iterator It = TIIPredicates.find(Name); 450 if (It == TIIPredicates.end()) { 451 TIIPredicates[Name] = TIIPred; 452 continue; 453 } 454 455 PrintError(TIIPred->getLoc(), 456 "TIIPredicate " + Name + " is multiply defined."); 457 PrintNote(It->second->getLoc(), 458 " Previous definition of " + Name + " was here."); 459 PrintFatalError(TIIPred->getLoc(), 460 "Found conflicting definitions of TIIPredicate."); 461 } 462} 463 464void CodeGenSchedModels::collectRetireControlUnits() { 465 RecVec Units = Records.getAllDerivedDefinitions("RetireControlUnit"); 466 467 for (Record *RCU : Units) { 468 CodeGenProcModel &PM = getProcModel(RCU->getValueAsDef("SchedModel")); 469 if (PM.RetireControlUnit) { 470 PrintError(RCU->getLoc(), 471 "Expected a single RetireControlUnit definition"); 472 PrintNote(PM.RetireControlUnit->getLoc(), 473 "Previous definition of RetireControlUnit was here"); 474 } 475 PM.RetireControlUnit = RCU; 476 } 477} 478 479void CodeGenSchedModels::collectLoadStoreQueueInfo() { 480 RecVec Queues = Records.getAllDerivedDefinitions("MemoryQueue"); 481 482 for (Record *Queue : Queues) { 483 CodeGenProcModel &PM = getProcModel(Queue->getValueAsDef("SchedModel")); 484 if (Queue->isSubClassOf("LoadQueue")) { 485 if (PM.LoadQueue) { 486 PrintError(Queue->getLoc(), 487 "Expected a single LoadQueue definition"); 488 PrintNote(PM.LoadQueue->getLoc(), 489 "Previous definition of LoadQueue was here"); 490 } 491 492 PM.LoadQueue = Queue; 493 } 494 495 if (Queue->isSubClassOf("StoreQueue")) { 496 if (PM.StoreQueue) { 497 PrintError(Queue->getLoc(), 498 "Expected a single StoreQueue definition"); 499 PrintNote(PM.LoadQueue->getLoc(), 500 "Previous definition of StoreQueue was here"); 501 } 502 503 PM.StoreQueue = Queue; 504 } 505 } 506} 507 508/// Collect 
optional processor information. 509void CodeGenSchedModels::collectOptionalProcessorInfo() { 510 // Find register file definitions for each processor. 511 collectRegisterFiles(); 512 513 // Collect processor RetireControlUnit descriptors if available. 514 collectRetireControlUnits(); 515 516 // Collect information about load/store queues. 517 collectLoadStoreQueueInfo(); 518 519 checkCompleteness(); 520} 521 522/// Gather all processor models. 523void CodeGenSchedModels::collectProcModels() { 524 RecVec ProcRecords = Records.getAllDerivedDefinitions("Processor"); 525 llvm::sort(ProcRecords, LessRecordFieldName()); 526 527 // Reserve space because we can. Reallocation would be ok. 528 ProcModels.reserve(ProcRecords.size()+1); 529 530 // Use idx=0 for NoModel/NoItineraries. 531 Record *NoModelDef = Records.getDef("NoSchedModel"); 532 Record *NoItinsDef = Records.getDef("NoItineraries"); 533 ProcModels.emplace_back(0, "NoSchedModel", NoModelDef, NoItinsDef); 534 ProcModelMap[NoModelDef] = 0; 535 536 // For each processor, find a unique machine model. 537 LLVM_DEBUG(dbgs() << "+++ PROCESSOR MODELs (addProcModel) +++\n"); 538 for (Record *ProcRecord : ProcRecords) 539 addProcModel(ProcRecord); 540} 541 542/// Get a unique processor model based on the defined MachineModel and 543/// ProcessorItineraries. 544void CodeGenSchedModels::addProcModel(Record *ProcDef) { 545 Record *ModelKey = getModelOrItinDef(ProcDef); 546 if (!ProcModelMap.insert(std::make_pair(ModelKey, ProcModels.size())).second) 547 return; 548 549 std::string Name = std::string(ModelKey->getName()); 550 if (ModelKey->isSubClassOf("SchedMachineModel")) { 551 Record *ItinsDef = ModelKey->getValueAsDef("Itineraries"); 552 ProcModels.emplace_back(ProcModels.size(), Name, ModelKey, ItinsDef); 553 } 554 else { 555 // An itinerary is defined without a machine model. Infer a new model. 
556 if (!ModelKey->getValueAsListOfDefs("IID").empty()) 557 Name = Name + "Model"; 558 ProcModels.emplace_back(ProcModels.size(), Name, 559 ProcDef->getValueAsDef("SchedModel"), ModelKey); 560 } 561 LLVM_DEBUG(ProcModels.back().dump()); 562} 563 564// Recursively find all reachable SchedReadWrite records. 565static void scanSchedRW(Record *RWDef, RecVec &RWDefs, 566 SmallPtrSet<Record*, 16> &RWSet) { 567 if (!RWSet.insert(RWDef).second) 568 return; 569 RWDefs.push_back(RWDef); 570 // Reads don't currently have sequence records, but it can be added later. 571 if (RWDef->isSubClassOf("WriteSequence")) { 572 RecVec Seq = RWDef->getValueAsListOfDefs("Writes"); 573 for (Record *WSRec : Seq) 574 scanSchedRW(WSRec, RWDefs, RWSet); 575 } 576 else if (RWDef->isSubClassOf("SchedVariant")) { 577 // Visit each variant (guarded by a different predicate). 578 RecVec Vars = RWDef->getValueAsListOfDefs("Variants"); 579 for (Record *Variant : Vars) { 580 // Visit each RW in the sequence selected by the current variant. 581 RecVec Selected = Variant->getValueAsListOfDefs("Selected"); 582 for (Record *SelDef : Selected) 583 scanSchedRW(SelDef, RWDefs, RWSet); 584 } 585 } 586} 587 588// Collect and sort all SchedReadWrites reachable via tablegen records. 589// More may be inferred later when inferring new SchedClasses from variants. 590void CodeGenSchedModels::collectSchedRW() { 591 // Reserve idx=0 for invalid writes/reads. 592 SchedWrites.resize(1); 593 SchedReads.resize(1); 594 595 SmallPtrSet<Record*, 16> RWSet; 596 597 // Find all SchedReadWrites referenced by instruction defs. 
598 RecVec SWDefs, SRDefs; 599 for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) { 600 Record *SchedDef = Inst->TheDef; 601 if (SchedDef->isValueUnset("SchedRW")) 602 continue; 603 RecVec RWs = SchedDef->getValueAsListOfDefs("SchedRW"); 604 for (Record *RW : RWs) { 605 if (RW->isSubClassOf("SchedWrite")) 606 scanSchedRW(RW, SWDefs, RWSet); 607 else { 608 assert(RW->isSubClassOf("SchedRead") && "Unknown SchedReadWrite"); 609 scanSchedRW(RW, SRDefs, RWSet); 610 } 611 } 612 } 613 // Find all ReadWrites referenced by InstRW. 614 RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW"); 615 for (Record *InstRWDef : InstRWDefs) { 616 // For all OperandReadWrites. 617 RecVec RWDefs = InstRWDef->getValueAsListOfDefs("OperandReadWrites"); 618 for (Record *RWDef : RWDefs) { 619 if (RWDef->isSubClassOf("SchedWrite")) 620 scanSchedRW(RWDef, SWDefs, RWSet); 621 else { 622 assert(RWDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite"); 623 scanSchedRW(RWDef, SRDefs, RWSet); 624 } 625 } 626 } 627 // Find all ReadWrites referenced by ItinRW. 628 RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW"); 629 for (Record *ItinRWDef : ItinRWDefs) { 630 // For all OperandReadWrites. 631 RecVec RWDefs = ItinRWDef->getValueAsListOfDefs("OperandReadWrites"); 632 for (Record *RWDef : RWDefs) { 633 if (RWDef->isSubClassOf("SchedWrite")) 634 scanSchedRW(RWDef, SWDefs, RWSet); 635 else { 636 assert(RWDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite"); 637 scanSchedRW(RWDef, SRDefs, RWSet); 638 } 639 } 640 } 641 // Find all ReadWrites referenced by SchedAlias. AliasDefs needs to be sorted 642 // for the loop below that initializes Alias vectors. 
643 RecVec AliasDefs = Records.getAllDerivedDefinitions("SchedAlias"); 644 llvm::sort(AliasDefs, LessRecord()); 645 for (Record *ADef : AliasDefs) { 646 Record *MatchDef = ADef->getValueAsDef("MatchRW"); 647 Record *AliasDef = ADef->getValueAsDef("AliasRW"); 648 if (MatchDef->isSubClassOf("SchedWrite")) { 649 if (!AliasDef->isSubClassOf("SchedWrite")) 650 PrintFatalError(ADef->getLoc(), "SchedWrite Alias must be SchedWrite"); 651 scanSchedRW(AliasDef, SWDefs, RWSet); 652 } 653 else { 654 assert(MatchDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite"); 655 if (!AliasDef->isSubClassOf("SchedRead")) 656 PrintFatalError(ADef->getLoc(), "SchedRead Alias must be SchedRead"); 657 scanSchedRW(AliasDef, SRDefs, RWSet); 658 } 659 } 660 // Sort and add the SchedReadWrites directly referenced by instructions or 661 // itinerary resources. Index reads and writes in separate domains. 662 llvm::sort(SWDefs, LessRecord()); 663 for (Record *SWDef : SWDefs) { 664 assert(!getSchedRWIdx(SWDef, /*IsRead=*/false) && "duplicate SchedWrite"); 665 SchedWrites.emplace_back(SchedWrites.size(), SWDef); 666 } 667 llvm::sort(SRDefs, LessRecord()); 668 for (Record *SRDef : SRDefs) { 669 assert(!getSchedRWIdx(SRDef, /*IsRead-*/true) && "duplicate SchedWrite"); 670 SchedReads.emplace_back(SchedReads.size(), SRDef); 671 } 672 // Initialize WriteSequence vectors. 673 for (CodeGenSchedRW &CGRW : SchedWrites) { 674 if (!CGRW.IsSequence) 675 continue; 676 findRWs(CGRW.TheDef->getValueAsListOfDefs("Writes"), CGRW.Sequence, 677 /*IsRead=*/false); 678 } 679 // Initialize Aliases vectors. 
680 for (Record *ADef : AliasDefs) { 681 Record *AliasDef = ADef->getValueAsDef("AliasRW"); 682 getSchedRW(AliasDef).IsAlias = true; 683 Record *MatchDef = ADef->getValueAsDef("MatchRW"); 684 CodeGenSchedRW &RW = getSchedRW(MatchDef); 685 if (RW.IsAlias) 686 PrintFatalError(ADef->getLoc(), "Cannot Alias an Alias"); 687 RW.Aliases.push_back(ADef); 688 } 689 LLVM_DEBUG( 690 dbgs() << "\n+++ SCHED READS and WRITES (collectSchedRW) +++\n"; 691 for (unsigned WIdx = 0, WEnd = SchedWrites.size(); WIdx != WEnd; ++WIdx) { 692 dbgs() << WIdx << ": "; 693 SchedWrites[WIdx].dump(); 694 dbgs() << '\n'; 695 } for (unsigned RIdx = 0, REnd = SchedReads.size(); RIdx != REnd; 696 ++RIdx) { 697 dbgs() << RIdx << ": "; 698 SchedReads[RIdx].dump(); 699 dbgs() << '\n'; 700 } RecVec RWDefs = Records.getAllDerivedDefinitions("SchedReadWrite"); 701 for (Record *RWDef 702 : RWDefs) { 703 if (!getSchedRWIdx(RWDef, RWDef->isSubClassOf("SchedRead"))) { 704 StringRef Name = RWDef->getName(); 705 if (Name != "NoWrite" && Name != "ReadDefault") 706 dbgs() << "Unused SchedReadWrite " << Name << '\n'; 707 } 708 }); 709} 710 711/// Compute a SchedWrite name from a sequence of writes. 712std::string CodeGenSchedModels::genRWName(ArrayRef<unsigned> Seq, bool IsRead) { 713 std::string Name("("); 714 for (auto I = Seq.begin(), E = Seq.end(); I != E; ++I) { 715 if (I != Seq.begin()) 716 Name += '_'; 717 Name += getSchedRW(*I, IsRead).Name; 718 } 719 Name += ')'; 720 return Name; 721} 722 723unsigned CodeGenSchedModels::getSchedRWIdx(const Record *Def, 724 bool IsRead) const { 725 const std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites; 726 const auto I = find_if( 727 RWVec, [Def](const CodeGenSchedRW &RW) { return RW.TheDef == Def; }); 728 return I == RWVec.end() ? 
0 : std::distance(RWVec.begin(), I); 729} 730 731bool CodeGenSchedModels::hasReadOfWrite(Record *WriteDef) const { 732 for (const CodeGenSchedRW &Read : SchedReads) { 733 Record *ReadDef = Read.TheDef; 734 if (!ReadDef || !ReadDef->isSubClassOf("ProcReadAdvance")) 735 continue; 736 737 RecVec ValidWrites = ReadDef->getValueAsListOfDefs("ValidWrites"); 738 if (is_contained(ValidWrites, WriteDef)) { 739 return true; 740 } 741 } 742 return false; 743} 744 745static void splitSchedReadWrites(const RecVec &RWDefs, 746 RecVec &WriteDefs, RecVec &ReadDefs) { 747 for (Record *RWDef : RWDefs) { 748 if (RWDef->isSubClassOf("SchedWrite")) 749 WriteDefs.push_back(RWDef); 750 else { 751 assert(RWDef->isSubClassOf("SchedRead") && "unknown SchedReadWrite"); 752 ReadDefs.push_back(RWDef); 753 } 754 } 755} 756 757// Split the SchedReadWrites defs and call findRWs for each list. 758void CodeGenSchedModels::findRWs(const RecVec &RWDefs, 759 IdxVec &Writes, IdxVec &Reads) const { 760 RecVec WriteDefs; 761 RecVec ReadDefs; 762 splitSchedReadWrites(RWDefs, WriteDefs, ReadDefs); 763 findRWs(WriteDefs, Writes, false); 764 findRWs(ReadDefs, Reads, true); 765} 766 767// Call getSchedRWIdx for all elements in a sequence of SchedRW defs. 768void CodeGenSchedModels::findRWs(const RecVec &RWDefs, IdxVec &RWs, 769 bool IsRead) const { 770 for (Record *RWDef : RWDefs) { 771 unsigned Idx = getSchedRWIdx(RWDef, IsRead); 772 assert(Idx && "failed to collect SchedReadWrite"); 773 RWs.push_back(Idx); 774 } 775} 776 777void CodeGenSchedModels::expandRWSequence(unsigned RWIdx, IdxVec &RWSeq, 778 bool IsRead) const { 779 const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead); 780 if (!SchedRW.IsSequence) { 781 RWSeq.push_back(RWIdx); 782 return; 783 } 784 int Repeat = 785 SchedRW.TheDef ? 
        SchedRW.TheDef->getValueAsInt("Repeat") : 1;
  for (int i = 0; i < Repeat; ++i) {
    for (unsigned I : SchedRW.Sequence) {
      expandRWSequence(I, RWSeq, IsRead);
    }
  }
}

// Expand a SchedWrite as a sequence following any aliases that coincide with
// the given processor model.
//
// \param RWIdx index of the SchedReadWrite to expand.
// \param RWSeq [out] receives the expanded sequence of RW indices.
// \param IsRead true if RWIdx names a SchedRead, false for a SchedWrite.
// \param ProcModel only aliases bound to this processor (or to no processor)
//        are followed.
void CodeGenSchedModels::expandRWSeqForProc(
    unsigned RWIdx, IdxVec &RWSeq, bool IsRead,
    const CodeGenProcModel &ProcModel) const {

  const CodeGenSchedRW &SchedWrite = getSchedRW(RWIdx, IsRead);
  Record *AliasDef = nullptr;
  // Find the alias (if any) that applies to ProcModel. An alias with an
  // incomplete SchedModel field applies to every processor.
  for (const Record *Rec : SchedWrite.Aliases) {
    const CodeGenSchedRW &AliasRW = getSchedRW(Rec->getValueAsDef("AliasRW"));
    if (Rec->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = Rec->getValueAsDef("SchedModel");
      if (&getProcModel(ModelDef) != &ProcModel)
        continue;
    }
    // At most one alias may apply per processor; more is a .td error.
    if (AliasDef)
      PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
                      "defined for processor " + ProcModel.ModelName +
                      " Ensure only one SchedAlias exists per RW.");
    AliasDef = AliasRW.TheDef;
  }
  // If an alias matched, recursively expand the alias target instead.
  if (AliasDef) {
    expandRWSeqForProc(getSchedRWIdx(AliasDef, IsRead),
                       RWSeq, IsRead,ProcModel);
    return;
  }
  // A plain (non-sequence) RW expands to itself.
  if (!SchedWrite.IsSequence) {
    RWSeq.push_back(RWIdx);
    return;
  }
  // A WriteSequence expands each member, repeated "Repeat" times.
  int Repeat =
    SchedWrite.TheDef ? SchedWrite.TheDef->getValueAsInt("Repeat") : 1;
  for (int I = 0, E = Repeat; I < E; ++I) {
    for (unsigned Idx : SchedWrite.Sequence) {
      expandRWSeqForProc(Idx, RWSeq, IsRead, ProcModel);
    }
  }
}

// Find the existing SchedWrite that models this sequence of writes.
// Returns the index of the matching RW, or 0 (the reserved invalid index)
// when no existing RW has exactly this Sequence.
unsigned CodeGenSchedModels::findRWForSequence(ArrayRef<unsigned> Seq,
                                               bool IsRead) {
  std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;

  auto I = find_if(RWVec, [Seq](CodeGenSchedRW &RW) {
    return makeArrayRef(RW.Sequence) == Seq;
  });
  // Index zero reserved for invalid RW.
  return I == RWVec.end() ? 0 : std::distance(RWVec.begin(), I);
}

/// Add this ReadWrite if it doesn't already exist.
/// A single-element sequence is already its own RW; otherwise reuse an
/// existing sequence RW or append a new one and return its index.
unsigned CodeGenSchedModels::findOrInsertRW(ArrayRef<unsigned> Seq,
                                            bool IsRead) {
  assert(!Seq.empty() && "cannot insert empty sequence");
  if (Seq.size() == 1)
    return Seq.back();

  unsigned Idx = findRWForSequence(Seq, IsRead);
  if (Idx)
    return Idx;

  std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
  unsigned RWIdx = RWVec.size();
  CodeGenSchedRW SchedRW(RWIdx, IsRead, Seq, genRWName(Seq, IsRead));
  RWVec.push_back(SchedRW);
  return RWIdx;
}

/// Visit all the instruction definitions for this target to gather and
/// enumerate the itinerary classes. These are the explicitly specified
/// SchedClasses. More SchedClasses may be inferred.
void CodeGenSchedModels::collectSchedClasses() {

  // NoItinerary is always the first class at Idx=0
  assert(SchedClasses.empty() && "Expected empty sched class");
  SchedClasses.emplace_back(0, "NoInstrModel",
                            Records.getDef("NoItinerary"));
  SchedClasses.back().ProcIndices.push_back(0);

  // Create a SchedClass for each unique combination of itinerary class and
  // SchedRW list.
  for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
    Record *ItinDef = Inst->TheDef->getValueAsDef("Itinerary");
    IdxVec Writes, Reads;
    if (!Inst->TheDef->isValueUnset("SchedRW"))
      findRWs(Inst->TheDef->getValueAsListOfDefs("SchedRW"), Writes, Reads);

    // ProcIdx == 0 indicates the class applies to all processors.
    unsigned SCIdx = addSchedClass(ItinDef, Writes, Reads, /*ProcIndices*/{0});
    InstrClassMap[Inst->TheDef] = SCIdx;
  }
  // Create classes for InstRW defs.
  RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
  llvm::sort(InstRWDefs, LessRecord());
  LLVM_DEBUG(dbgs() << "\n+++ SCHED CLASSES (createInstRWClass) +++\n");
  for (Record *RWDef : InstRWDefs)
    createInstRWClass(RWDef);

  NumInstrSchedClasses = SchedClasses.size();

  // Everything below is diagnostic output only; it is skipped entirely
  // unless LLVM_DEBUG is active.
  bool EnableDump = false;
  LLVM_DEBUG(EnableDump = true);
  if (!EnableDump)
    return;

  LLVM_DEBUG(
      dbgs()
      << "\n+++ ITINERARIES and/or MACHINE MODELS (collectSchedClasses) +++\n");
  for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
    StringRef InstName = Inst->TheDef->getName();
    unsigned SCIdx = getSchedClassIdx(*Inst);
    if (!SCIdx) {
      LLVM_DEBUG({
        if (!Inst->hasNoSchedulingInfo)
          dbgs() << "No machine model for " << Inst->TheDef->getName() << '\n';
      });
      continue;
    }
    CodeGenSchedClass &SC = getSchedClass(SCIdx);
    if (SC.ProcIndices[0] != 0)
      PrintFatalError(Inst->TheDef->getLoc(), "Instruction's sched class "
                      "must not be subtarget specific.");

    IdxVec ProcIndices;
    if (SC.ItinClassDef->getName() != "NoItinerary") {
      ProcIndices.push_back(0);
      // NOTE(review): these dbgs() calls are not wrapped in LLVM_DEBUG like
      // their neighbors; they are only reached in debug builds because of the
      // EnableDump early-return above, but wrapping them would be consistent.
      dbgs() << "Itinerary for " << InstName << ": "
             << SC.ItinClassDef->getName() << '\n';
    }
    if (!SC.Writes.empty()) {
      ProcIndices.push_back(0);
      LLVM_DEBUG({
        dbgs() << "SchedRW machine model for " << InstName;
        for (IdxIter WI = SC.Writes.begin(), WE = SC.Writes.end(); WI != WE;
             ++WI)
          dbgs() << " " << SchedWrites[*WI].Name;
        for (IdxIter RI = SC.Reads.begin(), RE = SC.Reads.end(); RI != RE; ++RI)
          dbgs() << " " << SchedReads[*RI].Name;
        dbgs() << '\n';
      });
    }
    const RecVec &RWDefs = SchedClasses[SCIdx].InstRWs;
    for (Record *RWDef : RWDefs) {
      const CodeGenProcModel &ProcModel =
          getProcModel(RWDef->getValueAsDef("SchedModel"));
      ProcIndices.push_back(ProcModel.Index);
      LLVM_DEBUG(dbgs() << "InstRW on " << ProcModel.ModelName << " for "
                        << InstName);
      IdxVec Writes;
      IdxVec Reads;
      findRWs(RWDef->getValueAsListOfDefs("OperandReadWrites"),
              Writes, Reads);
      LLVM_DEBUG({
        for (unsigned WIdx : Writes)
          dbgs() << " " << SchedWrites[WIdx].Name;
        for (unsigned RIdx : Reads)
          dbgs() << " " << SchedReads[RIdx].Name;
        dbgs() << '\n';
      });
    }
    // If ProcIndices contains zero, the class applies to all processors.
    LLVM_DEBUG({
      if (!std::count(ProcIndices.begin(), ProcIndices.end(), 0)) {
        for (const CodeGenProcModel &PM : ProcModels) {
          if (!std::count(ProcIndices.begin(), ProcIndices.end(), PM.Index))
            dbgs() << "No machine model for " << Inst->TheDef->getName()
                   << " on processor " << PM.ModelName << '\n';
        }
      }
    });
  }
}

// Get the SchedClass index for an instruction. Returns 0 (NoInstrModel)
// for instructions that were never mapped.
unsigned
CodeGenSchedModels::getSchedClassIdx(const CodeGenInstruction &Inst) const {
  return InstrClassMap.lookup(Inst.TheDef);
}

// Build a SchedClass name by joining the itinerary class name (if any) with
// the names of all operand SchedWrites and SchedReads, '_'-separated.
std::string
CodeGenSchedModels::createSchedClassName(Record *ItinClassDef,
                                         ArrayRef<unsigned> OperWrites,
                                         ArrayRef<unsigned> OperReads) {

  std::string Name;
  if (ItinClassDef && ItinClassDef->getName() != "NoItinerary")
    Name = std::string(ItinClassDef->getName());
  for (unsigned Idx : OperWrites) {
    if (!Name.empty())
      Name += '_';
    Name += SchedWrites[Idx].Name;
  }
  for (unsigned Idx : OperReads) {
    Name += '_';
    Name += SchedReads[Idx].Name;
  }
  return Name;
}

// Build a SchedClass name from the '_'-joined names of the given instruction
// defs (used for classes created from InstRW records).
std::string CodeGenSchedModels::createSchedClassName(const RecVec &InstDefs) {

  std::string Name;
  for (RecIter I = InstDefs.begin(), E = InstDefs.end(); I != E; ++I) {
    if (I != InstDefs.begin())
      Name += '_';
    Name += (*I)->getName();
  }
  return Name;
}

/// Add an inferred sched class from an itinerary class and per-operand list of
/// SchedWrites and SchedReads. ProcIndices contains the set of IDs of
/// processors that may utilize this class.
unsigned CodeGenSchedModels::addSchedClass(Record *ItinClassDef,
                                           ArrayRef<unsigned> OperWrites,
                                           ArrayRef<unsigned> OperReads,
                                           ArrayRef<unsigned> ProcIndices) {
  assert(!ProcIndices.empty() && "expect at least one ProcIdx");

  auto IsKeyEqual = [=](const CodeGenSchedClass &SC) {
                     return SC.isKeyEqual(ItinClassDef, OperWrites, OperReads);
                   };

  // Reuse an existing class whose (ItinClassDef, Writes, Reads) key matches;
  // Idx == 0 means either "not found" or "matched class 0", disambiguated by
  // the explicit isKeyEqual check on SchedClasses[0] below.
  auto I = find_if(make_range(schedClassBegin(), schedClassEnd()), IsKeyEqual);
  unsigned Idx = I == schedClassEnd() ? 0 : std::distance(schedClassBegin(), I);
  if (Idx || SchedClasses[0].isKeyEqual(ItinClassDef, OperWrites, OperReads)) {
    // Merge the new ProcIndices into the existing class's set.
    IdxVec PI;
    std::set_union(SchedClasses[Idx].ProcIndices.begin(),
                   SchedClasses[Idx].ProcIndices.end(),
                   ProcIndices.begin(), ProcIndices.end(),
                   std::back_inserter(PI));
    SchedClasses[Idx].ProcIndices = std::move(PI);
    return Idx;
  }
  // No match: append a brand-new class.
  Idx = SchedClasses.size();
  SchedClasses.emplace_back(Idx,
                            createSchedClassName(ItinClassDef, OperWrites,
                                                 OperReads),
                            ItinClassDef);
  CodeGenSchedClass &SC = SchedClasses.back();
  SC.Writes = OperWrites;
  SC.Reads = OperReads;
  SC.ProcIndices = ProcIndices;

  return Idx;
}

// Create classes for each set of opcodes that are in the same InstReadWrite
// definition across all processors.
void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) {
  // ClassInstrs will hold an entry for each subset of Instrs in InstRWDef that
  // intersects with an existing class via a previous InstRWDef. Instrs that do
  // not intersect with an existing class refer back to their former class as
  // determined from ItinDef or SchedRW.
  SmallMapVector<unsigned, SmallVector<Record *, 8>, 4> ClassInstrs;
  // Sort Instrs into sets.
  const RecVec *InstDefs = Sets.expand(InstRWDef);
  if (InstDefs->empty())
    PrintFatalError(InstRWDef->getLoc(), "No matching instruction opcodes");

  // Bucket the matched instructions by their current SchedClass.
  for (Record *InstDef : *InstDefs) {
    InstClassMapTy::const_iterator Pos = InstrClassMap.find(InstDef);
    if (Pos == InstrClassMap.end())
      PrintFatalError(InstDef->getLoc(), "No sched class for instruction.");
    unsigned SCIdx = Pos->second;
    ClassInstrs[SCIdx].push_back(InstDef);
  }
  // For each set of Instrs, create a new class if necessary, and map or remap
  // the Instrs to it.
  for (auto &Entry : ClassInstrs) {
    unsigned OldSCIdx = Entry.first;
    ArrayRef<Record*> InstDefs = Entry.second;
    // If all instrs in the current class are accounted for, then leave
    // them mapped to their old class.
    if (OldSCIdx) {
      const RecVec &RWDefs = SchedClasses[OldSCIdx].InstRWs;
      if (!RWDefs.empty()) {
        const RecVec *OrigInstDefs = Sets.expand(RWDefs[0]);
        unsigned OrigNumInstrs =
          count_if(*OrigInstDefs, [&](Record *OIDef) {
                     return InstrClassMap[OIDef] == OldSCIdx;
                   });
        if (OrigNumInstrs == InstDefs.size()) {
          assert(SchedClasses[OldSCIdx].ProcIndices[0] == 0 &&
                 "expected a generic SchedClass");
          Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
          // Make sure we didn't already have a InstRW containing this
          // instruction on this model.
          for (Record *RWD : RWDefs) {
            if (RWD->getValueAsDef("SchedModel") == RWModelDef &&
                RWModelDef->getValueAsBit("FullInstRWOverlapCheck")) {
              assert(!InstDefs.empty()); // Checked at function start.
              PrintFatalError
                  (InstRWDef->getLoc(),
                   "Overlapping InstRW definition for \"" +
                   InstDefs.front()->getName() +
                   "\" also matches previous \"" +
                   RWD->getValue("Instrs")->getValue()->getAsString() +
                   "\".");
            }
          }
          LLVM_DEBUG(dbgs() << "InstRW: Reuse SC " << OldSCIdx << ":"
                            << SchedClasses[OldSCIdx].Name << " on "
                            << RWModelDef->getName() << "\n");
          SchedClasses[OldSCIdx].InstRWs.push_back(InstRWDef);
          continue;
        }
      }
    }
    // Otherwise split: create a new class for this subset of instructions.
    unsigned SCIdx = SchedClasses.size();
    SchedClasses.emplace_back(SCIdx, createSchedClassName(InstDefs), nullptr);
    CodeGenSchedClass &SC = SchedClasses.back();
    LLVM_DEBUG(dbgs() << "InstRW: New SC " << SCIdx << ":" << SC.Name << " on "
                      << InstRWDef->getValueAsDef("SchedModel")->getName()
                      << "\n");

    // Preserve ItinDef and Writes/Reads for processors without an InstRW entry.
    SC.ItinClassDef = SchedClasses[OldSCIdx].ItinClassDef;
    SC.Writes = SchedClasses[OldSCIdx].Writes;
    SC.Reads = SchedClasses[OldSCIdx].Reads;
    SC.ProcIndices.push_back(0);
    // If we had an old class, copy its InstRWs to this new class.
    if (OldSCIdx) {
      Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
      for (Record *OldRWDef : SchedClasses[OldSCIdx].InstRWs) {
        if (OldRWDef->getValueAsDef("SchedModel") == RWModelDef) {
          assert(!InstDefs.empty()); // Checked at function start.
          PrintFatalError
              (InstRWDef->getLoc(),
               "Overlapping InstRW definition for \"" +
               InstDefs.front()->getName() +
               "\" also matches previous \"" +
               OldRWDef->getValue("Instrs")->getValue()->getAsString() +
               "\".");
        }
        assert(OldRWDef != InstRWDef &&
               "SchedClass has duplicate InstRW def");
        SC.InstRWs.push_back(OldRWDef);
      }
    }
    // Map each Instr to this new class.
    for (Record *InstDef : InstDefs)
      InstrClassMap[InstDef] = SCIdx;
    SC.InstRWs.push_back(InstRWDef);
  }
}

// True if collectProcItins found anything.
bool CodeGenSchedModels::hasItineraries() const {
  for (const CodeGenProcModel &PM : make_range(procModelBegin(),procModelEnd()))
    if (PM.hasItineraries())
      return true;
  return false;
}

// Gather the processor itineraries.
void CodeGenSchedModels::collectProcItins() {
  LLVM_DEBUG(dbgs() << "\n+++ PROBLEM ITINERARIES (collectProcItins) +++\n");
  for (CodeGenProcModel &ProcModel : ProcModels) {
    if (!ProcModel.hasItineraries())
      continue;

    RecVec ItinRecords = ProcModel.ItinsDef->getValueAsListOfDefs("IID");
    assert(!ItinRecords.empty() && "ProcModel.hasItineraries is incorrect");

    // Populate ItinDefList with Itinerary records.
    ProcModel.ItinDefList.resize(NumInstrSchedClasses);

    // Insert each itinerary data record in the correct position within
    // the processor model's ItinDefList.
    for (Record *ItinData : ItinRecords) {
      const Record *ItinDef = ItinData->getValueAsDef("TheClass");
      bool FoundClass = false;

      for (const CodeGenSchedClass &SC :
           make_range(schedClassBegin(), schedClassEnd())) {
        // Multiple SchedClasses may share an itinerary. Update all of them.
        if (SC.ItinClassDef == ItinDef) {
          ProcModel.ItinDefList[SC.Index] = ItinData;
          FoundClass = true;
        }
      }
      if (!FoundClass) {
        LLVM_DEBUG(dbgs() << ProcModel.ItinsDef->getName()
                          << " missing class for itinerary "
                          << ItinDef->getName() << '\n');
      }
    }
    // Check for missing itinerary entries.
    assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec");
    LLVM_DEBUG(
        for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
          if (!ProcModel.ItinDefList[i])
            dbgs() << ProcModel.ItinsDef->getName()
                   << " missing itinerary for class " << SchedClasses[i].Name
                   << '\n';
        });
  }
}

// Gather the read/write types for each itinerary class.
void CodeGenSchedModels::collectProcItinRW() {
  RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
  llvm::sort(ItinRWDefs, LessRecord());
  for (Record *RWDef : ItinRWDefs) {
    if (!RWDef->getValueInit("SchedModel")->isComplete())
      PrintFatalError(RWDef->getLoc(), "SchedModel is undefined");
    Record *ModelDef = RWDef->getValueAsDef("SchedModel");
    ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
    if (I == ProcModelMap.end()) {
      PrintFatalError(RWDef->getLoc(), "Undefined SchedMachineModel "
                      + ModelDef->getName());
    }
    ProcModels[I->second].ItinRWDefs.push_back(RWDef);
  }
}

// Gather the unsupported features for processor models.
void CodeGenSchedModels::collectProcUnsupportedFeatures() {
  for (CodeGenProcModel &ProcModel : ProcModels) {
    for (Record *Pred : ProcModel.ModelDef->getValueAsListOfDefs("UnsupportedFeatures")) {
      ProcModel.UnsupportedFeaturesDefs.push_back(Pred);
    }
  }
}

/// Infer new classes from existing classes. In the process, this may create new
/// SchedWrites from sequences of existing SchedWrites.
void CodeGenSchedModels::inferSchedClasses() {
  LLVM_DEBUG(
      dbgs() << "\n+++ INFERRING SCHED CLASSES (inferSchedClasses) +++\n");
  LLVM_DEBUG(dbgs() << NumInstrSchedClasses << " instr sched classes.\n");

  // Visit all existing classes and newly created classes.
  // Note: SchedClasses.size() may grow during iteration; the loop condition
  // deliberately re-reads it so new classes are also visited.
  for (unsigned Idx = 0; Idx != SchedClasses.size(); ++Idx) {
    assert(SchedClasses[Idx].Index == Idx && "bad SCIdx");

    if (SchedClasses[Idx].ItinClassDef)
      inferFromItinClass(SchedClasses[Idx].ItinClassDef, Idx);
    if (!SchedClasses[Idx].InstRWs.empty())
      inferFromInstRWs(Idx);
    if (!SchedClasses[Idx].Writes.empty()) {
      inferFromRW(SchedClasses[Idx].Writes, SchedClasses[Idx].Reads,
                  Idx, SchedClasses[Idx].ProcIndices);
    }
    // Guard against runaway class explosion from nested variants.
    assert(SchedClasses.size() < (NumInstrSchedClasses*6) &&
           "too many SchedVariants");
  }
}

/// Infer classes from per-processor itinerary resources.
void CodeGenSchedModels::inferFromItinClass(Record *ItinClassDef,
                                            unsigned FromClassIdx) {
  for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
    const CodeGenProcModel &PM = ProcModels[PIdx];
    // For all ItinRW entries.
    bool HasMatch = false;
    for (const Record *Rec : PM.ItinRWDefs) {
      RecVec Matched = Rec->getValueAsListOfDefs("MatchedItinClasses");
      if (!std::count(Matched.begin(), Matched.end(), ItinClassDef))
        continue;
      // Each itinerary class may be matched by at most one ItinRW per model.
      if (HasMatch)
        PrintFatalError(Rec->getLoc(), "Duplicate itinerary class "
                        + ItinClassDef->getName()
                        + " in ItinResources for " + PM.ModelName);
      HasMatch = true;
      IdxVec Writes, Reads;
      findRWs(Rec->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
      inferFromRW(Writes, Reads, FromClassIdx, PIdx);
    }
  }
}

/// Infer classes from per-processor InstReadWrite definitions.
void CodeGenSchedModels::inferFromInstRWs(unsigned SCIdx) {
  // Index-based loop: inferFromRW may append to SchedClasses, but this
  // class's InstRWs list must not change while we iterate (asserted below).
  for (unsigned I = 0, E = SchedClasses[SCIdx].InstRWs.size(); I != E; ++I) {
    assert(SchedClasses[SCIdx].InstRWs.size() == E && "InstrRWs was mutated!");
    Record *Rec = SchedClasses[SCIdx].InstRWs[I];
    const RecVec *InstDefs = Sets.expand(Rec);
    RecIter II = InstDefs->begin(), IE = InstDefs->end();
    for (; II != IE; ++II) {
      if (InstrClassMap[*II] == SCIdx)
        break;
    }
    // If this class no longer has any instructions mapped to it, it has become
    // irrelevant.
    if (II == IE)
      continue;
    IdxVec Writes, Reads;
    findRWs(Rec->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
    unsigned PIdx = getProcModel(Rec->getValueAsDef("SchedModel")).Index;
    inferFromRW(Writes, Reads, SCIdx, PIdx); // May mutate SchedClasses.
  }
}

namespace {

// Helper for substituteVariantOperand.
struct TransVariant {
  Record *VarOrSeqDef;  // Variant or sequence.
  unsigned RWIdx;       // Index of this variant or sequence's matched type.
  unsigned ProcIdx;     // Processor model index or zero for any.
  unsigned TransVecIdx; // Index into PredTransitions::TransVec.

  TransVariant(Record *def, unsigned rwi, unsigned pi, unsigned ti):
    VarOrSeqDef(def), RWIdx(rwi), ProcIdx(pi), TransVecIdx(ti) {}
};

// Associate a predicate with the SchedReadWrite that it guards.
// RWIdx is the index of the read/write variant.
struct PredCheck {
  bool IsRead;
  unsigned RWIdx;
  Record *Predicate;

  PredCheck(bool r, unsigned w, Record *p): IsRead(r), RWIdx(w), Predicate(p) {}
};

// A Predicate transition is a list of RW sequences guarded by a PredTerm.
struct PredTransition {
  // A predicate term is a conjunction of PredChecks.
  SmallVector<PredCheck, 4> PredTerm;
  // One inner vector per write (resp. read) operand of the SchedClass.
  SmallVector<SmallVector<unsigned,4>, 16> WriteSequences;
  SmallVector<SmallVector<unsigned,4>, 16> ReadSequences;
  SmallVector<unsigned, 4> ProcIndices;
};

// Encapsulate a set of partially constructed transitions.
// The results are built by repeated calls to substituteVariants.
class PredTransitions {
  CodeGenSchedModels &SchedModels;

public:
  std::vector<PredTransition> TransVec;

  PredTransitions(CodeGenSchedModels &sm): SchedModels(sm) {}

  void substituteVariantOperand(const SmallVectorImpl<unsigned> &RWSeq,
                                bool IsRead, unsigned StartIdx);

  void substituteVariants(const PredTransition &Trans);

#ifndef NDEBUG
  void dump() const;
#endif

private:
  bool mutuallyExclusive(Record *PredDef, ArrayRef<PredCheck> Term);
  void getIntersectingVariants(
    const CodeGenSchedRW &SchedRW, unsigned TransIdx,
    std::vector<TransVariant> &IntersectingVariants);
  void pushVariant(const TransVariant &VInfo, bool IsRead);
};

} // end anonymous namespace

// Return true if this predicate is mutually exclusive with a PredTerm. This
// degenerates into checking if the predicate is mutually exclusive with any
// predicate in the Term's conjunction.
//
// All predicates associated with a given SchedRW are considered mutually
// exclusive. This should work even if the conditions expressed by the
// predicates are not exclusive because the predicates for a given SchedWrite
// are always checked in the order they are defined in the .td file. Later
// conditions implicitly negate any prior condition.
bool PredTransitions::mutuallyExclusive(Record *PredDef,
                                        ArrayRef<PredCheck> Term) {
  for (const PredCheck &PC: Term) {
    // The same predicate appearing again is not exclusive with itself.
    if (PC.Predicate == PredDef)
      return false;

    const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(PC.RWIdx, PC.IsRead);
    assert(SchedRW.HasVariants && "PredCheck must refer to a SchedVariant");
    RecVec Variants = SchedRW.TheDef->getValueAsListOfDefs("Variants");
    // Exclusive if any sibling variant of an already-taken check uses PredDef.
    if (any_of(Variants, [PredDef](const Record *R) {
          return R->getValueAsDef("Predicate") == PredDef;
        }))
      return true;
  }
  return false;
}

// Return true if RW (or anything it aliases to, transitively through
// sequences) has predicate-guarded variants that require expansion.
static bool hasAliasedVariants(const CodeGenSchedRW &RW,
                               CodeGenSchedModels &SchedModels) {
  if (RW.HasVariants)
    return true;

  for (Record *Alias : RW.Aliases) {
    const CodeGenSchedRW &AliasRW =
        SchedModels.getSchedRW(Alias->getValueAsDef("AliasRW"));
    if (AliasRW.HasVariants)
      return true;
    if (AliasRW.IsSequence) {
      IdxVec ExpandedRWs;
      SchedModels.expandRWSequence(AliasRW.Index, ExpandedRWs, AliasRW.IsRead);
      for (unsigned SI : ExpandedRWs) {
        if (hasAliasedVariants(SchedModels.getSchedRW(SI, AliasRW.IsRead),
                               SchedModels))
          return true;
      }
    }
  }
  return false;
}

// Return true if any RW referenced by any transition still has variants left
// to expand (the fixed-point condition for inferFromRW's loop).
static bool hasVariant(ArrayRef<PredTransition> Transitions,
                       CodeGenSchedModels &SchedModels) {
  for (const PredTransition &PTI : Transitions) {
    for (const SmallVectorImpl<unsigned> &WSI : PTI.WriteSequences)
      for (unsigned WI : WSI)
        if (hasAliasedVariants(SchedModels.getSchedWrite(WI), SchedModels))
          return true;

    for (const SmallVectorImpl<unsigned> &RSI : PTI.ReadSequences)
      for (unsigned RI : RSI)
        if (hasAliasedVariants(SchedModels.getSchedRead(RI), SchedModels))
          return true;
  }
  return false;
}

// Populate IntersectingVariants with any variants or aliased sequences of the
// given SchedRW whose processor indices and predicates are not mutually
// exclusive with the given transition.
void PredTransitions::getIntersectingVariants(
    const CodeGenSchedRW &SchedRW, unsigned TransIdx,
    std::vector<TransVariant> &IntersectingVariants) {

  // True if any of this RW's variants/aliases is not processor-specific.
  bool GenericRW = false;

  std::vector<TransVariant> Variants;
  if (SchedRW.HasVariants) {
    unsigned VarProcIdx = 0;
    if (SchedRW.TheDef->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = SchedRW.TheDef->getValueAsDef("SchedModel");
      VarProcIdx = SchedModels.getProcModel(ModelDef).Index;
    }
    // Push each variant. Assign TransVecIdx later.
    const RecVec VarDefs = SchedRW.TheDef->getValueAsListOfDefs("Variants");
    for (Record *VarDef : VarDefs)
      Variants.emplace_back(VarDef, SchedRW.Index, VarProcIdx, 0);
    if (VarProcIdx == 0)
      GenericRW = true;
  }
  for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
       AI != AE; ++AI) {
    // If either the SchedAlias itself or the SchedReadWrite that it aliases
    // to is defined within a processor model, constrain all variants to
    // that processor.
    unsigned AliasProcIdx = 0;
    if ((*AI)->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = (*AI)->getValueAsDef("SchedModel");
      AliasProcIdx = SchedModels.getProcModel(ModelDef).Index;
    }
    const CodeGenSchedRW &AliasRW =
        SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));

    if (AliasRW.HasVariants) {
      const RecVec VarDefs = AliasRW.TheDef->getValueAsListOfDefs("Variants");
      for (Record *VD : VarDefs)
        Variants.emplace_back(VD, AliasRW.Index, AliasProcIdx, 0);
    }
    if (AliasRW.IsSequence)
      Variants.emplace_back(AliasRW.TheDef, SchedRW.Index, AliasProcIdx, 0);
    if (AliasProcIdx == 0)
      GenericRW = true;
  }
  for (TransVariant &Variant : Variants) {
    // Don't expand variants if the processor models don't intersect.
    // A zero processor index means any processor.
    SmallVectorImpl<unsigned> &ProcIndices = TransVec[TransIdx].ProcIndices;
    if (ProcIndices[0] && Variant.ProcIdx) {
      unsigned Cnt = std::count(ProcIndices.begin(), ProcIndices.end(),
                                Variant.ProcIdx);
      if (!Cnt)
        continue;
      if (Cnt > 1) {
        const CodeGenProcModel &PM =
            *(SchedModels.procModelBegin() + Variant.ProcIdx);
        PrintFatalError(Variant.VarOrSeqDef->getLoc(),
                        "Multiple variants defined for processor " +
                        PM.ModelName +
                        " Ensure only one SchedAlias exists per RW.");
      }
    }
    // Skip variants whose predicate cannot hold together with this
    // transition's existing predicate term.
    if (Variant.VarOrSeqDef->isSubClassOf("SchedVar")) {
      Record *PredDef = Variant.VarOrSeqDef->getValueAsDef("Predicate");
      if (mutuallyExclusive(PredDef, TransVec[TransIdx].PredTerm))
        continue;
    }
    if (IntersectingVariants.empty()) {
      // The first variant builds on the existing transition.
      Variant.TransVecIdx = TransIdx;
      IntersectingVariants.push_back(Variant);
    }
    else {
      // Push another copy of the current transition for more variants.
      Variant.TransVecIdx = TransVec.size();
      IntersectingVariants.push_back(Variant);
      TransVec.push_back(TransVec[TransIdx]);
    }
  }
  if (GenericRW && IntersectingVariants.empty()) {
    PrintFatalError(SchedRW.TheDef->getLoc(), "No variant of this type has "
                    "a matching predicate on any processor");
  }
}

// Push the Reads/Writes selected by this variant onto the PredTransition
// specified by VInfo.
void PredTransitions::
pushVariant(const TransVariant &VInfo, bool IsRead) {
  PredTransition &Trans = TransVec[VInfo.TransVecIdx];

  // If this operand transition is reached through a processor-specific alias,
  // then the whole transition is specific to this processor.
  if (VInfo.ProcIdx != 0)
    Trans.ProcIndices.assign(1, VInfo.ProcIdx);

  IdxVec SelectedRWs;
  if (VInfo.VarOrSeqDef->isSubClassOf("SchedVar")) {
    // Record the predicate guarding this selection in the transition's term.
    Record *PredDef = VInfo.VarOrSeqDef->getValueAsDef("Predicate");
    Trans.PredTerm.emplace_back(IsRead, VInfo.RWIdx,PredDef);
    RecVec SelectedDefs = VInfo.VarOrSeqDef->getValueAsListOfDefs("Selected");
    SchedModels.findRWs(SelectedDefs, SelectedRWs, IsRead);
  }
  else {
    assert(VInfo.VarOrSeqDef->isSubClassOf("WriteSequence") &&
           "variant must be a SchedVariant or aliased WriteSequence");
    SelectedRWs.push_back(SchedModels.getSchedRWIdx(VInfo.VarOrSeqDef, IsRead));
  }

  const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(VInfo.RWIdx, IsRead);

  SmallVectorImpl<SmallVector<unsigned,4>> &RWSequences = IsRead
    ? Trans.ReadSequences : Trans.WriteSequences;
  if (SchedRW.IsVariadic) {
    unsigned OperIdx = RWSequences.size()-1;
    // Make N-1 copies of this transition's last sequence.
    RWSequences.insert(RWSequences.end(), SelectedRWs.size() - 1,
                       RWSequences[OperIdx]);
    // Push each of the N elements of the SelectedRWs onto a copy of the last
    // sequence (split the current operand into N operands).
    // Note that write sequences should be expanded within this loop--the entire
    // sequence belongs to a single operand.
    for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
         RWI != RWE; ++RWI, ++OperIdx) {
      IdxVec ExpandedRWs;
      if (IsRead)
        ExpandedRWs.push_back(*RWI);
      else
        SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
      RWSequences[OperIdx].insert(RWSequences[OperIdx].end(),
                                  ExpandedRWs.begin(), ExpandedRWs.end());
    }
    assert(OperIdx == RWSequences.size() && "missed a sequence");
  }
  else {
    // Push this transition's expanded sequence onto this transition's last
    // sequence (add to the current operand's sequence).
    SmallVectorImpl<unsigned> &Seq = RWSequences.back();
    IdxVec ExpandedRWs;
    for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
         RWI != RWE; ++RWI) {
      if (IsRead)
        ExpandedRWs.push_back(*RWI);
      else
        SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
    }
    Seq.insert(Seq.end(), ExpandedRWs.begin(), ExpandedRWs.end());
  }
}

// RWSeq is a sequence of all Reads or all Writes for the next read or write
// operand. StartIdx is an index into TransVec where partial results
// starts. RWSeq must be applied to all transitions between StartIdx and the end
// of TransVec.
void PredTransitions::substituteVariantOperand(
    const SmallVectorImpl<unsigned> &RWSeq, bool IsRead, unsigned StartIdx) {

  // Visit each original RW within the current sequence.
  for (SmallVectorImpl<unsigned>::const_iterator
         RWI = RWSeq.begin(), RWE = RWSeq.end(); RWI != RWE; ++RWI) {
    const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(*RWI, IsRead);
    // Push this RW on all partial PredTransitions or distribute variants.
    // New PredTransitions may be pushed within this loop which should not be
    // revisited (TransEnd must be loop invariant).
    for (unsigned TransIdx = StartIdx, TransEnd = TransVec.size();
         TransIdx != TransEnd; ++TransIdx) {
      // In the common case, push RW onto the current operand's sequence.
      if (!hasAliasedVariants(SchedRW, SchedModels)) {
        if (IsRead)
          TransVec[TransIdx].ReadSequences.back().push_back(*RWI);
        else
          TransVec[TransIdx].WriteSequences.back().push_back(*RWI);
        continue;
      }
      // Distribute this partial PredTransition across intersecting variants.
      // This will push copies of TransVec[TransIdx] on the back of TransVec.
      std::vector<TransVariant> IntersectingVariants;
      getIntersectingVariants(SchedRW, TransIdx, IntersectingVariants);
      // Now expand each variant on top of its copy of the transition.
      for (std::vector<TransVariant>::const_iterator
             IVI = IntersectingVariants.begin(),
             IVE = IntersectingVariants.end();
           IVI != IVE; ++IVI) {
        pushVariant(*IVI, IsRead);
      }
    }
  }
}

// For each variant of a Read/Write in Trans, substitute the sequence of
// Read/Writes guarded by the variant. This is exponential in the number of
// variant Read/Writes, but in practice detection of mutually exclusive
// predicates should result in linear growth in the total number of variants.
//
// This is one step in a breadth-first search of nested variants.
void PredTransitions::substituteVariants(const PredTransition &Trans) {
  // Build up a set of partial results starting at the back of
  // PredTransitions. Remember the first new transition.
  unsigned StartIdx = TransVec.size();
  TransVec.emplace_back();
  TransVec.back().PredTerm = Trans.PredTerm;
  TransVec.back().ProcIndices = Trans.ProcIndices;

  // Visit each original write sequence.
  for (SmallVectorImpl<SmallVector<unsigned,4>>::const_iterator
         WSI = Trans.WriteSequences.begin(), WSE = Trans.WriteSequences.end();
       WSI != WSE; ++WSI) {
    // Push a new (empty) write sequence onto all partial Transitions.
    for (std::vector<PredTransition>::iterator I =
           TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
      I->WriteSequences.emplace_back();
    }
    substituteVariantOperand(*WSI, /*IsRead=*/false, StartIdx);
  }
  // Visit each original read sequence.
  for (SmallVectorImpl<SmallVector<unsigned,4>>::const_iterator
         RSI = Trans.ReadSequences.begin(), RSE = Trans.ReadSequences.end();
       RSI != RSE; ++RSI) {
    // Push a new (empty) read sequence onto all partial Transitions.
    for (std::vector<PredTransition>::iterator I =
           TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
      I->ReadSequences.emplace_back();
    }
    substituteVariantOperand(*RSI, /*IsRead=*/true, StartIdx);
  }
}

// Create a new SchedClass for each variant found by inferFromRW. Pass
static void inferFromTransitions(ArrayRef<PredTransition> LastTransitions,
                                 unsigned FromClassIdx,
                                 CodeGenSchedModels &SchedModels) {
  // For each PredTransition, create a new CodeGenSchedTransition, which usually
  // requires creating a new SchedClass.
  for (ArrayRef<PredTransition>::iterator
         I = LastTransitions.begin(), E = LastTransitions.end(); I != E; ++I) {
    // Collapse each operand's expanded sequence back into a single RW index.
    IdxVec OperWritesVariant;
    transform(I->WriteSequences, std::back_inserter(OperWritesVariant),
              [&SchedModels](ArrayRef<unsigned> WS) {
                return SchedModels.findOrInsertRW(WS, /*IsRead=*/false);
              });
    IdxVec OperReadsVariant;
    transform(I->ReadSequences, std::back_inserter(OperReadsVariant),
              [&SchedModels](ArrayRef<unsigned> RS) {
                return SchedModels.findOrInsertRW(RS, /*IsRead=*/true);
              });
    CodeGenSchedTransition SCTrans;
    SCTrans.ToClassIdx =
        SchedModels.addSchedClass(/*ItinClassDef=*/nullptr, OperWritesVariant,
                                  OperReadsVariant, I->ProcIndices);
    SCTrans.ProcIndices.assign(I->ProcIndices.begin(), I->ProcIndices.end());
    // The final PredTerm is unique set of predicates guarding the transition.
    RecVec Preds;
    transform(I->PredTerm, std::back_inserter(Preds),
              [](const PredCheck &P) {
                return P.Predicate;
              });
    Preds.erase(std::unique(Preds.begin(), Preds.end()), Preds.end());
    SCTrans.PredTerm = std::move(Preds);
    SchedModels.getSchedClass(FromClassIdx)
        .Transitions.push_back(std::move(SCTrans));
  }
}

// Create new SchedClasses for the given ReadWrite list.
If any of the
// ReadWrites refers to a SchedVariant, create a new SchedClass for each variant
// of the ReadWrite list, following Aliases if necessary.
void CodeGenSchedModels::inferFromRW(ArrayRef<unsigned> OperWrites,
                                     ArrayRef<unsigned> OperReads,
                                     unsigned FromClassIdx,
                                     ArrayRef<unsigned> ProcIndices) {
  LLVM_DEBUG(dbgs() << "INFER RW proc("; dumpIdxVec(ProcIndices);
             dbgs() << ") ");

  // Create a seed transition with an empty PredTerm and the expanded sequences
  // of SchedWrites for the current SchedClass.
  std::vector<PredTransition> LastTransitions;
  LastTransitions.emplace_back();
  LastTransitions.back().ProcIndices.append(ProcIndices.begin(),
                                            ProcIndices.end());

  // Expand each operand write into its full sequence and record it on the
  // seed transition.
  for (unsigned WriteIdx : OperWrites) {
    IdxVec WriteSeq;
    expandRWSequence(WriteIdx, WriteSeq, /*IsRead=*/false);
    LastTransitions[0].WriteSequences.emplace_back();
    SmallVectorImpl<unsigned> &Seq = LastTransitions[0].WriteSequences.back();
    Seq.append(WriteSeq.begin(), WriteSeq.end());
    LLVM_DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
  }
  LLVM_DEBUG(dbgs() << " Reads: ");
  // Same expansion for the operand reads.
  for (unsigned ReadIdx : OperReads) {
    IdxVec ReadSeq;
    expandRWSequence(ReadIdx, ReadSeq, /*IsRead=*/true);
    LastTransitions[0].ReadSequences.emplace_back();
    SmallVectorImpl<unsigned> &Seq = LastTransitions[0].ReadSequences.back();
    Seq.append(ReadSeq.begin(), ReadSeq.end());
    LLVM_DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
  }
  LLVM_DEBUG(dbgs() << '\n');

  // Collect all PredTransitions for individual operands.
  // Iterate until no variant writes remain. Each pass substitutes one layer
  // of variants, so this reaches a fixpoint where every sequence is concrete.
  while (hasVariant(LastTransitions, *this)) {
    PredTransitions Transitions(*this);
    for (const PredTransition &Trans : LastTransitions)
      Transitions.substituteVariants(Trans);
    LLVM_DEBUG(Transitions.dump());
    LastTransitions.swap(Transitions.TransVec);
  }
  // If the first transition has no variants, nothing to do: the seed was never
  // split, so the SchedClass needs no transitions.
  if (LastTransitions[0].PredTerm.empty())
    return;

  // WARNING: We are about to mutate the SchedClasses vector. Do not refer to
  // OperWrites, OperReads, or ProcIndices after calling inferFromTransitions.
  inferFromTransitions(LastTransitions, FromClassIdx, *this);
}

// Check if any processor resource group contains all resource records in
// SubUnits.
bool CodeGenSchedModels::hasSuperGroup(RecVec &SubUnits, CodeGenProcModel &PM) {
  for (unsigned i = 0, e = PM.ProcResourceDefs.size(); i < e; ++i) {
    if (!PM.ProcResourceDefs[i]->isSubClassOf("ProcResGroup"))
      continue;
    RecVec SuperUnits =
        PM.ProcResourceDefs[i]->getValueAsListOfDefs("Resources");
    // This group is a supergroup iff every SubUnit appears in its Resources.
    RecIter RI = SubUnits.begin(), RE = SubUnits.end();
    for ( ; RI != RE; ++RI) {
      if (!is_contained(SuperUnits, *RI)) {
        break;
      }
    }
    if (RI == RE)
      return true;
  }
  return false;
}

// Verify that overlapping groups have a common supergroup.
1755void CodeGenSchedModels::verifyProcResourceGroups(CodeGenProcModel &PM) { 1756 for (unsigned i = 0, e = PM.ProcResourceDefs.size(); i < e; ++i) { 1757 if (!PM.ProcResourceDefs[i]->isSubClassOf("ProcResGroup")) 1758 continue; 1759 RecVec CheckUnits = 1760 PM.ProcResourceDefs[i]->getValueAsListOfDefs("Resources"); 1761 for (unsigned j = i+1; j < e; ++j) { 1762 if (!PM.ProcResourceDefs[j]->isSubClassOf("ProcResGroup")) 1763 continue; 1764 RecVec OtherUnits = 1765 PM.ProcResourceDefs[j]->getValueAsListOfDefs("Resources"); 1766 if (std::find_first_of(CheckUnits.begin(), CheckUnits.end(), 1767 OtherUnits.begin(), OtherUnits.end()) 1768 != CheckUnits.end()) { 1769 // CheckUnits and OtherUnits overlap 1770 OtherUnits.insert(OtherUnits.end(), CheckUnits.begin(), 1771 CheckUnits.end()); 1772 if (!hasSuperGroup(OtherUnits, PM)) { 1773 PrintFatalError((PM.ProcResourceDefs[i])->getLoc(), 1774 "proc resource group overlaps with " 1775 + PM.ProcResourceDefs[j]->getName() 1776 + " but no supergroup contains both."); 1777 } 1778 } 1779 } 1780 } 1781} 1782 1783// Collect all the RegisterFile definitions available in this target. 1784void CodeGenSchedModels::collectRegisterFiles() { 1785 RecVec RegisterFileDefs = Records.getAllDerivedDefinitions("RegisterFile"); 1786 1787 // RegisterFiles is the vector of CodeGenRegisterFile. 1788 for (Record *RF : RegisterFileDefs) { 1789 // For each register file definition, construct a CodeGenRegisterFile object 1790 // and add it to the appropriate scheduling model. 
1791 CodeGenProcModel &PM = getProcModel(RF->getValueAsDef("SchedModel")); 1792 PM.RegisterFiles.emplace_back(CodeGenRegisterFile(RF->getName(),RF)); 1793 CodeGenRegisterFile &CGRF = PM.RegisterFiles.back(); 1794 CGRF.MaxMovesEliminatedPerCycle = 1795 RF->getValueAsInt("MaxMovesEliminatedPerCycle"); 1796 CGRF.AllowZeroMoveEliminationOnly = 1797 RF->getValueAsBit("AllowZeroMoveEliminationOnly"); 1798 1799 // Now set the number of physical registers as well as the cost of registers 1800 // in each register class. 1801 CGRF.NumPhysRegs = RF->getValueAsInt("NumPhysRegs"); 1802 if (!CGRF.NumPhysRegs) { 1803 PrintFatalError(RF->getLoc(), 1804 "Invalid RegisterFile with zero physical registers"); 1805 } 1806 1807 RecVec RegisterClasses = RF->getValueAsListOfDefs("RegClasses"); 1808 std::vector<int64_t> RegisterCosts = RF->getValueAsListOfInts("RegCosts"); 1809 ListInit *MoveElimInfo = RF->getValueAsListInit("AllowMoveElimination"); 1810 for (unsigned I = 0, E = RegisterClasses.size(); I < E; ++I) { 1811 int Cost = RegisterCosts.size() > I ? RegisterCosts[I] : 1; 1812 1813 bool AllowMoveElim = false; 1814 if (MoveElimInfo->size() > I) { 1815 BitInit *Val = cast<BitInit>(MoveElimInfo->getElement(I)); 1816 AllowMoveElim = Val->getValue(); 1817 } 1818 1819 CGRF.Costs.emplace_back(RegisterClasses[I], Cost, AllowMoveElim); 1820 } 1821 } 1822} 1823 1824// Collect and sort WriteRes, ReadAdvance, and ProcResources. 1825void CodeGenSchedModels::collectProcResources() { 1826 ProcResourceDefs = Records.getAllDerivedDefinitions("ProcResourceUnits"); 1827 ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup"); 1828 1829 // Add any subtarget-specific SchedReadWrites that are directly associated 1830 // with processor resources. Refer to the parent SchedClass's ProcIndices to 1831 // determine which processors they apply to. 
1832 for (const CodeGenSchedClass &SC : 1833 make_range(schedClassBegin(), schedClassEnd())) { 1834 if (SC.ItinClassDef) { 1835 collectItinProcResources(SC.ItinClassDef); 1836 continue; 1837 } 1838 1839 // This class may have a default ReadWrite list which can be overriden by 1840 // InstRW definitions. 1841 for (Record *RW : SC.InstRWs) { 1842 Record *RWModelDef = RW->getValueAsDef("SchedModel"); 1843 unsigned PIdx = getProcModel(RWModelDef).Index; 1844 IdxVec Writes, Reads; 1845 findRWs(RW->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads); 1846 collectRWResources(Writes, Reads, PIdx); 1847 } 1848 1849 collectRWResources(SC.Writes, SC.Reads, SC.ProcIndices); 1850 } 1851 // Add resources separately defined by each subtarget. 1852 RecVec WRDefs = Records.getAllDerivedDefinitions("WriteRes"); 1853 for (Record *WR : WRDefs) { 1854 Record *ModelDef = WR->getValueAsDef("SchedModel"); 1855 addWriteRes(WR, getProcModel(ModelDef).Index); 1856 } 1857 RecVec SWRDefs = Records.getAllDerivedDefinitions("SchedWriteRes"); 1858 for (Record *SWR : SWRDefs) { 1859 Record *ModelDef = SWR->getValueAsDef("SchedModel"); 1860 addWriteRes(SWR, getProcModel(ModelDef).Index); 1861 } 1862 RecVec RADefs = Records.getAllDerivedDefinitions("ReadAdvance"); 1863 for (Record *RA : RADefs) { 1864 Record *ModelDef = RA->getValueAsDef("SchedModel"); 1865 addReadAdvance(RA, getProcModel(ModelDef).Index); 1866 } 1867 RecVec SRADefs = Records.getAllDerivedDefinitions("SchedReadAdvance"); 1868 for (Record *SRA : SRADefs) { 1869 if (SRA->getValueInit("SchedModel")->isComplete()) { 1870 Record *ModelDef = SRA->getValueAsDef("SchedModel"); 1871 addReadAdvance(SRA, getProcModel(ModelDef).Index); 1872 } 1873 } 1874 // Add ProcResGroups that are defined within this processor model, which may 1875 // not be directly referenced but may directly specify a buffer size. 
1876 RecVec ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup"); 1877 for (Record *PRG : ProcResGroups) { 1878 if (!PRG->getValueInit("SchedModel")->isComplete()) 1879 continue; 1880 CodeGenProcModel &PM = getProcModel(PRG->getValueAsDef("SchedModel")); 1881 if (!is_contained(PM.ProcResourceDefs, PRG)) 1882 PM.ProcResourceDefs.push_back(PRG); 1883 } 1884 // Add ProcResourceUnits unconditionally. 1885 for (Record *PRU : Records.getAllDerivedDefinitions("ProcResourceUnits")) { 1886 if (!PRU->getValueInit("SchedModel")->isComplete()) 1887 continue; 1888 CodeGenProcModel &PM = getProcModel(PRU->getValueAsDef("SchedModel")); 1889 if (!is_contained(PM.ProcResourceDefs, PRU)) 1890 PM.ProcResourceDefs.push_back(PRU); 1891 } 1892 // Finalize each ProcModel by sorting the record arrays. 1893 for (CodeGenProcModel &PM : ProcModels) { 1894 llvm::sort(PM.WriteResDefs, LessRecord()); 1895 llvm::sort(PM.ReadAdvanceDefs, LessRecord()); 1896 llvm::sort(PM.ProcResourceDefs, LessRecord()); 1897 LLVM_DEBUG( 1898 PM.dump(); 1899 dbgs() << "WriteResDefs: "; for (RecIter RI = PM.WriteResDefs.begin(), 1900 RE = PM.WriteResDefs.end(); 1901 RI != RE; ++RI) { 1902 if ((*RI)->isSubClassOf("WriteRes")) 1903 dbgs() << (*RI)->getValueAsDef("WriteType")->getName() << " "; 1904 else 1905 dbgs() << (*RI)->getName() << " "; 1906 } dbgs() << "\nReadAdvanceDefs: "; 1907 for (RecIter RI = PM.ReadAdvanceDefs.begin(), 1908 RE = PM.ReadAdvanceDefs.end(); 1909 RI != RE; ++RI) { 1910 if ((*RI)->isSubClassOf("ReadAdvance")) 1911 dbgs() << (*RI)->getValueAsDef("ReadType")->getName() << " "; 1912 else 1913 dbgs() << (*RI)->getName() << " "; 1914 } dbgs() 1915 << "\nProcResourceDefs: "; 1916 for (RecIter RI = PM.ProcResourceDefs.begin(), 1917 RE = PM.ProcResourceDefs.end(); 1918 RI != RE; ++RI) { dbgs() << (*RI)->getName() << " "; } dbgs() 1919 << '\n'); 1920 verifyProcResourceGroups(PM); 1921 } 1922 1923 ProcResourceDefs.clear(); 1924 ProcResGroups.clear(); 1925} 1926 1927void 
CodeGenSchedModels::checkCompleteness() { 1928 bool Complete = true; 1929 bool HadCompleteModel = false; 1930 for (const CodeGenProcModel &ProcModel : procModels()) { 1931 const bool HasItineraries = ProcModel.hasItineraries(); 1932 if (!ProcModel.ModelDef->getValueAsBit("CompleteModel")) 1933 continue; 1934 for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) { 1935 if (Inst->hasNoSchedulingInfo) 1936 continue; 1937 if (ProcModel.isUnsupported(*Inst)) 1938 continue; 1939 unsigned SCIdx = getSchedClassIdx(*Inst); 1940 if (!SCIdx) { 1941 if (Inst->TheDef->isValueUnset("SchedRW") && !HadCompleteModel) { 1942 PrintError(Inst->TheDef->getLoc(), 1943 "No schedule information for instruction '" + 1944 Inst->TheDef->getName() + "' in SchedMachineModel '" + 1945 ProcModel.ModelDef->getName() + "'"); 1946 Complete = false; 1947 } 1948 continue; 1949 } 1950 1951 const CodeGenSchedClass &SC = getSchedClass(SCIdx); 1952 if (!SC.Writes.empty()) 1953 continue; 1954 if (HasItineraries && SC.ItinClassDef != nullptr && 1955 SC.ItinClassDef->getName() != "NoItinerary") 1956 continue; 1957 1958 const RecVec &InstRWs = SC.InstRWs; 1959 auto I = find_if(InstRWs, [&ProcModel](const Record *R) { 1960 return R->getValueAsDef("SchedModel") == ProcModel.ModelDef; 1961 }); 1962 if (I == InstRWs.end()) { 1963 PrintError(Inst->TheDef->getLoc(), "'" + ProcModel.ModelName + 1964 "' lacks information for '" + 1965 Inst->TheDef->getName() + "'"); 1966 Complete = false; 1967 } 1968 } 1969 HadCompleteModel = true; 1970 } 1971 if (!Complete) { 1972 errs() << "\n\nIncomplete schedule models found.\n" 1973 << "- Consider setting 'CompleteModel = 0' while developing new models.\n" 1974 << "- Pseudo instructions can be marked with 'hasNoSchedulingInfo = 1'.\n" 1975 << "- Instructions should usually have Sched<[...]> as a superclass, " 1976 "you may temporarily use an empty list.\n" 1977 << "- Instructions related to unsupported features can be excluded with " 1978 "list<Predicate> 
UnsupportedFeatures = [HasA,..,HasY]; in the " 1979 "processor model.\n\n"; 1980 PrintFatalError("Incomplete schedule model"); 1981 } 1982} 1983 1984// Collect itinerary class resources for each processor. 1985void CodeGenSchedModels::collectItinProcResources(Record *ItinClassDef) { 1986 for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) { 1987 const CodeGenProcModel &PM = ProcModels[PIdx]; 1988 // For all ItinRW entries. 1989 bool HasMatch = false; 1990 for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end(); 1991 II != IE; ++II) { 1992 RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses"); 1993 if (!std::count(Matched.begin(), Matched.end(), ItinClassDef)) 1994 continue; 1995 if (HasMatch) 1996 PrintFatalError((*II)->getLoc(), "Duplicate itinerary class " 1997 + ItinClassDef->getName() 1998 + " in ItinResources for " + PM.ModelName); 1999 HasMatch = true; 2000 IdxVec Writes, Reads; 2001 findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads); 2002 collectRWResources(Writes, Reads, PIdx); 2003 } 2004 } 2005} 2006 2007void CodeGenSchedModels::collectRWResources(unsigned RWIdx, bool IsRead, 2008 ArrayRef<unsigned> ProcIndices) { 2009 const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead); 2010 if (SchedRW.TheDef) { 2011 if (!IsRead && SchedRW.TheDef->isSubClassOf("SchedWriteRes")) { 2012 for (unsigned Idx : ProcIndices) 2013 addWriteRes(SchedRW.TheDef, Idx); 2014 } 2015 else if (IsRead && SchedRW.TheDef->isSubClassOf("SchedReadAdvance")) { 2016 for (unsigned Idx : ProcIndices) 2017 addReadAdvance(SchedRW.TheDef, Idx); 2018 } 2019 } 2020 for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end(); 2021 AI != AE; ++AI) { 2022 IdxVec AliasProcIndices; 2023 if ((*AI)->getValueInit("SchedModel")->isComplete()) { 2024 AliasProcIndices.push_back( 2025 getProcModel((*AI)->getValueAsDef("SchedModel")).Index); 2026 } 2027 else 2028 AliasProcIndices = ProcIndices; 2029 const CodeGenSchedRW &AliasRW = 
getSchedRW((*AI)->getValueAsDef("AliasRW")); 2030 assert(AliasRW.IsRead == IsRead && "cannot alias reads to writes"); 2031 2032 IdxVec ExpandedRWs; 2033 expandRWSequence(AliasRW.Index, ExpandedRWs, IsRead); 2034 for (IdxIter SI = ExpandedRWs.begin(), SE = ExpandedRWs.end(); 2035 SI != SE; ++SI) { 2036 collectRWResources(*SI, IsRead, AliasProcIndices); 2037 } 2038 } 2039} 2040 2041// Collect resources for a set of read/write types and processor indices. 2042void CodeGenSchedModels::collectRWResources(ArrayRef<unsigned> Writes, 2043 ArrayRef<unsigned> Reads, 2044 ArrayRef<unsigned> ProcIndices) { 2045 for (unsigned Idx : Writes) 2046 collectRWResources(Idx, /*IsRead=*/false, ProcIndices); 2047 2048 for (unsigned Idx : Reads) 2049 collectRWResources(Idx, /*IsRead=*/true, ProcIndices); 2050} 2051 2052// Find the processor's resource units for this kind of resource. 2053Record *CodeGenSchedModels::findProcResUnits(Record *ProcResKind, 2054 const CodeGenProcModel &PM, 2055 ArrayRef<SMLoc> Loc) const { 2056 if (ProcResKind->isSubClassOf("ProcResourceUnits")) 2057 return ProcResKind; 2058 2059 Record *ProcUnitDef = nullptr; 2060 assert(!ProcResourceDefs.empty()); 2061 assert(!ProcResGroups.empty()); 2062 2063 for (Record *ProcResDef : ProcResourceDefs) { 2064 if (ProcResDef->getValueAsDef("Kind") == ProcResKind 2065 && ProcResDef->getValueAsDef("SchedModel") == PM.ModelDef) { 2066 if (ProcUnitDef) { 2067 PrintFatalError(Loc, 2068 "Multiple ProcessorResourceUnits associated with " 2069 + ProcResKind->getName()); 2070 } 2071 ProcUnitDef = ProcResDef; 2072 } 2073 } 2074 for (Record *ProcResGroup : ProcResGroups) { 2075 if (ProcResGroup == ProcResKind 2076 && ProcResGroup->getValueAsDef("SchedModel") == PM.ModelDef) { 2077 if (ProcUnitDef) { 2078 PrintFatalError(Loc, 2079 "Multiple ProcessorResourceUnits associated with " 2080 + ProcResKind->getName()); 2081 } 2082 ProcUnitDef = ProcResGroup; 2083 } 2084 } 2085 if (!ProcUnitDef) { 2086 PrintFatalError(Loc, 2087 "No 
ProcessorResources associated with " 2088 + ProcResKind->getName()); 2089 } 2090 return ProcUnitDef; 2091} 2092 2093// Iteratively add a resource and its super resources. 2094void CodeGenSchedModels::addProcResource(Record *ProcResKind, 2095 CodeGenProcModel &PM, 2096 ArrayRef<SMLoc> Loc) { 2097 while (true) { 2098 Record *ProcResUnits = findProcResUnits(ProcResKind, PM, Loc); 2099 2100 // See if this ProcResource is already associated with this processor. 2101 if (is_contained(PM.ProcResourceDefs, ProcResUnits)) 2102 return; 2103 2104 PM.ProcResourceDefs.push_back(ProcResUnits); 2105 if (ProcResUnits->isSubClassOf("ProcResGroup")) 2106 return; 2107 2108 if (!ProcResUnits->getValueInit("Super")->isComplete()) 2109 return; 2110 2111 ProcResKind = ProcResUnits->getValueAsDef("Super"); 2112 } 2113} 2114 2115// Add resources for a SchedWrite to this processor if they don't exist. 2116void CodeGenSchedModels::addWriteRes(Record *ProcWriteResDef, unsigned PIdx) { 2117 assert(PIdx && "don't add resources to an invalid Processor model"); 2118 2119 RecVec &WRDefs = ProcModels[PIdx].WriteResDefs; 2120 if (is_contained(WRDefs, ProcWriteResDef)) 2121 return; 2122 WRDefs.push_back(ProcWriteResDef); 2123 2124 // Visit ProcResourceKinds referenced by the newly discovered WriteRes. 2125 RecVec ProcResDefs = ProcWriteResDef->getValueAsListOfDefs("ProcResources"); 2126 for (RecIter WritePRI = ProcResDefs.begin(), WritePRE = ProcResDefs.end(); 2127 WritePRI != WritePRE; ++WritePRI) { 2128 addProcResource(*WritePRI, ProcModels[PIdx], ProcWriteResDef->getLoc()); 2129 } 2130} 2131 2132// Add resources for a ReadAdvance to this processor if they don't exist. 
2133void CodeGenSchedModels::addReadAdvance(Record *ProcReadAdvanceDef, 2134 unsigned PIdx) { 2135 RecVec &RADefs = ProcModels[PIdx].ReadAdvanceDefs; 2136 if (is_contained(RADefs, ProcReadAdvanceDef)) 2137 return; 2138 RADefs.push_back(ProcReadAdvanceDef); 2139} 2140 2141unsigned CodeGenProcModel::getProcResourceIdx(Record *PRDef) const { 2142 RecIter PRPos = find(ProcResourceDefs, PRDef); 2143 if (PRPos == ProcResourceDefs.end()) 2144 PrintFatalError(PRDef->getLoc(), "ProcResource def is not included in " 2145 "the ProcResources list for " + ModelName); 2146 // Idx=0 is reserved for invalid. 2147 return 1 + (PRPos - ProcResourceDefs.begin()); 2148} 2149 2150bool CodeGenProcModel::isUnsupported(const CodeGenInstruction &Inst) const { 2151 for (const Record *TheDef : UnsupportedFeaturesDefs) { 2152 for (const Record *PredDef : Inst.TheDef->getValueAsListOfDefs("Predicates")) { 2153 if (TheDef->getName() == PredDef->getName()) 2154 return true; 2155 } 2156 } 2157 return false; 2158} 2159 2160#ifndef NDEBUG 2161void CodeGenProcModel::dump() const { 2162 dbgs() << Index << ": " << ModelName << " " 2163 << (ModelDef ? ModelDef->getName() : "inferred") << " " 2164 << (ItinsDef ? ItinsDef->getName() : "no itinerary") << '\n'; 2165} 2166 2167void CodeGenSchedRW::dump() const { 2168 dbgs() << Name << (IsVariadic ? 
" (V) " : " "); 2169 if (IsSequence) { 2170 dbgs() << "("; 2171 dumpIdxVec(Sequence); 2172 dbgs() << ")"; 2173 } 2174} 2175 2176void CodeGenSchedClass::dump(const CodeGenSchedModels* SchedModels) const { 2177 dbgs() << "SCHEDCLASS " << Index << ":" << Name << '\n' 2178 << " Writes: "; 2179 for (unsigned i = 0, N = Writes.size(); i < N; ++i) { 2180 SchedModels->getSchedWrite(Writes[i]).dump(); 2181 if (i < N-1) { 2182 dbgs() << '\n'; 2183 dbgs().indent(10); 2184 } 2185 } 2186 dbgs() << "\n Reads: "; 2187 for (unsigned i = 0, N = Reads.size(); i < N; ++i) { 2188 SchedModels->getSchedRead(Reads[i]).dump(); 2189 if (i < N-1) { 2190 dbgs() << '\n'; 2191 dbgs().indent(10); 2192 } 2193 } 2194 dbgs() << "\n ProcIdx: "; dumpIdxVec(ProcIndices); dbgs() << '\n'; 2195 if (!Transitions.empty()) { 2196 dbgs() << "\n Transitions for Proc "; 2197 for (const CodeGenSchedTransition &Transition : Transitions) { 2198 dumpIdxVec(Transition.ProcIndices); 2199 } 2200 } 2201} 2202 2203void PredTransitions::dump() const { 2204 dbgs() << "Expanded Variants:\n"; 2205 for (std::vector<PredTransition>::const_iterator 2206 TI = TransVec.begin(), TE = TransVec.end(); TI != TE; ++TI) { 2207 dbgs() << "{"; 2208 for (SmallVectorImpl<PredCheck>::const_iterator 2209 PCI = TI->PredTerm.begin(), PCE = TI->PredTerm.end(); 2210 PCI != PCE; ++PCI) { 2211 if (PCI != TI->PredTerm.begin()) 2212 dbgs() << ", "; 2213 dbgs() << SchedModels.getSchedRW(PCI->RWIdx, PCI->IsRead).Name 2214 << ":" << PCI->Predicate->getName(); 2215 } 2216 dbgs() << "},\n => {"; 2217 for (SmallVectorImpl<SmallVector<unsigned,4>>::const_iterator 2218 WSI = TI->WriteSequences.begin(), WSE = TI->WriteSequences.end(); 2219 WSI != WSE; ++WSI) { 2220 dbgs() << "("; 2221 for (SmallVectorImpl<unsigned>::const_iterator 2222 WI = WSI->begin(), WE = WSI->end(); WI != WE; ++WI) { 2223 if (WI != WSI->begin()) 2224 dbgs() << ", "; 2225 dbgs() << SchedModels.getSchedWrite(*WI).Name; 2226 } 2227 dbgs() << "),"; 2228 } 2229 dbgs() << "}\n"; 2230 } 
2231} 2232#endif // NDEBUG 2233