SILowerControlFlow.cpp revision 284677
//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits wide, one bit
/// per vector ALU) and then the Scalar ALU will AND the VCC register with
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %EXEC  // Restore the exec mask for the ELSE block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0 // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0    // Re-enable saved exec mask bits
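///
/// Loops are handled in the same spirit (see Loop() below): SI_LOOP becomes
/// an S_ANDN2_B64 that clears the lanes which have broken out of the loop
/// from EXEC, followed by an S_CBRANCH_EXECNZ back to the loop header while
/// any lanes remain active; SI_END_CF then ORs the saved mask back into EXEC.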
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

namespace {

class SILowerControlFlowPass : public MachineFunctionPass {

private:
  static const unsigned SkipThreshold = 12;

  static char ID;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  void Skip(MachineInstr &From, MachineOperand &To);
  void SkipIfDead(MachineInstr &MI);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  void LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
  void computeIndirectRegAndOffset(unsigned VecReg, unsigned &Reg, int &Offset);
  void IndirectSrc(MachineInstr &MI);
  void IndirectDst(MachineInstr &MI);

public:
  SILowerControlFlowPass(TargetMachine &tm) :
    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Lower control flow instructions";
  }

};

} // End anonymous namespace

char SILowerControlFlowPass::ID = 0;

FunctionPass *llvm::createSILowerControlFlowPass(TargetMachine &tm) {
  return new SILowerControlFlowPass(tm);
}

bool SILowerControlFlowPass::shouldSkip(MachineBasicBlock *From,
                                        MachineBasicBlock *To) {

  unsigned NumInstr = 0;

  for (MachineBasicBlock *MBB = From; MBB != To && !MBB->succ_empty();
       MBB = *MBB->succ_begin()) {

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         NumInstr < SkipThreshold && I != E; ++I) {

      if (I->isBundle() || !I->isBundled())
        if (++NumInstr >= SkipThreshold)
          return true;
    }
  }

  return false;
}

void SILowerControlFlowPass::Skip(MachineInstr &From, MachineOperand &To) {

  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return;

  DebugLoc DL = From.getDebugLoc();
  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addOperand(To)
    .addReg(AMDGPU::EXEC);
}

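// Insert an early-exit sequence after a kill: if this is a pixel shader and
// there is enough code left to make the jump worthwhile (shouldSkip), branch
// over the sequence while any lanes are still live; otherwise export to the
// NULL target and terminate the wavefront.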
void SILowerControlFlowPass::SkipIfDead(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (MBB.getParent()->getInfo<SIMachineFunctionInfo>()->getShaderType() !=
      ShaderType::PIXEL ||
      !shouldSkip(&MBB, &MBB.getParent()->back()))
    return;

  MachineBasicBlock::iterator Insert = &MI;
  ++Insert;

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addImm(3)
    .addReg(AMDGPU::EXEC);

  // Exec mask is zero: Export to NULL target...
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
    .addImm(0)
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addImm(0)
    .addImm(1)
    .addImm(1)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0);

  // ... and terminate wavefront
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
}

void SILowerControlFlowPass::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
    .addReg(Vcc);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Else(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
    .addReg(Src); // Saved EXEC

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Dst);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Vcc)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Saved)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1))
    .addReg(AMDGPU::EXEC);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Branch(MachineInstr &MI) {
  if (MI.getOperand(0).getMBB() == MI.getParent()->getNextNode())
    MI.eraseFromParent();

  // If these aren't equal, this is probably an infinite loop.
}

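// Lower SI_KILL. A negative compile-time constant clears the entire exec
// mask; a register operand is compared against zero with V_CMPX_LE_F32,
// which disables every lane holding a negative value.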
void SILowerControlFlowPass::Kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Op = MI.getOperand(0);

#ifndef NDEBUG
  const SIMachineFunctionInfo *MFI
    = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  // Kill is only allowed in pixel / geometry shaders.
  assert(MFI->getShaderType() == ShaderType::PIXEL ||
         MFI->getShaderType() == ShaderType::GEOMETRY);
#endif

  // Clear this thread from the exec mask if the operand is negative
  if (Op.isImm()) {
    // Constant operand: Set exec mask to 0 or do nothing
    if (Op.getImm() & 0x80000000) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
        .addImm(0);
    }
  } else {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32), AMDGPU::VCC)
      .addImm(0)
      .addOperand(Op);
  }

  MI.eraseFromParent();
}

void SILowerControlFlowPass::LoadM0(MachineInstr &MI, MachineInstr *MovRel,
                                    int Offset) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I = MI;

  unsigned Save = MI.getOperand(1).getReg();
  unsigned Idx = MI.getOperand(3).getReg();

  if (AMDGPU::SReg_32RegClass.contains(Idx)) {
    if (Offset) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(Idx)
        .addImm(Offset);
    } else {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(Idx);
    }
    MBB.insert(I, MovRel);
  } else {

    assert(AMDGPU::SReg_64RegClass.contains(Save));
    assert(AMDGPU::VGPR_32RegClass.contains(Idx));

    // Save the EXEC mask
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
      .addReg(AMDGPU::EXEC);

    // Read the next variant into VCC (lower 32 bits) <- also loop target
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
            AMDGPU::VCC_LO)
      .addReg(Idx);

    // Move index from VCC into M0
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addReg(AMDGPU::VCC_LO);

    // Compare the just read M0 value to all possible Idx values
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32), AMDGPU::VCC)
      .addReg(AMDGPU::M0)
      .addReg(Idx);

    // Update EXEC, save the original EXEC value to VCC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
      .addReg(AMDGPU::VCC);

    if (Offset) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(AMDGPU::M0)
        .addImm(Offset);
    }
    // Do the actual move
    MBB.insert(I, MovRel);

    // Update EXEC, switch all done bits to 0 and all todo bits to 1
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
      .addReg(AMDGPU::EXEC)
      .addReg(AMDGPU::VCC);

    // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
      .addImm(-7)
      .addReg(AMDGPU::EXEC);

    // Restore EXEC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
      .addReg(Save);

  }
  MI.eraseFromParent();
}

/// \param VecReg The register which holds element zero of the vector being
///        addressed into.
/// \param[out] Reg The base register to use in the indirect addressing
///        instruction.
/// \param[in,out] Offset As an input, this is the constant offset part of the
///        indirect index, e.g. v0 = v[VecReg + Offset]. As an output, this is
///        a constant value that needs to be added to the value stored in M0.
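///
/// For example (hypothetical register assignment, with the vector starting
/// at VGPR4): an input Offset of 1 is folded into the base register, giving
/// Reg = VGPR5 and Offset = 0. If folding would underflow past register
/// index 0 (e.g. Offset = -6, giving index -2), the outputs are Reg = VGPR0
/// and Offset = -2, and that residue is added to M0 at runtime.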
void SILowerControlFlowPass::computeIndirectRegAndOffset(unsigned VecReg,
                                                         unsigned &Reg,
                                                         int &Offset) {
  unsigned SubReg = TRI->getSubReg(VecReg, AMDGPU::sub0);
  if (!SubReg)
    SubReg = VecReg;

  const TargetRegisterClass *RC = TRI->getPhysRegClass(SubReg);
  int RegIdx = TRI->getHWRegIndex(SubReg) + Offset;

  if (RegIdx < 0) {
    Offset = RegIdx;
    RegIdx = 0;
  } else {
    Offset = 0;
  }

  Reg = RC->getRegister(RegIdx);
}

void SILowerControlFlowPass::IndirectSrc(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vec = MI.getOperand(2).getReg();
  int Off = MI.getOperand(4).getImm();
  unsigned Reg;

  computeIndirectRegAndOffset(Vec, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
      .addReg(Reg)
      .addReg(AMDGPU::M0, RegState::Implicit)
      .addReg(Vec, RegState::Implicit);

  LoadM0(MI, MovRel, Off);
}

void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  int Off = MI.getOperand(4).getImm();
  unsigned Val = MI.getOperand(5).getReg();
  unsigned Reg;

  computeIndirectRegAndOffset(Dst, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
      .addReg(Reg, RegState::Define)
      .addReg(Val)
      .addReg(AMDGPU::M0, RegState::Implicit)
      .addReg(Dst, RegState::Implicit);

  LoadM0(MI, MovRel, Off);
}

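// Main entry point: lower all SI_* control flow pseudos in place, tracking
// the nesting depth so that a kill inside nested control flow only emits its
// early-exit sequence once the outermost construct closes, then add the
// whole-quad-mode and flat-scratch prologues if any instruction needs them.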
bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  TRI =
      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  bool HaveKill = false;
  bool NeedWQM = false;
  bool NeedFlat = false;
  unsigned Depth = 0;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;
      if (TII->isWQM(MI.getOpcode()) || TII->isDS(MI.getOpcode()))
        NeedWQM = true;

      // Flat uses m0 in case it needs to access LDS.
      if (TII->isFLAT(MI.getOpcode()))
        NeedFlat = true;

      switch (MI.getOpcode()) {
        default: break;
        case AMDGPU::SI_IF:
          ++Depth;
          If(MI);
          break;

        case AMDGPU::SI_ELSE:
          Else(MI);
          break;

        case AMDGPU::SI_BREAK:
          Break(MI);
          break;

        case AMDGPU::SI_IF_BREAK:
          IfBreak(MI);
          break;

        case AMDGPU::SI_ELSE_BREAK:
          ElseBreak(MI);
          break;

        case AMDGPU::SI_LOOP:
          ++Depth;
          Loop(MI);
          break;

        case AMDGPU::SI_END_CF:
          if (--Depth == 0 && HaveKill) {
            SkipIfDead(MI);
            HaveKill = false;
          }
          EndCf(MI);
          break;

        case AMDGPU::SI_KILL:
          if (Depth == 0)
            SkipIfDead(MI);
          else
            HaveKill = true;
          Kill(MI);
          break;

        case AMDGPU::S_BRANCH:
          Branch(MI);
          break;

        case AMDGPU::SI_INDIRECT_SRC:
          IndirectSrc(MI);
          break;

        case AMDGPU::SI_INDIRECT_DST_V1:
        case AMDGPU::SI_INDIRECT_DST_V2:
        case AMDGPU::SI_INDIRECT_DST_V4:
        case AMDGPU::SI_INDIRECT_DST_V8:
        case AMDGPU::SI_INDIRECT_DST_V16:
          IndirectDst(MI);
          break;
      }
    }
  }

  if (NeedWQM && MFI->getShaderType() == ShaderType::PIXEL) {
    MachineBasicBlock &MBB = MF.front();
    BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
            AMDGPU::EXEC).addReg(AMDGPU::EXEC);
  }

  // FIXME: This seems inappropriate to do here.
  if (NeedFlat && MFI->IsKernel) {
    // Insert the prologue initializing the SGPRs pointing to the scratch space
    // for flat accesses.
    const MachineFrameInfo *FrameInfo = MF.getFrameInfo();

    // TODO: What to use with function calls?

    // FIXME: This is reporting stack size that is used in a scratch buffer
    // rather than registers as well.
    uint64_t StackSizeBytes = FrameInfo->getStackSize();

    int IndirectBegin
      = static_cast<const AMDGPUInstrInfo*>(TII)->getIndirectIndexBegin(MF);
    // Convert register index to 256-byte unit.
    uint64_t StackOffset = IndirectBegin < 0 ? 0 : (4 * IndirectBegin / 256);

    assert(StackSizeBytes < 0xffff && StackOffset < 0xffff &&
           "Stack limits should be smaller than 16 bits");

    // Initialize the flat scratch register pair.
    // TODO: Can we use one s_mov_b64 here?

    // Offset is in units of 256 bytes.
    MachineBasicBlock &MBB = MF.front();
    DebugLoc NoDL;
    MachineBasicBlock::iterator Start = MBB.getFirstNonPHI();
    const MCInstrDesc &SMovK = TII->get(AMDGPU::S_MOVK_I32);

    assert(isInt<16>(StackOffset) && isInt<16>(StackSizeBytes));

    BuildMI(MBB, Start, NoDL, SMovK, AMDGPU::FLAT_SCR_LO)
      .addImm(StackOffset);

    // Documentation says size is "per-thread scratch size in bytes"
    BuildMI(MBB, Start, NoDL, SMovK, AMDGPU::FLAT_SCR_HI)
      .addImm(StackSizeBytes);
  }

  return true;
}