SIFrameLowering.cpp revision 293265
//===----------------------- SIFrameLowering.cpp --------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;


static bool hasOnlySGPRSpills(const SIMachineFunctionInfo *FuncInfo,
                              const MachineFrameInfo *FrameInfo) {
  if (!FuncInfo->hasSpilledSGPRs())
    return false;

  if (FuncInfo->hasSpilledVGPRs())
    return false;

  for (int I = FrameInfo->getObjectIndexBegin(),
         E = FrameInfo->getObjectIndexEnd(); I != E; ++I) {
    if (!FrameInfo->isSpillSlotObjectIndex(I))
      return false;
  }

  return true;
}

static ArrayRef<MCPhysReg> getAllSGPR128() {
  return makeArrayRef(AMDGPU::SReg_128RegClass.begin(),
                      AMDGPU::SReg_128RegClass.getNumRegs());
}

static ArrayRef<MCPhysReg> getAllSGPRs() {
  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(),
                      AMDGPU::SGPR_32RegClass.getNumRegs());
}

void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  if (!MF.getFrameInfo()->hasStackObjects())
    return;

  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If we only have SGPR spills, we won't actually be using scratch memory
  // since these spill to VGPRs.
  //
  // FIXME: We should be cleaning up these unused SGPR spill frame indices
  // somewhere.
  if (hasOnlySGPRSpills(MFI, MF.getFrameInfo()))
    return;

  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();

  // We need to insert initialization of the scratch resource descriptor.
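  // The scratch resource descriptor is a 128-bit buffer descriptor held in
  // four consecutive SGPRs and used for all scratch (private segment) memory
  // accesses; the scratch wave offset is a single 32-bit SGPR holding this
  // wave's byte offset into the scratch backing memory.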
  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  assert(ScratchRsrcReg != AMDGPU::NoRegister);

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();
  assert(ScratchWaveOffsetReg != AMDGPU::NoRegister);

  unsigned PreloadedScratchWaveOffsetReg = TRI->getPreloadedValue(
    MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);

  unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;
  if (ST.isAmdHsaOS()) {
    PreloadedPrivateBufferReg = TRI->getPreloadedValue(
      MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
  }

  // If we reserved the original input registers, we don't need to copy to the
  // reserved registers.
  if (ScratchRsrcReg == PreloadedPrivateBufferReg) {
    // We should always reserve these 5 registers at the same time.
    assert(ScratchWaveOffsetReg == PreloadedScratchWaveOffsetReg &&
           "scratch wave offset and private segment buffer inconsistent");
    return;
  }


  // We added live-ins during argument lowering, but since they were not used
  // they were deleted. We're adding the uses now, so add them back.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
  MBB.addLiveIn(PreloadedScratchWaveOffsetReg);

  if (ST.isAmdHsaOS()) {
    MRI.addLiveIn(PreloadedPrivateBufferReg);
    MBB.addLiveIn(PreloadedPrivateBufferReg);
  }

  if (!ST.hasSGPRInitBug()) {
    // We reserved the last registers for this. Shift it down to the end of those
    // which were actually used.
    //
    // FIXME: It might be safer to use a pseudoregister before replacement.

    // FIXME: We should be able to eliminate unused input registers. We only
    // cannot do this for the resources required for scratch access. For now we
    // skip over user SGPRs and may leave unused holes.

    // We find the resource first because it has an alignment requirement.
    if (ScratchRsrcReg == TRI->reservedPrivateSegmentBufferReg(MF)) {
      MachineRegisterInfo &MRI = MF.getRegInfo();

      unsigned NumPreloaded = MFI->getNumPreloadedSGPRs() / 4;
      // Skip the last 2 elements because the last one is reserved for VCC, and
      // this is the 2nd to last element already.
      for (MCPhysReg Reg : getAllSGPR128().drop_back(2).slice(NumPreloaded)) {
        // Pick the first unallocated one. Make sure we don't clobber the other
        // reserved input we needed.
        if (!MRI.isPhysRegUsed(Reg)) {
          assert(MRI.isAllocatable(Reg));
          MRI.replaceRegWith(ScratchRsrcReg, Reg);
          ScratchRsrcReg = Reg;
          MFI->setScratchRSrcReg(ScratchRsrcReg);
          break;
        }
      }
    }

    if (ScratchWaveOffsetReg == TRI->reservedPrivateSegmentWaveByteOffsetReg(MF)) {
      MachineRegisterInfo &MRI = MF.getRegInfo();
      // Skip the last 2 elements because the last one is reserved for VCC, and
      // this is the 2nd to last element already.
      unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();
      for (MCPhysReg Reg : getAllSGPRs().drop_back(6).slice(NumPreloaded)) {
        // Pick the first unallocated SGPR. Be careful not to pick an alias of the
        // scratch descriptor, since we haven't added its uses yet.
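        // (replaceRegWith rewrites every existing reference to the old
        // register, but the copies into ScratchRsrcReg are only emitted later
        // in this prologue, so isPhysRegUsed cannot see them; the assert below
        // guards against picking one of its sub-registers.)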
        if (!MRI.isPhysRegUsed(Reg)) {
          assert(MRI.isAllocatable(Reg) &&
                 !TRI->isSubRegisterEq(ScratchRsrcReg, Reg));

          MRI.replaceRegWith(ScratchWaveOffsetReg, Reg);
          ScratchWaveOffsetReg = Reg;
          MFI->setScratchWaveOffsetReg(ScratchWaveOffsetReg);
          break;
        }
      }
    }
  }


  assert(!TRI->isSubRegister(ScratchRsrcReg, ScratchWaveOffsetReg));

  const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);
  MachineBasicBlock::iterator I = MBB.begin();
  DebugLoc DL;

  if (PreloadedScratchWaveOffsetReg != ScratchWaveOffsetReg) {
    // Make sure we emit the copy for the offset first. We may have chosen to copy
    // the buffer resource into a register that aliases the input offset register.
    BuildMI(MBB, I, DL, SMovB32, ScratchWaveOffsetReg)
      .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill);
  }

  if (ST.isAmdHsaOS()) {
    // Insert copies from argument register.
    assert(
      !TRI->isSubRegisterEq(PreloadedPrivateBufferReg, ScratchRsrcReg) &&
      !TRI->isSubRegisterEq(PreloadedPrivateBufferReg, ScratchWaveOffsetReg));

    unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);
    unsigned Rsrc23 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2_sub3);

    unsigned Lo = TRI->getSubReg(PreloadedPrivateBufferReg, AMDGPU::sub0_sub1);
    unsigned Hi = TRI->getSubReg(PreloadedPrivateBufferReg, AMDGPU::sub2_sub3);

    const MCInstrDesc &SMovB64 = TII->get(AMDGPU::S_MOV_B64);

    BuildMI(MBB, I, DL, SMovB64, Rsrc01)
      .addReg(Lo, RegState::Kill);
    BuildMI(MBB, I, DL, SMovB64, Rsrc23)
      .addReg(Hi, RegState::Kill);
  } else {
    unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
    unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
    unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and setup the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();
    BuildMI(MBB, I, DL, SMovB32, Rsrc0)
      .addExternalSymbol("SCRATCH_RSRC_DWORD0")
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc1)
      .addExternalSymbol("SCRATCH_RSRC_DWORD1")
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
      .addImm(Rsrc23 & 0xffffffff)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
      .addImm(Rsrc23 >> 32)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  }

  // Make the register selected live throughout the function.
  for (MachineBasicBlock &OtherBB : MF) {
    if (&OtherBB == &MBB)
      continue;

    OtherBB.addLiveIn(ScratchRsrcReg);
    OtherBB.addLiveIn(ScratchWaveOffsetReg);
  }
}

void SIFrameLowering::processFunctionBeforeFrameFinalized(
  MachineFunction &MF,
  RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!MFI->hasStackObjects())
    return;

  bool MayNeedScavengingEmergencySlot = MFI->hasStackObjects();

  assert((RS || !MayNeedScavengingEmergencySlot) &&
         "RegScavenger required if spilling");

  if (MayNeedScavengingEmergencySlot) {
    int ScavengeFI = MFI->CreateSpillStackObject(
      AMDGPU::SGPR_32RegClass.getSize(),
      AMDGPU::SGPR_32RegClass.getAlignment());
    RS->addScavengingFrameIndex(ScavengeFI);
  }
}