Lines Matching refs:MI

78 void SkipIfDead(MachineInstr &MI);
80 void If(MachineInstr &MI);
81 void Else(MachineInstr &MI);
82 void Break(MachineInstr &MI);
83 void IfBreak(MachineInstr &MI);
84 void ElseBreak(MachineInstr &MI);
85 void Loop(MachineInstr &MI);
86 void EndCf(MachineInstr &MI);
88 void Kill(MachineInstr &MI);
89 void Branch(MachineInstr &MI);
91 void LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
93 void IndirectSrc(MachineInstr &MI);
94 void IndirectDst(MachineInstr &MI);
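
The matches at lines 78-94 are the per-pseudo-instruction lowering helpers declared inside SILowerControlFlowPass. A minimal sketch of the surrounding declaration context, assuming the usual MachineFunctionPass boilerplate (the TII member and the Skip helper are assumptions; they do not appear in the matches but are referenced by the bodies below):

    class SILowerControlFlowPass : public MachineFunctionPass {
      const SIInstrInfo *TII;                              // assumed member used by every helper
      void Skip(MachineInstr &From, MachineOperand &To);   // assumed; called from If()/Else()
      void SkipIfDead(MachineInstr &MI);
      void If(MachineInstr &MI);
      void Else(MachineInstr &MI);
      void Break(MachineInstr &MI);
      void IfBreak(MachineInstr &MI);
      void ElseBreak(MachineInstr &MI);
      void Loop(MachineInstr &MI);
      void EndCf(MachineInstr &MI);
      void Kill(MachineInstr &MI);
      void Branch(MachineInstr &MI);
      void LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
      void IndirectSrc(MachineInstr &MI);
      void IndirectDst(MachineInstr &MI);
    public:
      bool runOnMachineFunction(MachineFunction &MF) override;
    };
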
150 void SILowerControlFlowPass::SkipIfDead(MachineInstr &MI) {
152 MachineBasicBlock &MBB = *MI.getParent();
153 DebugLoc DL = MI.getDebugLoc();
160 MachineBasicBlock::iterator Insert = &MI;
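
Lines 150-160 are the head of SkipIfDead, which protects a pixel shader from running to its end with an all-dead EXEC mask after a kill. A sketch of what the body typically inserts after the Insert iterator (the branch distance and the null-export operands are assumptions, not shown in the matches):

    // If any lane is still live, jump over the terminate sequence.
    BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
        .addImm(3);                       // assumed skip distance over the sequence below
    // Otherwise export to the NULL target (operand list omitted here) ...
    // ... and end the wavefront.
    BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
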
183 void SILowerControlFlowPass::If(MachineInstr &MI) {
184 MachineBasicBlock &MBB = *MI.getParent();
185 DebugLoc DL = MI.getDebugLoc();
186 unsigned Reg = MI.getOperand(0).getReg();
187 unsigned Vcc = MI.getOperand(1).getReg();
189 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
192 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
196 Skip(MI, MI.getOperand(2));
198 MI.eraseFromParent();
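
Lines 183-198 give the whole shape of If(): the SI_IF pseudo becomes a saveexec/xor pair followed by an optional skip branch. A sketch of how the two BuildMI calls are completed (the operand chaining is an assumption inferred from the registers read above):

    // Reg <- old EXEC; EXEC <- EXEC & Vcc (only the lanes taking the 'then' side stay on).
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
        .addReg(Vcc);
    // Reg <- new EXEC ^ old EXEC: the lanes that skip the 'then' block,
    // to be re-enabled later by SI_ELSE / SI_END_CF.
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
        .addReg(AMDGPU::EXEC)
        .addReg(Reg);
    Skip(MI, MI.getOperand(2));           // branch over the 'then' block if EXEC became zero
    MI.eraseFromParent();
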
201 void SILowerControlFlowPass::Else(MachineInstr &MI) {
202 MachineBasicBlock &MBB = *MI.getParent();
203 DebugLoc DL = MI.getDebugLoc();
204 unsigned Dst = MI.getOperand(0).getReg();
205 unsigned Src = MI.getOperand(1).getReg();
211 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
215 Skip(MI, MI.getOperand(2));
217 MI.eraseFromParent();
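
Lines 201-217 are Else(). The match list skips the first BuildMI (around line 207); a plausible reconstruction, assuming it is an S_OR_SAVEEXEC_B64 placed at the top of the block, is:

    // Dst <- EXEC; EXEC <- Src | EXEC. Inserted at the start of the block so it
    // runs before anything else lowered there (placement is an assumption).
    BuildMI(MBB, MBB.getFirstNonPHI(), DL,
            TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
        .addReg(Src);
    // EXEC <- EXEC ^ Dst: flip execution to the lanes that did not run the 'then' side.
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC)
        .addReg(Dst);
    Skip(MI, MI.getOperand(2));
    MI.eraseFromParent();
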
220 void SILowerControlFlowPass::Break(MachineInstr &MI) {
221 MachineBasicBlock &MBB = *MI.getParent();
222 DebugLoc DL = MI.getDebugLoc();
224 unsigned Dst = MI.getOperand(0).getReg();
225 unsigned Src = MI.getOperand(1).getReg();
227 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
231 MI.eraseFromParent();
234 void SILowerControlFlowPass::IfBreak(MachineInstr &MI) {
235 MachineBasicBlock &MBB = *MI.getParent();
236 DebugLoc DL = MI.getDebugLoc();
238 unsigned Dst = MI.getOperand(0).getReg();
239 unsigned Vcc = MI.getOperand(1).getReg();
240 unsigned Src = MI.getOperand(2).getReg();
242 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
246 MI.eraseFromParent();
249 void SILowerControlFlowPass::ElseBreak(MachineInstr &MI) {
250 MachineBasicBlock &MBB = *MI.getParent();
251 DebugLoc DL = MI.getDebugLoc();
253 unsigned Dst = MI.getOperand(0).getReg();
254 unsigned Saved = MI.getOperand(1).getReg();
255 unsigned Src = MI.getOperand(2).getReg();
257 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
261 MI.eraseFromParent();
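
Lines 220-261 cover the three break variants, which differ only in what gets OR-ed into the accumulated break mask Dst. A sketch of the S_OR_B64 operand lists (assumed from the registers each function reads):

    // SI_BREAK:      Dst = EXEC  | Src   (every currently active lane breaks)
    // SI_IF_BREAK:   Dst = Vcc   | Src   (lanes where the condition holds break)
    // SI_ELSE_BREAK: Dst = Saved | Src   (lanes recorded by the inner SI_ELSE break)
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
        .addReg(AMDGPU::EXEC)             // Vcc / Saved in the other two variants
        .addReg(Src);
    MI.eraseFromParent();
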
264 void SILowerControlFlowPass::Loop(MachineInstr &MI) {
265 MachineBasicBlock &MBB = *MI.getParent();
266 DebugLoc DL = MI.getDebugLoc();
267 unsigned Src = MI.getOperand(0).getReg();
269 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
273 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
274 .addOperand(MI.getOperand(1));
276 MI.eraseFromParent();
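
Lines 264-276 show SI_LOOP: lanes whose break mask is set are removed from EXEC, and the backwards branch is taken only while some lane is still live. Filling in the operands (assumed):

    // EXEC <- EXEC & ~Src: drop the lanes that have hit a break.
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC)
        .addReg(Src);
    // Branch back to the loop header while any lane remains active.
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
        .addOperand(MI.getOperand(1));
    MI.eraseFromParent();
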
279 void SILowerControlFlowPass::EndCf(MachineInstr &MI) {
280 MachineBasicBlock &MBB = *MI.getParent();
281 DebugLoc DL = MI.getDebugLoc();
282 unsigned Reg = MI.getOperand(0).getReg();
289 MI.eraseFromParent();
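
Lines 279-289 are SI_END_CF, which re-enables the lanes masked off by the matching SI_IF/SI_ELSE. The one instruction between lines 282 and 289 is presumably an S_OR_B64 into EXEC at the start of the join block:

    // EXEC <- EXEC | Reg: restore the lanes saved at the start of the region.
    BuildMI(MBB, MBB.getFirstNonPHI(), DL,
            TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC)
        .addReg(Reg);
    MI.eraseFromParent();
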
292 void SILowerControlFlowPass::Branch(MachineInstr &MI) {
293 if (MI.getOperand(0).getMBB() == MI.getParent()->getNextNode())
294 MI.eraseFromParent();
299 void SILowerControlFlowPass::Kill(MachineInstr &MI) {
300 MachineBasicBlock &MBB = *MI.getParent();
301 DebugLoc DL = MI.getDebugLoc();
302 const MachineOperand &Op = MI.getOperand(0);
316 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
320 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32))
325 MI.eraseFromParent();
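
Lines 299-325 are SI_KILL. The two BuildMI calls handle the constant and the register form of the operand; the exact sign test on the immediate is an assumption here:

    if (Op.isImm()) {
      // Compile-time operand: only a negative value kills, and then it kills
      // every lane, so EXEC can simply be cleared.
      if (Op.getImm() & 0x80000000)
        BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
            .addImm(0);
    } else {
      // Per-lane kill: V_CMPX writes the comparison result straight into EXEC,
      // keeping only the lanes where the operand is non-negative.
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32))
          .addImm(0)
          .addOperand(Op);
    }
    MI.eraseFromParent();
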
328 void SILowerControlFlowPass::LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset) {
330 MachineBasicBlock &MBB = *MI.getParent();
331 DebugLoc DL = MI.getDebugLoc();
332 MachineBasicBlock::iterator I = MI;
334 unsigned Save = MI.getOperand(1).getReg();
335 unsigned Idx = MI.getOperand(3).getReg();
339 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
343 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
353 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
357 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
362 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
366 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32))
371 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
375 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
383 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
388 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
392 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
396 MI.eraseFromParent();
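
Lines 328-396 are LoadM0, which sets up M0 for the MovRel instruction built by the callers below. The fast path (lines 339/343) handles a uniform SGPR index with a single S_ADD_I32 or S_MOV_B32; the rest is the waterfall loop for a divergent VGPR index. A condensed sketch of the divergent path (operand chaining and the backwards branch distance are assumptions):

    // Divergent index: handle one index value per loop iteration.
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
        .addReg(AMDGPU::EXEC);                           // Save <- EXEC
    // Loop body, repeated until every lane has been serviced:
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), AMDGPU::VCC_LO)
        .addReg(Idx);                                    // pick one lane's index value
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(AMDGPU::VCC_LO);                         // make it uniform in M0
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32))
        .addReg(AMDGPU::M0)
        .addReg(Idx);                                    // which lanes wanted this index?
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
        .addReg(AMDGPU::VCC);                            // run only the matching lanes
    // ... optional S_ADD_I32 for Offset (line 375), then insert MovRel here ...
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
        .addReg(AMDGPU::VCC)
        .addReg(AMDGPU::EXEC);                           // switch to the still-pending lanes
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
        .addImm(-7);                                     // assumed: back to V_READFIRSTLANE
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
        .addReg(Save);                                   // restore the full EXEC mask
    MI.eraseFromParent();
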
426 void SILowerControlFlowPass::IndirectSrc(MachineInstr &MI) {
428 MachineBasicBlock &MBB = *MI.getParent();
429 DebugLoc DL = MI.getDebugLoc();
431 unsigned Dst = MI.getOperand(0).getReg();
432 unsigned Vec = MI.getOperand(2).getReg();
433 int Off = MI.getOperand(4).getImm();
443 LoadM0(MI, MovRel, Off);
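
Lines 426-443 lower SI_INDIRECT_SRC: they build a V_MOVRELS_B32 that reads the element of Vec selected by M0 into Dst, then hand it to LoadM0. A sketch, where SrcReg stands for the VGPR within Vec that Off selects (the real code computes it; this sketch just names it):

    // Build, but do not yet insert, the indexed read; LoadM0() places it
    // inside the M0 setup it emits.
    MachineInstr *MovRel =
        BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
            .addReg(SrcReg)
            .addReg(AMDGPU::M0, RegState::Implicit)
            .addReg(Vec, RegState::Implicit);
    LoadM0(MI, MovRel, Off);
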
446 void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) {
448 MachineBasicBlock &MBB = *MI.getParent();
449 DebugLoc DL = MI.getDebugLoc();
451 unsigned Dst = MI.getOperand(0).getReg();
452 int Off = MI.getOperand(4).getImm();
453 unsigned Val = MI.getOperand(5).getReg();
464 LoadM0(MI, MovRel, Off);
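
Lines 446-464 are the mirror image for SI_INDIRECT_DST: a V_MOVRELD_B32 writes Val into the element of Dst that M0 selects. A sketch, where DstReg stands for the Off-adjusted first VGPR of Dst and the register states are assumptions:

    MachineInstr *MovRel =
        BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
            .addReg(DstReg, RegState::Define)
            .addReg(Val)
            .addReg(AMDGPU::M0, RegState::Implicit)
            .addReg(Dst, RegState::Implicit);
    LoadM0(MI, MovRel, Off);
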
486 MachineInstr &MI = *I;
487 if (TII->isWQM(MI) || TII->isDS(MI))
491 if (TII->isFLAT(MI))
494 switch (MI.getOpcode()) {
498 If(MI);
502 Else(MI);
506 Break(MI);
510 IfBreak(MI);
514 ElseBreak(MI);
519 Loop(MI);
524 SkipIfDead(MI);
527 EndCf(MI);
532 SkipIfDead(MI);
535 Kill(MI);
539 Branch(MI);
547 IndirectSrc(MI);
555 IndirectDst(MI);
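
Lines 486-555 are the driver loop in runOnMachineFunction: every instruction is inspected, and each control-flow pseudo is dispatched to its helper. A sketch of the switch, with the usual SI pseudo opcodes as case labels; the Depth/HaveKill bookkeeping around SkipIfDead and the exact set of indirect-destination widths are assumptions:

    switch (MI.getOpcode()) {
      default: break;

      case AMDGPU::SI_IF:         If(MI);        break;
      case AMDGPU::SI_ELSE:       Else(MI);      break;
      case AMDGPU::SI_BREAK:      Break(MI);     break;
      case AMDGPU::SI_IF_BREAK:   IfBreak(MI);   break;
      case AMDGPU::SI_ELSE_BREAK: ElseBreak(MI); break;
      case AMDGPU::SI_LOOP:       Loop(MI);      break;

      case AMDGPU::SI_END_CF:
        // The SkipIfDead at line 524 only fires when this closes the
        // outermost region after a kill (assumed bookkeeping).
        SkipIfDead(MI);
        EndCf(MI);
        break;

      case AMDGPU::SI_KILL:
        SkipIfDead(MI);            // only at control-flow depth 0 (assumed)
        Kill(MI);
        break;

      case AMDGPU::S_BRANCH:      Branch(MI);    break;

      // Lines 547/555: the indirect-addressing pseudos. The real switch lists
      // one case per vector width, all funneled into the same two helpers.
      case AMDGPU::SI_INDIRECT_SRC:    IndirectSrc(MI); break;
      case AMDGPU::SI_INDIRECT_DST_V1: IndirectDst(MI); break;
    }
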