R600ISelLowering.cpp revision 249423
//===-- R600ISelLowering.cpp - R600 DAG Lowering Implementation -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for R600
//
//===----------------------------------------------------------------------===//

#include "R600ISelLowering.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Function.h"

using namespace llvm;

R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
    AMDGPUTargetLowering(TM),
    TII(static_cast<const R600InstrInfo*>(TM.getInstrInfo())) {
  addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::R600_Reg32RegClass);
  addRegisterClass(MVT::v4i32, &AMDGPU::R600_Reg128RegClass);
  addRegisterClass(MVT::i32, &AMDGPU::R600_Reg32RegClass);
  computeRegisterProperties();

  setOperationAction(ISD::FADD, MVT::v4f32, Expand);
  setOperationAction(ISD::FMUL, MVT::v4f32, Expand);
  setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
  setOperationAction(ISD::FSUB, MVT::v4f32, Expand);

  setOperationAction(ISD::ADD, MVT::v4i32, Expand);
  setOperationAction(ISD::AND, MVT::v4i32, Expand);
  setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Expand);
  setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Expand);
  setOperationAction(ISD::UDIV, MVT::v4i32, Expand);
  setOperationAction(ISD::UREM, MVT::v4i32, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i32, Expand);

  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);

  setOperationAction(ISD::FSUB, MVT::f32, Expand);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::ROTL, MVT::i32, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Custom);

  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);

  // Legalize loads and stores to the private address space.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Custom);
  setLoadExtAction(ISD::EXTLOAD, MVT::i8, Custom);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i8, Custom);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Custom);
  setOperationAction(ISD::STORE, MVT::i8, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);

  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);

  setTargetDAGCombine(ISD::FP_ROUND);
  setTargetDAGCombine(ISD::FP_TO_SINT);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::SELECT_CC);

  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setSchedulingPreference(Sched::VLIW);
}
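// Informal note on the action table above: Expand asks the generic legalizer
// to rewrite a node in terms of operations the target does support (for the
// v4f32/v4i32 entries here that effectively unrolls into four scalar f32/i32
// operations), while Custom routes the node to the LowerOperation() and
// ReplaceNodeResults() hooks defined later in this file. Roughly, for one of
// the Expand entries:
//
//   (fadd v4f32 %a, %b)
//     --> (build_vector (fadd %a.x, %b.x), (fadd %a.y, %b.y),
//                       (fadd %a.z, %b.z), (fadd %a.w, %b.w))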
MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
    MachineInstr * MI, MachineBasicBlock * BB) const {
  MachineFunction * MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock::iterator I = *MI;

  switch (MI->getOpcode()) {
  default: return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  case AMDGPU::CLAMP_R600: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
                                                   AMDGPU::MOV,
                                                   MI->getOperand(0).getReg(),
                                                   MI->getOperand(1).getReg());
    TII->addFlag(NewMI, 0, MO_FLAG_CLAMP);
    break;
  }

  case AMDGPU::FABS_R600: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
                                                   AMDGPU::MOV,
                                                   MI->getOperand(0).getReg(),
                                                   MI->getOperand(1).getReg());
    TII->addFlag(NewMI, 0, MO_FLAG_ABS);
    break;
  }

  case AMDGPU::FNEG_R600: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
                                                   AMDGPU::MOV,
                                                   MI->getOperand(0).getReg(),
                                                   MI->getOperand(1).getReg());
    TII->addFlag(NewMI, 0, MO_FLAG_NEG);
    break;
  }

  case AMDGPU::MASK_WRITE: {
    unsigned maskedRegister = MI->getOperand(0).getReg();
    assert(TargetRegisterInfo::isVirtualRegister(maskedRegister));
    MachineInstr * defInstr = MRI.getVRegDef(maskedRegister);
    TII->addFlag(defInstr, 0, MO_FLAG_MASK);
    break;
  }

  case AMDGPU::MOV_IMM_F32:
    TII->buildMovImm(*BB, I, MI->getOperand(0).getReg(),
                     MI->getOperand(1).getFPImm()->getValueAPF()
                         .bitcastToAPInt().getZExtValue());
    break;
  case AMDGPU::MOV_IMM_I32:
    TII->buildMovImm(*BB, I, MI->getOperand(0).getReg(),
                     MI->getOperand(1).getImm());
    break;
  case AMDGPU::CONST_COPY: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, MI, AMDGPU::MOV,
        MI->getOperand(0).getReg(), AMDGPU::ALU_CONST);
    TII->setImmOperand(NewMI, R600Operands::SRC0_SEL,
        MI->getOperand(1).getImm());
    break;
  }

  case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
  case AMDGPU::RAT_WRITE_CACHELESS_128_eg: {
    unsigned EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;

    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
            .addOperand(MI->getOperand(0))
            .addOperand(MI->getOperand(1))
            .addImm(EOP); // Set End of program bit
    break;
  }

  case AMDGPU::TXD: {
    unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
    unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);

    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0)
            .addOperand(MI->getOperand(3))
            .addOperand(MI->getOperand(4))
            .addOperand(MI->getOperand(5))
            .addOperand(MI->getOperand(6));
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1)
            .addOperand(MI->getOperand(2))
            .addOperand(MI->getOperand(4))
            .addOperand(MI->getOperand(5))
            .addOperand(MI->getOperand(6));
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_G))
            .addOperand(MI->getOperand(0))
            .addOperand(MI->getOperand(1))
            .addOperand(MI->getOperand(4))
            .addOperand(MI->getOperand(5))
            .addOperand(MI->getOperand(6))
            .addReg(T0, RegState::Implicit)
            .addReg(T1, RegState::Implicit);
    break;
  }

  case AMDGPU::TXD_SHADOW: {
    unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
    unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);

    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0)
            .addOperand(MI->getOperand(3))
            .addOperand(MI->getOperand(4))
            .addOperand(MI->getOperand(5))
            .addOperand(MI->getOperand(6));
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1)
            .addOperand(MI->getOperand(2))
            .addOperand(MI->getOperand(4))
            .addOperand(MI->getOperand(5))
            .addOperand(MI->getOperand(6));
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_C_G))
            .addOperand(MI->getOperand(0))
            .addOperand(MI->getOperand(1))
            .addOperand(MI->getOperand(4))
            .addOperand(MI->getOperand(5))
            .addOperand(MI->getOperand(6))
            .addReg(T0, RegState::Implicit)
            .addReg(T1, RegState::Implicit);
    break;
  }

  case AMDGPU::BRANCH:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
            .addOperand(MI->getOperand(0));
    break;

  case AMDGPU::BRANCH_COND_f32: {
    MachineInstr *NewMI =
      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
              AMDGPU::PREDICATE_BIT)
              .addOperand(MI->getOperand(1))
              .addImm(OPCODE_IS_NOT_ZERO)
              .addImm(0); // Flags
    TII->addFlag(NewMI, 0, MO_FLAG_PUSH);
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND))
            .addOperand(MI->getOperand(0))
            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    break;
  }

  case AMDGPU::BRANCH_COND_i32: {
    MachineInstr *NewMI =
      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
              AMDGPU::PREDICATE_BIT)
              .addOperand(MI->getOperand(1))
              .addImm(OPCODE_IS_NOT_ZERO_INT)
              .addImm(0); // Flags
    TII->addFlag(NewMI, 0, MO_FLAG_PUSH);
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND))
            .addOperand(MI->getOperand(0))
            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    break;
  }

  case AMDGPU::EG_ExportSwz:
  case AMDGPU::R600_ExportSwz: {
    // Instruction is left unmodified if it's not the last one of its type.
    bool isLastInstructionOfItsType = true;
    unsigned InstExportType = MI->getOperand(1).getImm();
    for (MachineBasicBlock::iterator NextExportInst = llvm::next(I),
         EndBlock = BB->end(); NextExportInst != EndBlock;
         NextExportInst = llvm::next(NextExportInst)) {
      if (NextExportInst->getOpcode() == AMDGPU::EG_ExportSwz ||
          NextExportInst->getOpcode() == AMDGPU::R600_ExportSwz) {
        unsigned CurrentInstExportType = NextExportInst->getOperand(1)
            .getImm();
        if (CurrentInstExportType == InstExportType) {
          isLastInstructionOfItsType = false;
          break;
        }
      }
    }
    bool EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
    if (!EOP && !isLastInstructionOfItsType)
      return BB;
    unsigned CfInst = (MI->getOpcode() == AMDGPU::EG_ExportSwz) ? 84 : 40;
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
            .addOperand(MI->getOperand(0))
            .addOperand(MI->getOperand(1))
            .addOperand(MI->getOperand(2))
            .addOperand(MI->getOperand(3))
            .addOperand(MI->getOperand(4))
            .addOperand(MI->getOperand(5))
            .addOperand(MI->getOperand(6))
            .addImm(CfInst)
            .addImm(EOP);
    break;
  }
  case AMDGPU::RETURN: {
    // RETURN instructions must have the live-out registers as implicit uses,
    // otherwise they appear dead.
    R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
    MachineInstrBuilder MIB(*MF, MI);
    for (unsigned i = 0, e = MFI->LiveOuts.size(); i != e; ++i)
      MIB.addReg(MFI->LiveOuts[i], RegState::Implicit);
    return BB;
  }
  }

  MI->eraseFromParent();
  return BB;
}

//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//

using namespace llvm::Intrinsic;
using namespace llvm::AMDGPUIntrinsic;

SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::ROTL: return LowerROTL(Op, DAG);
  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::LOAD: return LowerLOAD(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_VOID: {
    SDValue Chain = Op.getOperand(0);
    unsigned IntrinsicID =
                         cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    switch (IntrinsicID) {
    case AMDGPUIntrinsic::AMDGPU_store_output: {
      MachineFunction &MF = DAG.getMachineFunction();
      R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
      int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex);
      MFI->LiveOuts.push_back(Reg);
      return DAG.getCopyToReg(Chain, Op.getDebugLoc(), Reg, Op.getOperand(2));
    }
    case AMDGPUIntrinsic::R600_store_swizzle: {
      const SDValue Args[8] = {
        Chain,
        Op.getOperand(2), // Export Value
        Op.getOperand(3), // ArrayBase
        Op.getOperand(4), // Type
        DAG.getConstant(0, MVT::i32), // SWZ_X
        DAG.getConstant(1, MVT::i32), // SWZ_Y
        DAG.getConstant(2, MVT::i32), // SWZ_Z
        DAG.getConstant(3, MVT::i32)  // SWZ_W
      };
      return DAG.getNode(AMDGPUISD::EXPORT, Op.getDebugLoc(), Op.getValueType(),
                         Args, 8);
    }

    // default for switch(IntrinsicID)
    default: break;
    }
    // break out of case ISD::INTRINSIC_VOID in switch(Op.getOpcode())
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrinsicID =
                         cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    EVT VT = Op.getValueType();
    DebugLoc DL = Op.getDebugLoc();
    switch (IntrinsicID) {
    default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
    case AMDGPUIntrinsic::R600_load_input: {
      int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex);
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass, Reg, VT);
    }

    case AMDGPUIntrinsic::R600_interp_input: {
      int slot = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
      int ijb = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
      MachineSDNode *interp;
      if (ijb < 0) {
        interp = DAG.getMachineNode(AMDGPU::INTERP_VEC_LOAD, DL,
            MVT::v4f32, DAG.getTargetConstant(slot / 4, MVT::i32));
        return DAG.getTargetExtractSubreg(
            TII->getRegisterInfo().getSubRegFromChannel(slot % 4),
            DL, MVT::f32, SDValue(interp, 0));
      }

      if (slot % 4 < 2)
        interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_XY, DL,
            MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4, MVT::i32),
            CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb + 1), MVT::f32),
            CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb), MVT::f32));
      else
        interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_ZW, DL,
            MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4, MVT::i32),
            CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb + 1), MVT::f32),
            CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb), MVT::f32));

      return SDValue(interp, slot % 2);
    }

    case r600_read_ngroups_x:
      return LowerImplicitParameter(DAG, VT, DL, 0);
    case r600_read_ngroups_y:
      return LowerImplicitParameter(DAG, VT, DL, 1);
    case r600_read_ngroups_z:
      return LowerImplicitParameter(DAG, VT, DL, 2);
    case r600_read_global_size_x:
      return LowerImplicitParameter(DAG, VT, DL, 3);
    case r600_read_global_size_y:
      return LowerImplicitParameter(DAG, VT, DL, 4);
    case r600_read_global_size_z:
      return LowerImplicitParameter(DAG, VT, DL, 5);
    case r600_read_local_size_x:
      return LowerImplicitParameter(DAG, VT, DL, 6);
    case r600_read_local_size_y:
      return LowerImplicitParameter(DAG, VT, DL, 7);
    case r600_read_local_size_z:
      return LowerImplicitParameter(DAG, VT, DL, 8);

    case r600_read_tgid_x:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T1_X, VT);
    case r600_read_tgid_y:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T1_Y, VT);
    case r600_read_tgid_z:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T1_Z, VT);
    case r600_read_tidig_x:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T0_X, VT);
    case r600_read_tidig_y:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T0_Y, VT);
    case r600_read_tidig_z:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T0_Z, VT);
    }
    // break out of case ISD::INTRINSIC_WO_CHAIN in switch(Op.getOpcode())
    break;
  }
  } // end switch(Op.getOpcode())
  return SDValue();
}

void R600TargetLowering::ReplaceNodeResults(SDNode *N,
                                            SmallVectorImpl<SDValue> &Results,
                                            SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default: return;
  case ISD::FP_TO_UINT: Results.push_back(LowerFPTOUINT(N->getOperand(0), DAG));
    return;
  case ISD::LOAD: {
    SDNode *Node = LowerLOAD(SDValue(N, 0), DAG).getNode();
    Results.push_back(SDValue(Node, 0));
    Results.push_back(SDValue(Node, 1));
    // XXX: LLVM seems not to replace Chain Value inside CustomWidenLowerNode
    // function
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SDValue(Node, 1));
    return;
  }
  case ISD::STORE:
    SDNode *Node = LowerSTORE(SDValue(N, 0), DAG).getNode();
    Results.push_back(SDValue(Node, 0));
    return;
  }
}

SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const {
  return DAG.getNode(
      ISD::SETCC,
      Op.getDebugLoc(),
      MVT::i1,
      Op, DAG.getConstantFP(0.0f, MVT::f32),
      DAG.getCondCode(ISD::SETNE)
      );
}

SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
                                                   DebugLoc DL,
                                                   unsigned DwordOffset) const {
  unsigned ByteOffset = DwordOffset * 4;
  PointerType * PtrType = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
                                           AMDGPUAS::PARAM_I_ADDRESS);

  // We shouldn't be using an offset wider than 16-bits for implicit parameters.
  assert(isInt<16>(ByteOffset));

  return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                     DAG.getConstant(ByteOffset, MVT::i32), // PTR
                     MachinePointerInfo(ConstantPointerNull::get(PtrType)),
                     false, false, false, 0);
}

SDValue R600TargetLowering::LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL =
      static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());

  FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
  assert(FIN);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF), MVT::i32);
}

SDValue R600TargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
  DebugLoc DL = Op.getDebugLoc();
  EVT VT = Op.getValueType();

  return DAG.getNode(AMDGPUISD::BITALIGN, DL, VT,
                     Op.getOperand(0),
                     Op.getOperand(0),
                     DAG.getNode(ISD::SUB, DL, VT,
                                 DAG.getConstant(32, MVT::i32),
                                 Op.getOperand(1)));
}
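// Informal derivation of the LowerROTL mapping above, assuming BITALIGN(a, b, c)
// selects 32 bits starting c bits into the 64-bit concatenation a:b, i.e.
// (a << (32 - c)) | (b >> c) for 0 < c < 32. With a = b = x and c = 32 - s:
//
//   BITALIGN(x, x, 32 - s) = (x << s) | (x >> (32 - s)) = rotl(x, s)
//
// which is why the shift amount is built as SUB(32, Op1) above.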
bool R600TargetLowering::isZero(SDValue Op) const {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
    return Cst->isNullValue();
  } else if (ConstantFPSDNode *CstFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CstFP->isZero();
  } else {
    return false;
  }
}

SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  DebugLoc DL = Op.getDebugLoc();
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);
  SDValue Temp;

  // LHS and RHS are guaranteed to be the same value type.
  EVT CompareVT = LHS.getValueType();

  // Check if we can lower this to a native operation.

  // Try to lower to a SET* instruction:
  //
  // SET* can match the following patterns:
  //
  // select_cc f32, f32, -1, 0, cc_any
  // select_cc f32, f32, 1.0f, 0.0f, cc_any
  // select_cc i32, i32, -1, 0, cc_any
  //

  // Move hardware True/False values to the correct operand.
  if (isHWTrueValue(False) && isHWFalseValue(True)) {
    ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
    std::swap(False, True);
    CC = DAG.getCondCode(ISD::getSetCCInverse(CCOpcode, CompareVT == MVT::i32));
  }

  if (isHWTrueValue(True) && isHWFalseValue(False) &&
      (CompareVT == VT || VT == MVT::i32)) {
    // This can be matched by a SET* instruction.
    return DAG.getNode(ISD::SELECT_CC, DL, VT, LHS, RHS, True, False, CC);
  }

  // Try to lower to a CND* instruction:
  //
  // CND* can match the following patterns:
  //
  // select_cc f32, 0.0, f32, f32, cc_any
  // select_cc f32, 0.0, i32, i32, cc_any
  // select_cc i32, 0,   f32, f32, cc_any
  // select_cc i32, 0,   i32, i32, cc_any
  //
  if (isZero(LHS) || isZero(RHS)) {
    SDValue Cond = (isZero(LHS) ? RHS : LHS);
    SDValue Zero = (isZero(LHS) ? LHS : RHS);
    ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
    if (CompareVT != VT) {
      // Bitcast True / False to the correct types. This will end up being
      // a nop, but it allows us to define only a single pattern in the
      // .TD files for each CND* instruction rather than having to have
      // one pattern for integer True/False and one for fp True/False.
      True = DAG.getNode(ISD::BITCAST, DL, CompareVT, True);
      False = DAG.getNode(ISD::BITCAST, DL, CompareVT, False);
    }
    if (isZero(LHS)) {
      CCOpcode = ISD::getSetCCSwappedOperands(CCOpcode);
    }

    switch (CCOpcode) {
    case ISD::SETONE:
    case ISD::SETUNE:
    case ISD::SETNE:
    case ISD::SETULE:
    case ISD::SETULT:
    case ISD::SETOLE:
    case ISD::SETOLT:
    case ISD::SETLE:
    case ISD::SETLT:
      CCOpcode = ISD::getSetCCInverse(CCOpcode, CompareVT == MVT::i32);
      Temp = True;
      True = False;
      False = Temp;
      break;
    default:
      break;
    }
    SDValue SelectNode = DAG.getNode(ISD::SELECT_CC, DL, CompareVT,
        Cond, Zero,
        True, False,
        DAG.getCondCode(CCOpcode));
    return DAG.getNode(ISD::BITCAST, DL, VT, SelectNode);
  }

  // Possible Min/Max pattern.
  SDValue MinMax = LowerMinMax(Op, DAG);
  if (MinMax.getNode()) {
    return MinMax;
  }

  // If we make it this far, it means we have no native instructions to handle
  // this SELECT_CC, so we must lower it.
  SDValue HWTrue, HWFalse;

  if (CompareVT == MVT::f32) {
    HWTrue = DAG.getConstantFP(1.0f, CompareVT);
    HWFalse = DAG.getConstantFP(0.0f, CompareVT);
  } else if (CompareVT == MVT::i32) {
    HWTrue = DAG.getConstant(-1, CompareVT);
    HWFalse = DAG.getConstant(0, CompareVT);
  } else {
    assert(!"Unhandled value type in LowerSELECT_CC");
  }

  // Lower this unsupported SELECT_CC into a combination of two supported
  // SELECT_CC operations.
  SDValue Cond = DAG.getNode(ISD::SELECT_CC, DL, CompareVT, LHS, RHS, HWTrue, HWFalse, CC);

  return DAG.getNode(ISD::SELECT_CC, DL, VT,
                     Cond, HWFalse,
                     True, False,
                     DAG.getCondCode(ISD::SETNE));
}
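// Informal example of the folds in LowerSELECT_CC above: given
//
//   (select_cc f32:%a, f32:%b, 0.0, 1.0, setolt)
//
// the True operand is the hardware false value, so the first fold swaps
// True/False and inverts the condition, producing
// (select_cc %a, %b, 1.0, 0.0, setuge), which a SET* pattern can then match.
// The CND* path plays the same game with condition codes: less-than style
// codes are inverted (with True/False swapped) so the .td files only need
// patterns for one of each complementary pair, roughly the E/GT/GE forms.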
SDValue R600TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  return DAG.getNode(ISD::SELECT_CC,
                     Op.getDebugLoc(),
                     Op.getValueType(),
                     Op.getOperand(0),
                     DAG.getConstant(0, MVT::i32),
                     Op.getOperand(1),
                     Op.getOperand(2),
                     DAG.getCondCode(ISD::SETNE));
}

/// LLVM generates byte-addressed pointers. For indirect addressing, we need to
/// convert these pointers to a register index. Each register holds
/// 16 bytes (4 x 32-bit sub-registers), but we need to take into account the
/// \p StackWidth, which tells us how many of the 4 sub-registers will be used
/// for indirect addressing.
SDValue R600TargetLowering::stackPtrToRegIndex(SDValue Ptr,
                                               unsigned StackWidth,
                                               SelectionDAG &DAG) const {
  unsigned SRLPad;
  switch (StackWidth) {
  case 1:
    SRLPad = 2;
    break;
  case 2:
    SRLPad = 3;
    break;
  case 4:
    SRLPad = 4;
    break;
  default: llvm_unreachable("Invalid stack width");
  }

  return DAG.getNode(ISD::SRL, Ptr.getDebugLoc(), Ptr.getValueType(), Ptr,
                     DAG.getConstant(SRLPad, MVT::i32));
}

void R600TargetLowering::getStackAddress(unsigned StackWidth,
                                         unsigned ElemIdx,
                                         unsigned &Channel,
                                         unsigned &PtrIncr) const {
  switch (StackWidth) {
  default:
  case 1:
    Channel = 0;
    if (ElemIdx > 0) {
      PtrIncr = 1;
    } else {
      PtrIncr = 0;
    }
    break;
  case 2:
    Channel = ElemIdx % 2;
    if (ElemIdx == 2) {
      PtrIncr = 1;
    } else {
      PtrIncr = 0;
    }
    break;
  case 4:
    Channel = ElemIdx;
    PtrIncr = 0;
    break;
  }
}
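// Informal worked example for the two helpers above: with StackWidth == 2 a
// stack "row" is two dwords (8 bytes), so stackPtrToRegIndex shifts the byte
// address right by SRLPad == 3. A v4 access then walks its elements as
//
//   ElemIdx 0 -> Channel 0, PtrIncr 0
//   ElemIdx 1 -> Channel 1, PtrIncr 0
//   ElemIdx 2 -> Channel 0, PtrIncr 1   (advance to the next row)
//   ElemIdx 3 -> Channel 1, PtrIncr 0
//
// where the PtrIncr values are applied cumulatively by the loops in
// LowerSTORE/LowerLOAD below.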
SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  DebugLoc DL = Op.getDebugLoc();
  StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
  SDValue Chain = Op.getOperand(0);
  SDValue Value = Op.getOperand(1);
  SDValue Ptr = Op.getOperand(2);

  if (StoreNode->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
      Ptr->getOpcode() != AMDGPUISD::DWORDADDR) {
    // Convert pointer from byte address to dword address.
    Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, Ptr.getValueType(),
                      DAG.getNode(ISD::SRL, DL, Ptr.getValueType(),
                                  Ptr, DAG.getConstant(2, MVT::i32)));

    if (StoreNode->isTruncatingStore() || StoreNode->isIndexed()) {
      assert(!"Truncated and indexed stores not supported yet");
    } else {
      Chain = DAG.getStore(Chain, DL, Value, Ptr, StoreNode->getMemOperand());
    }
    return Chain;
  }

  EVT ValueVT = Value.getValueType();

  if (StoreNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
    return SDValue();
  }

  // Lowering for indirect addressing.

  const MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering*>(
                                         getTargetMachine().getFrameLowering());
  unsigned StackWidth = TFL->getStackWidth(MF);

  Ptr = stackPtrToRegIndex(Ptr, StackWidth, DAG);

  if (ValueVT.isVector()) {
    unsigned NumElemVT = ValueVT.getVectorNumElements();
    EVT ElemVT = ValueVT.getVectorElementType();
    SDValue Stores[4];

    assert(NumElemVT >= StackWidth && "Stack width cannot be greater than "
                                      "vector width in store");

    for (unsigned i = 0; i < NumElemVT; ++i) {
      unsigned Channel, PtrIncr;
      getStackAddress(StackWidth, i, Channel, PtrIncr);
      Ptr = DAG.getNode(ISD::ADD, DL, MVT::i32, Ptr,
                        DAG.getConstant(PtrIncr, MVT::i32));
      SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT,
                                 Value, DAG.getConstant(i, MVT::i32));

      Stores[i] = DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                              Chain, Elem, Ptr,
                              DAG.getTargetConstant(Channel, MVT::i32));
    }
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores, NumElemVT);
  } else {
    if (ValueVT == MVT::i8) {
      Value = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Value);
    }
    Chain = DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other, Chain, Value, Ptr,
                        DAG.getTargetConstant(0, MVT::i32)); // Channel
  }

  return Chain;
}

// return (512 + (kc_bank << 12))
static int
ConstantAddressBlock(unsigned AddressSpace) {
  switch (AddressSpace) {
  case AMDGPUAS::CONSTANT_BUFFER_0:
    return 512;
  case AMDGPUAS::CONSTANT_BUFFER_1:
    return 512 + 4096;
  case AMDGPUAS::CONSTANT_BUFFER_2:
    return 512 + 4096 * 2;
  case AMDGPUAS::CONSTANT_BUFFER_3:
    return 512 + 4096 * 3;
  case AMDGPUAS::CONSTANT_BUFFER_4:
    return 512 + 4096 * 4;
  case AMDGPUAS::CONSTANT_BUFFER_5:
    return 512 + 4096 * 5;
  case AMDGPUAS::CONSTANT_BUFFER_6:
    return 512 + 4096 * 6;
  case AMDGPUAS::CONSTANT_BUFFER_7:
    return 512 + 4096 * 7;
  case AMDGPUAS::CONSTANT_BUFFER_8:
    return 512 + 4096 * 8;
  case AMDGPUAS::CONSTANT_BUFFER_9:
    return 512 + 4096 * 9;
  case AMDGPUAS::CONSTANT_BUFFER_10:
    return 512 + 4096 * 10;
  case AMDGPUAS::CONSTANT_BUFFER_11:
    return 512 + 4096 * 11;
  case AMDGPUAS::CONSTANT_BUFFER_12:
    return 512 + 4096 * 12;
  case AMDGPUAS::CONSTANT_BUFFER_13:
    return 512 + 4096 * 13;
  case AMDGPUAS::CONSTANT_BUFFER_14:
    return 512 + 4096 * 14;
  case AMDGPUAS::CONSTANT_BUFFER_15:
    return 512 + 4096 * 15;
  default:
    return -1;
  }
}

SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  DebugLoc DL = Op.getDebugLoc();
  LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
  SDValue Chain = Op.getOperand(0);
  SDValue Ptr = Op.getOperand(1);
  SDValue LoweredLoad;

  int ConstantBlock = ConstantAddressBlock(LoadNode->getAddressSpace());
  if (ConstantBlock > -1) {
    SDValue Result;
    if (dyn_cast<ConstantExpr>(LoadNode->getSrcValue()) ||
        dyn_cast<Constant>(LoadNode->getSrcValue()) ||
        dyn_cast<ConstantSDNode>(Ptr)) {
      SDValue Slots[4];
      for (unsigned i = 0; i < 4; i++) {
        // We want Const position encoded with the following formula:
        // (((512 + (kc_bank << 12) + const_index) << 2) + chan)
        // const_index is Ptr computed by llvm using an alignment of 16.
        // Thus we add (((512 + (kc_bank << 12)) + chan) * 4 here and
        // then divide by 4 at the ISel step.
        SDValue NewPtr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
            DAG.getConstant(4 * i + ConstantBlock * 16, MVT::i32));
        Slots[i] = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::i32, NewPtr);
      }
      Result = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, Slots, 4);
    } else {
      // A non-constant Ptr can't be folded; keep it as a v4i32 load.
      Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
          DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr, DAG.getConstant(4, MVT::i32)),
          DAG.getConstant(LoadNode->getAddressSpace() -
                          AMDGPUAS::CONSTANT_BUFFER_0, MVT::i32)
          );
    }

    if (!VT.isVector()) {
      Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Result,
                           DAG.getConstant(0, MVT::i32));
    }

    SDValue MergedValues[2] = {
      Result,
      Chain
    };
    return DAG.getMergeValues(MergedValues, 2, DL);
  }

  if (LoadNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
    return SDValue();
  }

  // Lowering for indirect addressing.
  const MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering*>(
                                         getTargetMachine().getFrameLowering());
  unsigned StackWidth = TFL->getStackWidth(MF);

  Ptr = stackPtrToRegIndex(Ptr, StackWidth, DAG);

  if (VT.isVector()) {
    unsigned NumElemVT = VT.getVectorNumElements();
    EVT ElemVT = VT.getVectorElementType();
    SDValue Loads[4];

    assert(NumElemVT >= StackWidth && "Stack width cannot be greater than "
                                      "vector width in load");

    for (unsigned i = 0; i < NumElemVT; ++i) {
      unsigned Channel, PtrIncr;
      getStackAddress(StackWidth, i, Channel, PtrIncr);
      Ptr = DAG.getNode(ISD::ADD, DL, MVT::i32, Ptr,
                        DAG.getConstant(PtrIncr, MVT::i32));
      Loads[i] = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, ElemVT,
                             Chain, Ptr,
                             DAG.getTargetConstant(Channel, MVT::i32),
                             Op.getOperand(2));
    }
    for (unsigned i = NumElemVT; i < 4; ++i) {
      Loads[i] = DAG.getUNDEF(ElemVT);
    }
    EVT TargetVT = EVT::getVectorVT(*DAG.getContext(), ElemVT, 4);
    LoweredLoad = DAG.getNode(ISD::BUILD_VECTOR, DL, TargetVT, Loads, 4);
  } else {
    LoweredLoad = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, VT,
                              Chain, Ptr,
                              DAG.getTargetConstant(0, MVT::i32), // Channel
                              Op.getOperand(2));
  }

  SDValue Ops[2];
  Ops[0] = LoweredLoad;
  Ops[1] = Chain;

  return DAG.getMergeValues(Ops, 2, DL);
}
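// Informal worked example of the constant-buffer encoding used in LowerLOAD
// above: for CONSTANT_BUFFER_2, kc_bank == 2 and ConstantAddressBlock returns
// 512 + (2 << 12) = 8704. LowerLOAD adds 4 * i + 8704 * 16 to the (16-byte
// aligned) Ptr, and the divide-by-4 at the ISel step then yields
//
//   ((8704 + Ptr / 16) << 2) + i
//     == ((512 + (kc_bank << 12) + const_index) << 2) + chan
//
// matching the formula quoted in the comment inside LowerLOAD.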
/// XXX Only kernel functions are supported, so we can assume for now that
/// every function is a kernel function, but in the future we should use
/// separate calling conventions for kernel and non-kernel functions.
SDValue R600TargetLowering::LowerFormalArguments(
                                      SDValue Chain,
                                      CallingConv::ID CallConv,
                                      bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      DebugLoc DL, SelectionDAG &DAG,
                                      SmallVectorImpl<SDValue> &InVals) const {
  unsigned ParamOffsetBytes = 36;
  Function::const_arg_iterator FuncArg =
                            DAG.getMachineFunction().getFunction()->arg_begin();
  for (unsigned i = 0, e = Ins.size(); i < e; ++i, ++FuncArg) {
    EVT VT = Ins[i].VT;
    Type *ArgType = FuncArg->getType();
    unsigned ArgSizeInBits = ArgType->isPointerTy() ?
                             32 : ArgType->getPrimitiveSizeInBits();
    unsigned ArgBytes = ArgSizeInBits >> 3;
    EVT ArgVT;
    if (ArgSizeInBits < VT.getSizeInBits()) {
      assert(!ArgType->isFloatTy() &&
             "Extending floating point arguments not supported yet");
      ArgVT = MVT::getIntegerVT(ArgSizeInBits);
    } else {
      ArgVT = VT;
    }
    PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
                                          AMDGPUAS::PARAM_I_ADDRESS);
    SDValue Arg = DAG.getExtLoad(ISD::ZEXTLOAD, DL, VT, DAG.getRoot(),
                                 DAG.getConstant(ParamOffsetBytes, MVT::i32),
                                 MachinePointerInfo(UndefValue::get(PtrTy)),
                                 ArgVT, false, false, ArgBytes);
    InVals.push_back(Arg);
    ParamOffsetBytes += ArgBytes;
  }
  return Chain;
}

EVT R600TargetLowering::getSetCCResultType(EVT VT) const {
  if (!VT.isVector()) return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

//===----------------------------------------------------------------------===//
// Custom DAG Optimizations
//===----------------------------------------------------------------------===//

SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  switch (N->getOpcode()) {
  // (f32 fp_round (f64 uint_to_fp a)) -> (f32 uint_to_fp a)
  case ISD::FP_ROUND: {
    SDValue Arg = N->getOperand(0);
    if (Arg.getOpcode() == ISD::UINT_TO_FP && Arg.getValueType() == MVT::f64) {
      return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), N->getValueType(0),
                         Arg.getOperand(0));
    }
    break;
  }

  // (i32 fp_to_sint (fneg (select_cc f32, f32, 1.0, 0.0 cc))) ->
  // (i32 select_cc f32, f32, -1, 0 cc)
  //
  // Mesa's GLSL frontend generates the above pattern a lot and we can lower
  // this to one of the SET*_DX10 instructions.
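  // A concrete (made-up) instance of the pattern described above:
  //
  //   (i32 fp_to_sint (fneg (select_cc f32:%x, f32:%y, 1.0, 0.0, setolt)))
  //     --> (i32 select_cc f32:%x, f32:%y, -1, 0, setolt)
  //
  // fp_to_sint(fneg(1.0)) is -1 and fp_to_sint(fneg(0.0)) is 0, so the float
  // 1.0/0.0 boolean collapses into the all-ones/zero integer boolean that the
  // SET*_DX10 instructions produce directly.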
  case ISD::FP_TO_SINT: {
    SDValue FNeg = N->getOperand(0);
    if (FNeg.getOpcode() != ISD::FNEG) {
      return SDValue();
    }
    SDValue SelectCC = FNeg.getOperand(0);
    if (SelectCC.getOpcode() != ISD::SELECT_CC ||
        SelectCC.getOperand(0).getValueType() != MVT::f32 || // LHS
        SelectCC.getOperand(2).getValueType() != MVT::f32 || // True
        !isHWTrueValue(SelectCC.getOperand(2)) ||
        !isHWFalseValue(SelectCC.getOperand(3))) {
      return SDValue();
    }

    return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), N->getValueType(0),
                       SelectCC.getOperand(0), // LHS
                       SelectCC.getOperand(1), // RHS
                       DAG.getConstant(-1, MVT::i32), // True
                       DAG.getConstant(0, MVT::i32), // False
                       SelectCC.getOperand(4)); // CC

    break;
  }
  // Extract_vec (Build_vector) generated by custom lowering
  // also needs to be customly combined.
  case ISD::EXTRACT_VECTOR_ELT: {
    SDValue Arg = N->getOperand(0);
    if (Arg.getOpcode() == ISD::BUILD_VECTOR) {
      if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
        unsigned Element = Const->getZExtValue();
        return Arg->getOperand(Element);
      }
    }
    if (Arg.getOpcode() == ISD::BITCAST &&
        Arg.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
      if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
        unsigned Element = Const->getZExtValue();
        return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), N->getVTList(),
            Arg->getOperand(0).getOperand(Element));
      }
    }
  }

  case ISD::SELECT_CC: {
    // fold selectcc (selectcc x, y, a, b, cc), b, a, b, seteq ->
    //      selectcc x, y, a, b, inv(cc)
    //
    // fold selectcc (selectcc x, y, a, b, cc), b, a, b, setne ->
    //      selectcc x, y, a, b, cc
    SDValue LHS = N->getOperand(0);
    if (LHS.getOpcode() != ISD::SELECT_CC) {
      return SDValue();
    }

    SDValue RHS = N->getOperand(1);
    SDValue True = N->getOperand(2);
    SDValue False = N->getOperand(3);
    ISD::CondCode NCC = cast<CondCodeSDNode>(N->getOperand(4))->get();

    if (LHS.getOperand(2).getNode() != True.getNode() ||
        LHS.getOperand(3).getNode() != False.getNode() ||
        RHS.getNode() != False.getNode()) {
      return SDValue();
    }

    switch (NCC) {
    default: return SDValue();
    case ISD::SETNE: return LHS;
    case ISD::SETEQ: {
      ISD::CondCode LHSCC = cast<CondCodeSDNode>(LHS.getOperand(4))->get();
      LHSCC = ISD::getSetCCInverse(LHSCC,
                  LHS.getOperand(0).getValueType().isInteger());
      return DAG.getSelectCC(N->getDebugLoc(),
                             LHS.getOperand(0),
                             LHS.getOperand(1),
                             LHS.getOperand(2),
                             LHS.getOperand(3),
                             LHSCC);
    }
    }
  }
  case AMDGPUISD::EXPORT: {
    SDValue Arg = N->getOperand(1);
    if (Arg.getOpcode() != ISD::BUILD_VECTOR)
      break;
    SDValue NewBldVec[4] = {
      DAG.getUNDEF(MVT::f32),
      DAG.getUNDEF(MVT::f32),
      DAG.getUNDEF(MVT::f32),
      DAG.getUNDEF(MVT::f32)
    };
    SDValue NewArgs[8] = {
      N->getOperand(0), // Chain
      SDValue(),
      N->getOperand(2), // ArrayBase
      N->getOperand(3), // Type
      N->getOperand(4), // SWZ_X
      N->getOperand(5), // SWZ_Y
      N->getOperand(6), // SWZ_Z
      N->getOperand(7)  // SWZ_W
    };
    for (unsigned i = 0; i < Arg.getNumOperands(); i++) {
      if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg.getOperand(i))) {
        if (C->isZero()) {
          NewArgs[4 + i] = DAG.getConstant(4, MVT::i32); // SEL_0
        } else if (C->isExactlyValue(1.0)) {
          NewArgs[4 + i] = DAG.getConstant(5, MVT::i32); // SEL_1
        } else {
          NewBldVec[i] = Arg.getOperand(i);
        }
      } else {
        NewBldVec[i] = Arg.getOperand(i);
      }
    }
    DebugLoc DL = N->getDebugLoc();
    NewArgs[1] = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4f32, NewBldVec, 4);
    return DAG.getNode(AMDGPUISD::EXPORT, DL, N->getVTList(), NewArgs, 8);
  }
  }
  return SDValue();
}
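// Note on the AMDGPUISD::EXPORT combine above: an exported component that is
// a constant 0.0 or 1.0 never needs to occupy a register lane, because the
// export swizzle can name the SEL_0 (4) or SEL_1 (5) selector instead. As an
// informal example, exporting
//
//   (build_vector %r, %g, 0.0, 1.0)
//
// is rewritten into an export of (build_vector %r, %g, undef, undef) with
// swizzle operands (SWZ_X, SWZ_Y, SEL_0, SEL_1).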