// PPCISelLowering.h — LLVM revision 210299
//===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that PPC uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H
#define LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H

#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "PPC.h"
#include "PPCSubtarget.h"

namespace llvm {
  namespace PPCISD {
    /// NodeType - PPC-specific SelectionDAG node opcodes, numbered after the
    /// generic target-independent opcodes.  Nodes from STD_32 onward are
    /// memory opcodes (they start at ISD::FIRST_TARGET_MEMORY_OPCODE).
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// FSEL - Traditional three-operand fsel node.
      ///
      FSEL,

      /// FCFID - The FCFID instruction, taking an f64 operand and producing
      /// an f64 value containing the FP representation of the integer that
      /// was temporarily in the f64 operand.
      FCFID,

      /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
      /// operand, producing an f64 value containing the integer representation
      /// of that FP value.
      FCTIDZ, FCTIWZ,

      /// STFIWX - The STFIWX instruction.  The first operand is an input token
      /// chain, then an f64 value to store, then an address to store it to.
      STFIWX,

      // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
      // three v4f32 operands and producing a v4f32 result.
      VMADDFP, VNMSUBFP,

      /// VPERM - The PPC VPERM Instruction.
      ///
      VPERM,

      /// Hi/Lo - These represent the high and low 16-bit parts of a global
      /// address respectively.  These nodes have two operands, the first of
      /// which must be a TargetGlobalAddress, and the second of which must be a
      /// Constant.  Selected naively, these turn into 'lis G+C' and 'li G+C',
      /// though these are usually folded into other nodes.
      Hi, Lo,

      TOC_ENTRY,

      /// The following three target-specific nodes are used for calls through
      /// function pointers in the 64-bit SVR4 ABI.

      /// Restore the TOC from the TOC save area of the current stack frame.
      /// This is basically a hard coded load instruction which additionally
      /// takes/produces a flag.
      TOC_RESTORE,

      /// Like a regular LOAD but additionally taking/producing a flag.
      LOAD,

      /// LOAD into r2 (also taking/producing a flag).  Like TOC_RESTORE, this
      /// is a hard coded load instruction.
      LOAD_TOC,

      /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
      /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
      /// compute an allocation on the stack.
      DYNALLOC,

      /// GlobalBaseReg - On Darwin, this node represents the result of the mflr
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// These nodes represent the 32-bit PPC shifts that operate on 6-bit
      /// shift amounts.  These nodes are generated by the multi-precision shift
      /// code.
      SRL, SRA, SHL,

      /// EXTSW_32 - This is the EXTSW instruction for use with "32-bit"
      /// registers.
      EXTSW_32,

      /// CALL - A direct function call.
      CALL_Darwin, CALL_SVR4,

      /// NOP - Special NOP which follows 64-bit SVR4 calls.
      NOP,

      /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
      /// MTCTR instruction.
      MTCTR,

      /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
      /// BCTRL instruction.
      BCTRL_Darwin, BCTRL_SVR4,

      /// Return with a flag operand, matched by 'blr'
      RET_FLAG,

      /// R32 = MFCR(CRREG, INFLAG) - Represents the MFCRpseud/MFOCRF
      /// instructions.  This copies the bits corresponding to the specified
      /// CRREG into the resultant GPR.  Bits corresponding to other CR regs
      /// are undefined.
      MFCR,

      /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
      /// instructions.  For lack of better number, we use the opcode number
      /// encoding for the OPC field to identify the compare.  For example, 838
      /// is VCMPGTSH.
      VCMP,

      /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
      /// altivec VCMP*o instructions.  For lack of better number, we use the
      /// opcode number encoding for the OPC field to identify the compare.  For
      /// example, 838 is VCMPGTSH.
      VCMPo,

      /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
      /// corresponds to the COND_BRANCH pseudo instruction.  CRRC is the
      /// condition register to branch on, OPC is the branch opcode to use (e.g.
      /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
      /// an optional input flag argument.
      COND_BRANCH,

      // The following 5 instructions are used only as part of the
      // long double-to-int conversion sequence.

      /// OUTFLAG = MFFS F8RC - This moves the FPSCR (not modelled) into the
      /// register.
      MFFS,

      /// OUTFLAG = MTFSB0 INFLAG - This clears a bit in the FPSCR.
      MTFSB0,

      /// OUTFLAG = MTFSB1 INFLAG - This sets a bit in the FPSCR.
      MTFSB1,

      /// F8RC, OUTFLAG = FADDRTZ F8RC, F8RC, INFLAG - This is an FADD done with
      /// rounding towards zero.  It has flags added so it won't move past the
      /// FPSCR-setting instructions.
      FADDRTZ,

      /// MTFSF = F8RC, INFLAG - This moves the register into the FPSCR.
      MTFSF,

      /// LARX = This corresponds to PPC l{w|d}arx instruction: load and
      /// reserve indexed.  This is used to implement atomic operations.
      LARX,

      /// STCX = This corresponds to PPC stcx. instruction: store conditional
      /// indexed.  This is used to implement atomic operations.
      STCX,

      /// TC_RETURN - A tail call return.
      ///   operand #0 chain
      ///   operand #1 callee (register or absolute)
      ///   operand #2 stack adjustment
      ///   operand #3 optional in flag
      TC_RETURN,

      /// STD_32 - This is the STD instruction for use with "32-bit" registers.
      STD_32 = ISD::FIRST_TARGET_MEMORY_OPCODE,

      /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
      /// byte-swapping store instruction.  It byte-swaps the low "Type" bits of
      /// the GPRC input, then stores it through Ptr.  Type can be either i16 or
      /// i32.
      STBRX,

      /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
      /// byte-swapping load instruction.  It loads "Type" bits, byte swaps it,
      /// then puts it in the bottom bits of the GPRC.  TYPE can be either i16
      /// or i32.
      LBRX
    };
  }

  /// Define some predicates that are used for node matching.
  namespace PPC {
    /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUHUM instruction.
    bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);

    /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUWUM instruction.
    bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);

    /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
    bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            bool isUnary);

    /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
    bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            bool isUnary);

    /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
    /// shift amount, otherwise return -1.
    int isVSLDOIShuffleMask(SDNode *N, bool isUnary);

    /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a splat of a single element that is suitable for input to
    /// VSPLTB/VSPLTH/VSPLTW.
    bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);

    /// isAllNegativeZeroVector - Returns true if all elements of build_vector
    /// are -0.0.
    bool isAllNegativeZeroVector(SDNode *N);

    /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
    /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
    unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize);

    /// get_VSPLTI_elt - If this is a build_vector of constants which can be
    /// formed by using a vspltis[bhw] instruction of the specified element
    /// size, return the constant being splatted.  The ByteSize field indicates
    /// the number of bytes of each element [124] -> [bhw].
    SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
  }

  /// PPCTargetLowering - PPC implementation of the TargetLowering interface.
  /// Provides the custom DAG lowering hooks, addressing-mode selection
  /// helpers, inline-asm constraint handling and calling-convention lowering
  /// (Darwin and SVR4 flavors) for the PowerPC backend.
  class PPCTargetLowering : public TargetLowering {
    // Subtarget description this lowering was constructed for.
    const PPCSubtarget &PPCSubTarget;

  public:
    explicit PPCTargetLowering(PPCTargetMachine &TM);

    /// getTargetNodeName() - This method returns the name of a target specific
    /// DAG node.
    virtual const char *getTargetNodeName(unsigned Opcode) const;

    /// getSetCCResultType - Return the ISD::SETCC ValueType
    virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// SelectAddressRegReg - Given the specified address, check to see if it
    /// can be represented as an indexed [r+r] operation.  Returns false if it
    /// can be more efficiently represented with [r+imm].
    bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
                             SelectionDAG &DAG) const;

    /// SelectAddressRegImm - Returns true if the address N can be represented
    /// by a base register plus a signed 16-bit displacement [r+imm], and if it
    /// is not better represented as reg+reg.
    bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
                             SelectionDAG &DAG) const;

    /// SelectAddressRegRegOnly - Given the specified address, force it to be
    /// represented as an indexed [r+r] operation.
    bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
                                 SelectionDAG &DAG) const;

    /// SelectAddressRegImmShift - Returns true if the address N can be
    /// represented by a base register plus a signed 14-bit displacement
    /// [r+imm*4].  Suitable for use by STD and friends.
    bool SelectAddressRegImmShift(SDValue N, SDValue &Disp, SDValue &Base,
                                  SelectionDAG &DAG) const;


    /// LowerOperation - Provide custom lowering hooks for some operations.
    ///
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;
    // Helper for EmitInstrWithCustomInserter: expand a full-word (or
    // doubleword when is64Bit) atomic read-modify-write pseudo.
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *MBB, bool is64Bit,
                                        unsigned BinOpcode) const;
    // Helper for EmitInstrWithCustomInserter: expand a sub-word (8- or
    // 16-bit) atomic read-modify-write pseudo.
    MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr *MI,
                                                MachineBasicBlock *MBB,
                                                bool is8bit, unsigned Opcode) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;

    /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area.  This is the actual
    /// alignment, not its logarithm.
    unsigned getByValTypeAlignment(const Type *Ty) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector.  If it is invalid, don't add anything to Ops.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const;

    /// isLegalAddressImmediate - Return true if the integer value can be used
    /// as the offset of the target addressing mode for load / store of the
    /// given type.
    virtual bool isLegalAddressImmediate(int64_t V, const Type *Ty) const;

    /// isLegalAddressImmediate - Return true if the GlobalValue can be used as
    /// the offset of the target addressing mode.
    virtual bool isLegalAddressImmediate(GlobalValue *GV) const;

    virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// getOptimalMemOpType - Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering.  If DstAlign is zero that means it's safe because the
    /// destination alignment can satisfy any constraint.  Similarly if
    /// SrcAlign is zero it means there isn't a need to check it against
    /// alignment requirement, probably because the source does not need to be
    /// loaded.  If 'NonScalarIntSafe' is true, that means it's safe to return
    /// a non-scalar-integer type, e.g. empty string source, constant, or
    /// loaded from memory.  'MemcpyStrSrc' indicates whether the memcpy source
    /// is constant so it does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    virtual EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool NonScalarIntSafe, bool MemcpyStrSrc,
                        MachineFunction &MF) const;

    /// getFunctionAlignment - Return the Log2 alignment of this function.
    virtual unsigned getFunctionAlignment(const Function *F) const;

  private:
    SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const;
    SDValue getReturnAddrFrameIndex(SelectionDAG & DAG) const;

    bool
    IsEligibleForTailCallOptimization(SDValue Callee,
                                      CallingConv::ID CalleeCC,
                                      bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      SelectionDAG& DAG) const;

    SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
                                         int SPDiff,
                                         SDValue Chain,
                                         SDValue &LROpOut,
                                         SDValue &FPOpOut,
                                         bool isDarwinABI,
                                         DebugLoc dl) const;

    // Per-opcode custom lowering helpers dispatched from LowerOperation /
    // ReplaceNodeResults.
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
                         const PPCSubtarget &Subtarget) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG,
                       const PPCSubtarget &Subtarget) const;
    SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
                              const PPCSubtarget &Subtarget) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
                                    const PPCSubtarget &Subtarget) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, DebugLoc dl) const;
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;

    // Copy call results out of their physical registers into InVals.
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    // Emit the call sequence tail shared by the Darwin and SVR4 paths.
    SDValue FinishCall(CallingConv::ID CallConv, DebugLoc dl, bool isTailCall,
                       bool isVarArg,
                       SelectionDAG &DAG,
                       SmallVector<std::pair<unsigned, SDValue>, 8>
                         &RegsToPass,
                       SDValue InFlag, SDValue Chain,
                       SDValue &Callee,
                       int SPDiff, unsigned NumBytes,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    // ABI-specific implementations dispatched from the virtual hooks above.
    SDValue
      LowerFormalArguments_Darwin(SDValue Chain,
                                  CallingConv::ID CallConv, bool isVarArg,
                                  const SmallVectorImpl<ISD::InputArg> &Ins,
                                  DebugLoc dl, SelectionDAG &DAG,
                                  SmallVectorImpl<SDValue> &InVals) const;
    SDValue
      LowerFormalArguments_SVR4(SDValue Chain,
                                CallingConv::ID CallConv, bool isVarArg,
                                const SmallVectorImpl<ISD::InputArg> &Ins,
                                DebugLoc dl, SelectionDAG &DAG,
                                SmallVectorImpl<SDValue> &InVals) const;

    SDValue
      LowerCall_Darwin(SDValue Chain, SDValue Callee,
                       CallingConv::ID CallConv, bool isVarArg, bool isTailCall,
                       const SmallVectorImpl<ISD::OutputArg> &Outs,
                       const SmallVectorImpl<SDValue> &OutVals,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       DebugLoc dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const;
    SDValue
      LowerCall_SVR4(SDValue Chain, SDValue Callee,
                     CallingConv::ID CallConv, bool isVarArg, bool isTailCall,
                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                     const SmallVectorImpl<SDValue> &OutVals,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     DebugLoc dl, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const;
  };
}

#endif   // LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H