// ARMISelDAGToDAG.cpp — LLVM revision 207618
//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM target.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMTargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
///
/// The Select* predicates below are invoked (directly and from the
/// tablegen-generated matcher included via ARMGenDAGISel.inc) to recognize
/// the operand shapes of the various ARM/Thumb/Thumb2 addressing modes and
/// to produce the target-constant operands that encode them.
namespace {
class ARMDAGToDAGISel : public SelectionDAGISel {
  ARMBaseTargetMachine &TM;

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

public:
  explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
                           CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(tm, OptLevel), TM(tm),
      Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
  }

  virtual const char *getPassName() const {
    return "ARM Instruction Selection";
  }

  /// getI32Imm - Return a target constant of type i32 with the specified
  /// value.
  inline SDValue getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);
  }

  // Select - Main entry point for instruction selection of a single node.
  SDNode *Select(SDNode *N);

  // Addressing-mode selection predicates. Each returns true when N matches
  // the mode and fills in the by-reference operands used by the matcher.
  bool SelectShifterOperandReg(SDNode *Op, SDValue N, SDValue &A,
                               SDValue &B, SDValue &C);
  bool SelectAddrMode2(SDNode *Op, SDValue N, SDValue &Base,
                       SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2Offset(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode3(SDNode *Op, SDValue N, SDValue &Base,
                       SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode4(SDNode *Op, SDValue N, SDValue &Addr,
                       SDValue &Mode);
  bool SelectAddrMode5(SDNode *Op, SDValue N, SDValue &Base,
                       SDValue &Offset);
  bool SelectAddrMode6(SDNode *Op, SDValue N, SDValue &Addr, SDValue &Align);

  bool SelectAddrModePC(SDNode *Op, SDValue N, SDValue &Offset,
                        SDValue &Label);

  // Thumb-1 addressing modes.
  bool SelectThumbAddrModeRR(SDNode *Op, SDValue N, SDValue &Base,
                             SDValue &Offset);
  bool SelectThumbAddrModeRI5(SDNode *Op, SDValue N, unsigned Scale,
                              SDValue &Base, SDValue &OffImm,
                              SDValue &Offset);
  bool SelectThumbAddrModeS1(SDNode *Op, SDValue N, SDValue &Base,
                             SDValue &OffImm, SDValue &Offset);
  bool SelectThumbAddrModeS2(SDNode *Op, SDValue N, SDValue &Base,
                             SDValue &OffImm, SDValue &Offset);
  bool SelectThumbAddrModeS4(SDNode *Op, SDValue N, SDValue &Base,
                             SDValue &OffImm, SDValue &Offset);
  bool SelectThumbAddrModeSP(SDNode *Op, SDValue N, SDValue &Base,
                             SDValue &OffImm);

  // Thumb-2 addressing modes.
  bool SelectT2ShifterOperandReg(SDNode *Op, SDValue N,
                                 SDValue &BaseReg, SDValue &Opc);
  bool SelectT2AddrModeImm12(SDNode *Op, SDValue N, SDValue &Base,
                             SDValue &OffImm);
  bool SelectT2AddrModeImm8(SDNode *Op, SDValue N, SDValue &Base,
                            SDValue &OffImm);
  bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                  SDValue &OffImm);
  bool SelectT2AddrModeImm8s4(SDNode *Op, SDValue N, SDValue &Base,
                              SDValue &OffImm);
  bool SelectT2AddrModeSoReg(SDNode *Op, SDValue N, SDValue &Base,
                             SDValue &OffReg, SDValue &ShImm);

  // Include the pieces autogenerated from the target description.
#include "ARMGenDAGISel.inc"

private:
  /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
  /// ARM.
  SDNode *SelectARMIndexedLoad(SDNode *N);
  // SelectT2IndexedLoad - Thumb-2 counterpart of SelectARMIndexedLoad.
  SDNode *SelectT2IndexedLoad(SDNode *N);

  /// SelectVLD - Select NEON load intrinsics.  NumVecs should be
  /// 1, 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// loads of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVLD(SDNode *N, unsigned NumVecs, unsigned *DOpcodes,
                    unsigned *QOpcodes0, unsigned *QOpcodes1);

  /// SelectVST - Select NEON store intrinsics.  NumVecs should
  /// be 1, 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// stores of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVST(SDNode *N, unsigned NumVecs, unsigned *DOpcodes,
                    unsigned *QOpcodes0, unsigned *QOpcodes1);

  /// SelectVLDSTLane - Select NEON load/store lane intrinsics.  NumVecs should
  /// be 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// load/store of D registers and even subregs and odd subregs of Q registers.
  SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad, unsigned NumVecs,
                          unsigned *DOpcodes, unsigned *QOpcodes0,
                          unsigned *QOpcodes1);

  /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
  SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);

  /// SelectCMOVOp - Select CMOV instructions for ARM.
  SDNode *SelectCMOVOp(SDNode *N);
  SDNode *SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                              ARMCC::CondCodes CCVal, SDValue CCR,
                              SDValue InFlag);
  SDNode *SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                               ARMCC::CondCodes CCVal, SDValue CCR,
                               SDValue InFlag);
  SDNode *SelectT2CMOVSoImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                              ARMCC::CondCodes CCVal, SDValue CCR,
                              SDValue InFlag);
  SDNode *SelectARMCMOVSoImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                               ARMCC::CondCodes CCVal, SDValue CCR,
                               SDValue InFlag);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            char ConstraintCode,
                                            std::vector<SDValue> &OutOps);

  /// PairDRegs - Insert a pair of double registers into an implicit def to
  /// form a quad register.
  SDNode *PairDRegs(EVT VT, SDValue V0, SDValue V1);
};
}

/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

// isInt32Immediate - This method tests to see if the operand is a 32-bit
// constant. If so Imm will receive the 32 bit value.
// Convenience overload of the SDNode* version above, taking an SDValue.
static bool isInt32Immediate(SDValue N, unsigned &Imm) {
  return isInt32Immediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has a immediate integer right operand.
// If so Imm will receive the 32 bit value.
static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
  return N->getOpcode() == Opc &&
         isInt32Immediate(N->getOperand(1).getNode(), Imm);
}


/// SelectShifterOperandReg - Match an ARM shifter operand ("so_reg"):
/// a base register shifted by either an immediate or another register.
/// On success BaseReg is the value being shifted, ShReg is the shift-amount
/// register (or register 0 when the amount is an immediate), and Opc is a
/// target constant encoding the shift kind and immediate amount.
bool ARMDAGToDAGISel::SelectShifterOperandReg(SDNode *Op,
                                              SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &ShReg,
                                              SDValue &Opc) {
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    // Immediate shift amount: the register slot is filled with reg 0 and the
    // amount (masked to 5 bits) is folded into the encoded shift opcode.
    ShReg = CurDAG->getRegister(0, MVT::i32);
    ShImmVal = RHS->getZExtValue() & 31;
  } else {
    // Register shift amount.
    ShReg = N.getOperand(1);
  }
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  MVT::i32);
  return true;
}

/// SelectAddrMode2 - Match ARM addressing mode 2 for a memory operand:
/// [base +/- imm12] or [base +/- reg (shift #imm)].  Produces Base, an
/// Offset register (reg 0 when unused), and Opc, a target constant holding
/// the AM2 encoding (add/sub, offset or shift amount, shift kind).
bool ARMDAGToDAGISel::SelectAddrMode2(SDNode *Op, SDValue N,
                                      SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      // i.e. rewrite an odd multiplier as base + (base << log2(multiplier-1)).
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return true;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
    // Base-register-only forms.
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      // Strip the wrapper, except for global addresses when movt is
      // available -- presumably those are materialized differently;
      // confirm against ARMISelLowering.
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return true;
  }

  // Match simple R +/- imm12 operands.
  if (N.getOpcode() == ISD::ADD)
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if ((RHSC >= 0 && RHSC < 0x1000) ||
          (RHSC < 0 && RHSC > -0x1000)) { // 12 bits.
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
        }
        Offset = CurDAG->getRegister(0, MVT::i32);

        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
                                                          ARM_AM::no_shift),
                                        MVT::i32);
        return true;
      }
    }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::ADD ? ARM_AM::add:ARM_AM::sub;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      Offset = N.getOperand(1).getOperand(0);
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() == ISD::ADD && ShOpcVal == ARM_AM::no_shift) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
            dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        Offset = N.getOperand(0).getOperand(0);
        Base = N.getOperand(1);
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}

/// SelectAddrMode2Offset - Match the offset operand of a pre/post-indexed
/// AM2 load or store.  Op is the indexed memory node (used only to read its
/// addressing mode); N is the offset.  Accepts an unsigned imm12 or a
/// [possibly shifted] register, with add/sub derived from the indexing mode.
bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
    int Val = (int)C->getZExtValue();
    if (Val >= 0 && Val < 0x1000) { // 12 bits.
      // Immediate offset: register slot is unused (reg 0).
      Offset = CurDAG->getRegister(0, MVT::i32);
      Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
                                                        ARM_AM::no_shift),
                                      MVT::i32);
      return true;
    }
  }

  Offset = N;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
  unsigned ShAmt = 0;
  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      Offset = N.getOperand(0);
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}


/// SelectAddrMode3 - Match ARM addressing mode 3 (halfword / signed-byte
/// loads and stores): [base +/- imm8] or [base +/- reg].  Produces Base,
/// an Offset register (reg 0 when an immediate is used), and the encoded
/// AM3 opcode constant.
bool ARMDAGToDAGISel::SelectAddrMode3(SDNode *Op, SDValue N,
                                      SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::SUB) {
    // X - C is canonicalize to X + -C, no need to handle it here.
    Base = N.getOperand(0);
    Offset = N.getOperand(1);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
    return true;
  }

  if (N.getOpcode() != ISD::ADD) {
    // Base-register-only form.
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if ((RHSC >= 0 && RHSC < 256) ||
        (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed.
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      Offset = CurDAG->getRegister(0, MVT::i32);

      ARM_AM::AddrOpc AddSub = ARM_AM::add;
      if (RHSC < 0) {
        AddSub = ARM_AM::sub;
        RHSC = - RHSC;
      }
      Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
      return true;
    }
  }

  // Fall back to register + register.
  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
  return true;
}

/// SelectAddrMode3Offset - Match the offset operand of a pre/post-indexed
/// AM3 load or store: an unsigned imm8 or a register, with add/sub taken
/// from the parent node's indexing mode.
bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
    int Val = (int)C->getZExtValue();
    if (Val >= 0 && Val < 256) {
      Offset = CurDAG->getRegister(0, MVT::i32);
      Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
      return true;
    }
  }

  Offset = N;
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
  return true;
}

/// SelectAddrMode4 - Addressing mode 4 (load/store multiple).  Always
/// matches: the address is used as-is with a zero mode constant.
bool ARMDAGToDAGISel::SelectAddrMode4(SDNode *Op, SDValue N,
                                      SDValue &Addr, SDValue &Mode) {
  Addr = N;
  Mode = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

/// SelectAddrMode5 - Match addressing mode 5 (VFP load/store):
/// [base +/- imm8 * 4].  The immediate encoded in Offset is the byte
/// offset divided by 4.
bool ARMDAGToDAGISel::SelectAddrMode5(SDNode *Op, SDValue N,
                                      SDValue &Base, SDValue &Offset) {
  if (N.getOpcode() != ISD::ADD) {
    // Base-register-only form.
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                       MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if ((RHSC & 3) == 0) {  // The constant is implicitly multiplied by 4.
      RHSC >>= 2;
      if ((RHSC >= 0 && RHSC < 256) ||
          (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed.
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
        }

        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
                                           MVT::i32);
        return true;
      }
    }
  }

  Base = N;
  Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                     MVT::i32);
  return true;
}

/// SelectAddrMode6 - Addressing mode 6 (NEON load/store).  Always matches;
/// the address is used as-is.
bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Op, SDValue N,
                                      SDValue &Addr, SDValue &Align) {
  Addr = N;
  // Default to no alignment.
  Align = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

/// SelectAddrModePC - Match a PIC_ADD node (PC-relative address): Offset is
/// the value added to PC and Label is the constant-pool label id.
bool ARMDAGToDAGISel::SelectAddrModePC(SDNode *Op, SDValue N,
                                       SDValue &Offset, SDValue &Label) {
  if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
    Offset = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
                                      MVT::i32);
    return true;
  }
  return false;
}

/// SelectThumbAddrModeRR - Thumb register+register addressing.  A lone
/// zero constant matches as [0, 0]; otherwise only an ADD matches.
bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDNode *Op, SDValue N,
                                            SDValue &Base, SDValue &Offset){
  // FIXME dl should come from the parent load or store, not the address
  DebugLoc dl = Op->getDebugLoc();
  if (N.getOpcode() != ISD::ADD) {
    ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
    if (!NC || NC->getZExtValue() != 0)
      return false;

    Base = Offset = N;
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}

/// SelectThumbAddrModeRI5 - Thumb [reg + imm5 * Scale] or [reg + reg]
/// addressing, with Scale of 1, 2 or 4.  Exactly one of Offset (register
/// form) or OffImm (scaled-immediate form) is meaningful; the other is
/// set to reg 0 / constant 0.
bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5(SDNode *Op, SDValue N,
                                        unsigned Scale, SDValue &Base,
                                        SDValue &OffImm, SDValue &Offset) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(Op, N, TmpBase, TmpOffImm))
      return false;  // We want to select tLDRspi / tSTRspi instead.
    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false;  // We want to select tLDRpci instead.
  }

  if (N.getOpcode() != ISD::ADD) {
    // Base-register-only form.
    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    } else
      Base = N;

    Offset = CurDAG->getRegister(0, MVT::i32);
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  // Thumb does not have [sp, r] address mode.
  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP)) {
    Base = N;
    Offset = CurDAG->getRegister(0, MVT::i32);
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  // If the RHS is + imm5 * scale, fold into addr mode.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if ((RHSC & (Scale-1)) == 0) {  // The constant is implicitly multiplied.
      RHSC /= Scale;
      if (RHSC >= 0 && RHSC < 32) {
        Base = N.getOperand(0);
        Offset = CurDAG->getRegister(0, MVT::i32);
        OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
        return true;
      }
    }
  }

  // Otherwise use register + register form.
  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

// SelectThumbAddrModeS1/S2/S4 - Scale-specific wrappers around
// SelectThumbAddrModeRI5 for byte, halfword and word accesses.
bool ARMDAGToDAGISel::SelectThumbAddrModeS1(SDNode *Op, SDValue N,
                                            SDValue &Base, SDValue &OffImm,
                                            SDValue &Offset) {
  return SelectThumbAddrModeRI5(Op, N, 1, Base, OffImm, Offset);
}

bool ARMDAGToDAGISel::SelectThumbAddrModeS2(SDNode *Op, SDValue N,
                                            SDValue &Base, SDValue &OffImm,
                                            SDValue &Offset) {
  return SelectThumbAddrModeRI5(Op, N, 2, Base, OffImm, Offset);
}

bool ARMDAGToDAGISel::SelectThumbAddrModeS4(SDNode *Op, SDValue N,
                                            SDValue &Base, SDValue &OffImm,
                                            SDValue &Offset) {
  return SelectThumbAddrModeRI5(Op, N, 4, Base, OffImm, Offset);
}

/// SelectThumbAddrModeSP - Thumb [sp + imm8 * 4] addressing, also matching
/// frame indices (which are SP-relative after frame lowering).
bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDNode *Op, SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (N.getOpcode() != ISD::ADD)
    return false;

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
      (LHSR && LHSR->getReg() == ARM::SP)) {
    // If the RHS is + imm8 * scale, fold into addr mode.
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if ((RHSC & 3) == 0) {  // The constant is implicitly multiplied.
        RHSC >>= 2;
        if (RHSC >= 0 && RHSC < 256) {
          Base = N.getOperand(0);
          if (Base.getOpcode() == ISD::FrameIndex) {
            int FI = cast<FrameIndexSDNode>(Base)->getIndex();
            Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
          }
          OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
          return true;
        }
      }
    }
  }

  return false;
}

/// SelectT2ShifterOperandReg - Thumb-2 shifter operand: base register
/// shifted by an immediate only (unlike the ARM form, a register shift
/// amount does not match here).
bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDNode *Op, SDValue N,
                                                SDValue &BaseReg,
                                                SDValue &Opc) {
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    ShImmVal = RHS->getZExtValue() & 31;
    Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
    return true;
  }

  return false;
}

/// SelectT2AddrModeImm12 - Thumb-2 [base + imm12] addressing (t2LDRi12
/// style, unsigned 12-bit offset).  Declines (R - imm8) so it can be
/// selected as t2LDRi8, and constant pools so t2LDRpci can match.
bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDNode *Op, SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index...
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm  = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::TargetConstantPool)
        return false;  // We want to select t2LDRpci instead.
    } else
      Base = N;
    OffImm  = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    if (SelectT2AddrModeImm8(Op, N, Base, OffImm))
      // Let t2LDRi8 handle (R - imm8).
      return false;

    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base   = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm  = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

/// SelectT2AddrModeImm8 - Thumb-2 [base - imm8] addressing: only strictly
/// negative offsets in [-255, -1] match (positive offsets go to imm12).
bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDNode *Op, SDValue N,
                                           SDValue &Base, SDValue &OffImm) {
  // Match simple R - imm8 operands.
  if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::SUB) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int RHSC = (int)RHS->getSExtValue();
      if (N.getOpcode() == ISD::SUB)
        RHSC = -RHSC;

      if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
        }
        OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
        return true;
      }
    }
  }

  return false;
}

/// SelectT2AddrModeImm8Offset - Match the imm8 offset of a Thumb-2
/// pre/post-indexed load or store; the sign comes from the parent node's
/// indexing mode (increment -> positive, decrement -> negative).
bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm){
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ?
      cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N)) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC >= 0 && RHSC < 0x100) { // 8 bits.
      OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
        ? CurDAG->getTargetConstant(RHSC, MVT::i32)
        : CurDAG->getTargetConstant(-RHSC, MVT::i32);
      return true;
    }
  }

  return false;
}

/// SelectT2AddrModeImm8s4 - Thumb-2 [base +/- imm8 * 4] addressing: the
/// offset must be a multiple of 4 in (-1024, 1024).  Unlike the other T2
/// forms the full (signed) byte offset is emitted, not offset/4.
bool ARMDAGToDAGISel::SelectT2AddrModeImm8s4(SDNode *Op, SDValue N,
                                             SDValue &Base, SDValue &OffImm) {
  if (N.getOpcode() == ISD::ADD) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (((RHSC & 0x3) == 0) &&
          ((RHSC >= 0 && RHSC < 0x400) || (RHSC < 0 && RHSC > -0x400))) { // 8 bits.
        Base = N.getOperand(0);
        OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
        return true;
      }
    }
  } else if (N.getOpcode() == ISD::SUB) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (((RHSC & 0x3) == 0) && (RHSC >= 0 && RHSC < 0x400)) { // 8 bits.
        Base = N.getOperand(0);
        OffImm = CurDAG->getTargetConstant(-RHSC, MVT::i32);
        return true;
      }
    }
  }

  return false;
}

/// SelectT2AddrModeSoReg - Thumb-2 [base + reg << imm2] addressing.
/// Matches (R + R) and (R + (R << [1,2,3])) in either operand order;
/// immediate forms are left to the imm12/imm8 selectors above.
bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDNode *Op, SDValue N,
                                            SDValue &Base,
                                            SDValue &OffReg, SDValue &ShImm) {
  // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
  if (N.getOpcode() != ISD::ADD)
    return false;

  // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
      return false;
    else if (RHSC < 0 && RHSC >= -255) // 8 bits
      return false;
  }

  // Look for (R + R) or (R + (R << [1,2,3])).
  unsigned ShAmt = 0;
  Base   = N.getOperand(0);
  OffReg = N.getOperand(1);

  // Swap if it is ((R << c) + R).
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg);
  if (ShOpcVal != ARM_AM::lsl) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(Base);
    if (ShOpcVal == ARM_AM::lsl)
      std::swap(Base, OffReg);
  }

  if (ShOpcVal == ARM_AM::lsl) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (ShAmt >= 4) {
        // Only shift amounts 1..3 are encodable; give up on the fold.
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      } else
        OffReg = OffReg.getOperand(0);
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);

  return true;
}

//===--------------------------------------------------------------------===//

/// getAL - Returns a ARMCC::AL immediate node.
static inline SDValue getAL(SelectionDAG *CurDAG) {
  return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
}

/// SelectARMIndexedLoad - Try to select a pre/post-indexed ARM load for N.
/// Picks the LDR/LDRB/LDRH/LDRSH/LDRSB _PRE/_POST machine opcode based on
/// the loaded type, extension kind and indexing mode; returns NULL if the
/// offset does not fit the corresponding addressing mode.
SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  SDValue Offset, AMOpc;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (LoadedVT == MVT::i32 &&
      SelectAddrMode2Offset(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = isPre ? ARM::LDR_PRE : ARM::LDR_POST;
    Match = true;
  } else if (LoadedVT == MVT::i16 &&
             SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
    Match = true;
    Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
      ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
      : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
  } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
    // Sign-extending byte loads use AM3 (LDRSB); others use AM2 (LDRB).
    if (LD->getExtensionType() == ISD::SEXTLOAD) {
      if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
      }
    } else {
      if (SelectAddrMode2Offset(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRB_PRE : ARM::LDRB_POST;
      }
    }
  }

  if (Match) {
    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
                     CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32,
                                  MVT::Other, Ops, 6);
  }

  return NULL;
}

/// SelectT2IndexedLoad - Thumb-2 counterpart of SelectARMIndexedLoad; all
/// forms share the imm8 offset addressing mode.
SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
  SDValue Offset;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
    switch (LoadedVT.getSimpleVT().SimpleTy) {
    case MVT::i32:
      Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
      break;
    case MVT::i16:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
      else
        Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
      break;
    case MVT::i8:
    case MVT::i1:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
      else
        Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
      break;
    default:
      return NULL;
    }
    Match = true;
  }

  if (Match) {
    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[]= { Base, Offset, getAL(CurDAG),
                     CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32,
                                  MVT::Other, Ops, 5);
  }

  return NULL;
}

/// PairDRegs - Insert a pair of double registers into an implicit def to
/// form a quad register.
SDNode *ARMDAGToDAGISel::PairDRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  // Build IMPLICIT_DEF, then insert V0 and V1 as the low/high D subregs.
  SDValue Undef =
    SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::DSUBREG_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::DSUBREG_1, MVT::i32);
  SDNode *Pair = CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                                        VT, Undef, V0, SubReg0);
  return CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                                VT, SDValue(Pair, 0), V1, SubReg1);
}

/// GetNEONSubregVT - Given a type for a 128-bit NEON vector, return the type
/// for a 64-bit subregister of the vector.
static EVT GetNEONSubregVT(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled NEON type");
  case MVT::v16i8: return MVT::v8i8;
  case MVT::v8i16: return MVT::v4i16;
  case MVT::v4f32: return MVT::v2f32;
  case MVT::v4i32: return MVT::v2i32;
  case MVT::v2i64: return MVT::v1i64;
  }
}

/// SelectVLD - Select a NEON vector-load intrinsic (vld1..vld4).  NumVecs is
/// the number of vectors loaded.  DOpcodes/QOpcodes0/QOpcodes1 are opcode
/// tables indexed by element size (8/16/32/64); the Q tables are used for
/// 128-bit result types.  Returns NULL after wiring results via ReplaceUses,
/// or the selected machine node for simple D-register loads.
SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
                                   unsigned *DOpcodes, unsigned *QOpcodes0,
                                   unsigned *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  if (!SelectAddrMode6(N, N->getOperand(2), MemAddr, Align))
    return NULL;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool is64BitVector = VT.is64BitVector();

  // Map the vector type onto an index into the opcode tables.
  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VLD1");
    break;
  }

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  if (is64BitVector) {
    // 64-bit vectors map directly to D registers: one machine node with
    // NumVecs results plus a chain.
    unsigned Opc = DOpcodes[OpcodeIndex];
    const SDValue Ops[] = { MemAddr, Align, Pred, Reg0, Chain };
    std::vector<EVT> ResTys(NumVecs, VT);
    ResTys.push_back(MVT::Other);
    return CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 5);
  }

  EVT RegVT = GetNEONSubregVT(VT);
  if (NumVecs <= 2) {
    // Quad registers are directly supported for VLD1 and VLD2,
    // loading pairs of D regs.
    unsigned Opc = QOpcodes0[OpcodeIndex];
    const SDValue Ops[] = { MemAddr, Align, Pred, Reg0, Chain };
    // Results: 2*NumVecs D-sized values followed by the chain.
    std::vector<EVT> ResTys(2 * NumVecs, RegVT);
    ResTys.push_back(MVT::Other);
    SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 5);
    Chain = SDValue(VLd, 2 * NumVecs);

    // Combine the even and odd subregs to produce the result.
    for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
      SDNode *Q = PairDRegs(VT, SDValue(VLd, 2*Vec), SDValue(VLd, 2*Vec+1));
      ReplaceUses(SDValue(N, Vec), SDValue(Q, 0));
    }
  } else {
    // Otherwise, quad registers are loaded with two separate instructions,
    // where one loads the even registers and the other loads the odd registers.

    // Results: NumVecs D values, the updated address, and the chain.
    std::vector<EVT> ResTys(NumVecs, RegVT);
    ResTys.push_back(MemAddr.getValueType());
    ResTys.push_back(MVT::Other);

    // Load the even subregs.
    unsigned Opc = QOpcodes0[OpcodeIndex];
    const SDValue OpsA[] = { MemAddr, Align, Reg0, Pred, Reg0, Chain };
    SDNode *VLdA = CurDAG->getMachineNode(Opc, dl, ResTys, OpsA, 6);
    Chain = SDValue(VLdA, NumVecs+1);

    // Load the odd subregs, starting from the address produced by the first
    // (post-incrementing) load.
    Opc = QOpcodes1[OpcodeIndex];
    const SDValue OpsB[] = { SDValue(VLdA, NumVecs),
                             Align, Reg0, Pred, Reg0, Chain };
    SDNode *VLdB = CurDAG->getMachineNode(Opc, dl, ResTys, OpsB, 6);
    Chain = SDValue(VLdB, NumVecs+1);

    // Combine the even and odd subregs to produce the result.
    for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
      SDNode *Q = PairDRegs(VT, SDValue(VLdA, Vec), SDValue(VLdB, Vec));
      ReplaceUses(SDValue(N, Vec), SDValue(Q, 0));
    }
  }
  ReplaceUses(SDValue(N, NumVecs), Chain);
  return NULL;
}

/// SelectVST - Select a NEON vector-store intrinsic (vst1..vst4).  NumVecs is
/// the number of vectors stored (operands 3..NumVecs+2 of N).  The opcode
/// tables are indexed by element size, mirroring SelectVLD.
SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
                                   unsigned *DOpcodes, unsigned *QOpcodes0,
                                   unsigned *QOpcodes1) {
  assert(NumVecs >=1 && NumVecs <= 4 && "VST NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  if (!SelectAddrMode6(N, N->getOperand(2), MemAddr, Align))
    return NULL;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getOperand(3).getValueType();
  bool is64BitVector = VT.is64BitVector();

  // Map the vector type onto an index into the opcode tables.
  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vst type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VST1");
    break;
  }

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);

  SmallVector<SDValue, 10> Ops;
  Ops.push_back(MemAddr);
  Ops.push_back(Align);

  if (is64BitVector) {
    // 64-bit vectors map directly to D registers: one store instruction.
    unsigned Opc = DOpcodes[OpcodeIndex];
    for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
      Ops.push_back(N->getOperand(Vec+3));
    Ops.push_back(Pred);
    Ops.push_back(Reg0); // predicate register
    Ops.push_back(Chain);
    return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), NumVecs+5);
  }

  EVT RegVT = GetNEONSubregVT(VT);
  if (NumVecs <= 2) {
    // Quad registers are directly supported for VST1 and VST2,
    // storing pairs of D regs.
    unsigned Opc = QOpcodes0[OpcodeIndex];
    for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
      // Pass each Q operand as its even and odd D subregisters.
      Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::DSUBREG_0, dl, RegVT,
                                                   N->getOperand(Vec+3)));
      Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::DSUBREG_1, dl, RegVT,
                                                   N->getOperand(Vec+3)));
    }
    Ops.push_back(Pred);
    Ops.push_back(Reg0); // predicate register
    Ops.push_back(Chain);
    return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(),
                                  5 + 2 * NumVecs);
  }

  // Otherwise, quad registers are stored with two separate instructions,
  // where one stores the even registers and the other stores the odd registers.

  Ops.push_back(Reg0); // post-access address offset

  // Store the even subregs.
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::DSUBREG_0, dl, RegVT,
                                                 N->getOperand(Vec+3)));
  Ops.push_back(Pred);
  Ops.push_back(Reg0); // predicate register
  Ops.push_back(Chain);
  unsigned Opc = QOpcodes0[OpcodeIndex];
  // Results: updated address (fed to the second store) and chain.
  SDNode *VStA = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
                                        MVT::Other, Ops.data(), NumVecs+6);
  Chain = SDValue(VStA, 1);

  // Store the odd subregs.
  Ops[0] = SDValue(VStA, 0); // MemAddr
  // Reuse the operand vector: swap in the odd D subregs and the new chain.
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    Ops[Vec+3] = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_1, dl, RegVT,
                                                N->getOperand(Vec+3));
  Ops[NumVecs+5] = Chain;
  Opc = QOpcodes1[OpcodeIndex];
  SDNode *VStB = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
                                        MVT::Other, Ops.data(), NumVecs+6);
  Chain = SDValue(VStB, 1);
  ReplaceUses(SDValue(N, 0), Chain);
  return NULL;
}

/// SelectVLDSTLane - Select a NEON lane load/store intrinsic
/// (vld2lane..vld4lane / vst2lane..vst4lane).  IsLoad distinguishes loads
/// from stores; NumVecs is the number of vectors involved.  Operand
/// NumVecs+3 of N is the constant lane number.
SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
                                         unsigned NumVecs, unsigned *DOpcodes,
                                         unsigned *QOpcodes0,
                                         unsigned *QOpcodes1) {
  assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  if (!SelectAddrMode6(N, N->getOperand(2), MemAddr, Align))
    return NULL;

  SDValue Chain = N->getOperand(0);
  unsigned Lane =
    cast<ConstantSDNode>(N->getOperand(NumVecs+3))->getZExtValue();
  EVT VT = IsLoad ? N->getValueType(0) : N->getOperand(3).getValueType();
  bool is64BitVector = VT.is64BitVector();

  // Quad registers are handled by load/store of subregs. Find the subreg info.
  unsigned NumElts = 0;
  int SubregIdx = 0;
  EVT RegVT = VT;
  if (!is64BitVector) {
    RegVT = GetNEONSubregVT(VT);
    NumElts = RegVT.getVectorNumElements();
    // Lanes in the low half live in the even D subreg, high half in the odd.
    SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
  }

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld/vst lane type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
    // Quad-register operations:
  case MVT::v8i16: OpcodeIndex = 0; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 1; break;
  }

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);

  SmallVector<SDValue, 10> Ops;
  Ops.push_back(MemAddr);
  Ops.push_back(Align);

  unsigned Opc = 0;
  if (is64BitVector) {
    Opc = DOpcodes[OpcodeIndex];
    for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
      Ops.push_back(N->getOperand(Vec+3));
  } else {
    // Check if this is loading the even or odd subreg of a Q register.
    if (Lane < NumElts) {
      Opc = QOpcodes0[OpcodeIndex];
    } else {
      // Rebase the lane number relative to the odd D subreg.
      Lane -= NumElts;
      Opc = QOpcodes1[OpcodeIndex];
    }
    // Extract the subregs of the input vector.
    for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
      Ops.push_back(CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
                                                   N->getOperand(Vec+3)));
  }
  Ops.push_back(getI32Imm(Lane));
  Ops.push_back(Pred);
  Ops.push_back(Reg0);
  Ops.push_back(Chain);

  if (!IsLoad)
    return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), NumVecs+6);

  // Loads produce NumVecs D-sized results plus a chain.
  std::vector<EVT> ResTys(NumVecs, RegVT);
  ResTys.push_back(MVT::Other);
  SDNode *VLdLn =
    CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), NumVecs+6);
  // For a 64-bit vector load to D registers, nothing more needs to be done.
  if (is64BitVector)
    return VLdLn;

  // For 128-bit vectors, take the 64-bit results of the load and insert them
  // as subregs into the result.
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
    SDValue QuadVec = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
                                                    N->getOperand(Vec+3),
                                                    SDValue(VLdLn, Vec));
    ReplaceUses(SDValue(N, Vec), QuadVec);
  }

  Chain = SDValue(VLdLn, NumVecs);
  ReplaceUses(SDValue(N, NumVecs), Chain);
  return NULL;
}

/// SelectV6T2BitfieldExtractOp - Try to select an (AND (SRL ...), mask) or
/// shift-of-shift pattern as a UBFX/SBFX bitfield extract (t2UBFX/t2SBFX in
/// Thumb mode).  isSigned selects the signed form.  Returns NULL when the
/// subtarget lacks v6T2 or the pattern does not match.
SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
                                                     bool isSigned) {
  if (!Subtarget->hasV6T2Ops())
    return NULL;

  unsigned Opc = isSigned ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
    : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);


  // For unsigned extracts, check for a shift right and mask
  unsigned And_imm = 0;
  if (N->getOpcode() == ISD::AND) {
    if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {

      // The immediate is a mask of the low bits iff imm & (imm+1) == 0
      if (And_imm & (And_imm + 1))
        return NULL;

      unsigned Srl_imm = 0;
      if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
                                Srl_imm)) {
        assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");

        // Extract Width bits starting at bit LSB.
        unsigned Width = CountTrailingOnes_32(And_imm);
        unsigned LSB = Srl_imm;
        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
        SDValue Ops[] = { N->getOperand(0).getOperand(0),
                          CurDAG->getTargetConstant(LSB, MVT::i32),
                          CurDAG->getTargetConstant(Width, MVT::i32),
                          getAL(CurDAG), Reg0 };
        return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
      }
    }
    return NULL;
  }

  // Otherwise, we're looking for a shift of a shift
  unsigned Shl_imm = 0;
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
    assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
    unsigned Srl_imm = 0;
    if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
      assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
      unsigned Width = 32 - Srl_imm;
      // A negative LSB means the field would start below bit 0 — no match.
      int LSB = Srl_imm - Shl_imm;
      if (LSB < 0)
        return NULL;
      SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
      SDValue Ops[] = { N->getOperand(0).getOperand(0),
                        CurDAG->getTargetConstant(LSB, MVT::i32),
                        CurDAG->getTargetConstant(Width, MVT::i32),
                        getAL(CurDAG), Reg0 };
      return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
    }
  }
  return NULL;
}

/// SelectT2CMOVShiftOp - Try to select a Thumb-2 conditional move where the
/// true operand is a shifter operand (t2MOVCClsl/lsr/asr/ror).  Returns 0 if
/// TrueVal is not a t2 shifter operand.
SDNode *ARMDAGToDAGISel::
SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                    ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  SDValue CPTmp0;
  SDValue CPTmp1;
  if (SelectT2ShifterOperandReg(N, TrueVal, CPTmp0, CPTmp1)) {
    unsigned SOVal = cast<ConstantSDNode>(CPTmp1)->getZExtValue();
    unsigned SOShOp = ARM_AM::getSORegShOp(SOVal);
    unsigned Opc = 0;
    switch (SOShOp) {
    case ARM_AM::lsl: Opc = ARM::t2MOVCClsl; break;
    case ARM_AM::lsr: Opc = ARM::t2MOVCClsr; break;
    case ARM_AM::asr: Opc = ARM::t2MOVCCasr; break;
    case ARM_AM::ror: Opc = ARM::t2MOVCCror; break;
    default:
      llvm_unreachable("Unknown so_reg opcode!");
      break;
    }
    SDValue SOShImm =
      CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), MVT::i32);
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, CPTmp0, SOShImm, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, Opc, MVT::i32,Ops, 6);
  }
  return 0;
}

/// SelectARMCMOVShiftOp - ARM-mode counterpart of SelectT2CMOVShiftOp:
/// select MOVCCs when TrueVal is a shifter operand.
SDNode *ARMDAGToDAGISel::
SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                     ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  SDValue CPTmp0;
  SDValue CPTmp1;
  SDValue CPTmp2;
  if (SelectShifterOperandReg(N, TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, ARM::MOVCCs, MVT::i32, Ops, 7);
  }
  return 0;
}

/// SelectT2CMOVSoImmOp - Select t2MOVCCi when TrueVal is a constant that is
/// a valid Thumb-2 modified immediate.  Returns 0 otherwise.
SDNode *ARMDAGToDAGISel::
SelectT2CMOVSoImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                    ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
  if (!T)
    return 0;

  if (Predicate_t2_so_imm(TrueVal.getNode())) {
    SDValue True = CurDAG->getTargetConstant(T->getZExtValue(), MVT::i32);
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N,
                                ARM::t2MOVCCi, MVT::i32, Ops, 5);
  }
  return 0;
}

/// SelectARMCMOVSoImmOp - ARM-mode counterpart: select MOVCCi when TrueVal
/// is a valid ARM so_imm constant.  Returns 0 otherwise.
SDNode *ARMDAGToDAGISel::
SelectARMCMOVSoImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                     ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
  if (!T)
    return 0;

  if (Predicate_so_imm(TrueVal.getNode())) {
    SDValue True = CurDAG->getTargetConstant(T->getZExtValue(), MVT::i32);
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N,
                                ARM::MOVCCi, MVT::i32, Ops, 5);
  }
  return 0;
}

/// SelectCMOVOp - Select an ARMISD::CMOV node, trying the shifter-operand
/// and so_imm forms (in both operand orders, inverting the condition) before
/// falling back to a plain register-register conditional move.
SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue FalseVal = N->getOperand(0);
  SDValue TrueVal = N->getOperand(1);
  SDValue CC = N->getOperand(2);
  SDValue CCR = N->getOperand(3);
  SDValue InFlag = N->getOperand(4);
  assert(CC.getOpcode() == ISD::Constant);
  assert(CCR.getOpcode() == ISD::Register);
  ARMCC::CondCodes CCVal =
    (ARMCC::CondCodes)cast<ConstantSDNode>(CC)->getZExtValue();

  if (!Subtarget->isThumb1Only() && VT == MVT::i32) {
    // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
    // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
    // Pattern complexity = 18  cost = 1  size = 0
    SDValue CPTmp0;
    SDValue CPTmp1;
    SDValue CPTmp2;
    if (Subtarget->isThumb()) {
      SDNode *Res = SelectT2CMOVShiftOp(N, FalseVal, TrueVal,
                                        CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectT2CMOVShiftOp(N, TrueVal, FalseVal,
                               ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    } else {
      SDNode *Res = SelectARMCMOVShiftOp(N, FalseVal, TrueVal,
                                         CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectARMCMOVShiftOp(N, TrueVal, FalseVal,
                               ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    }

    // Pattern: (ARMcmov:i32 GPR:i32:$false,
    //             (imm:i32)<<P:Predicate_so_imm>>:$true,
    //             (imm:i32):$cc)
    // Emits: (MOVCCi:i32 GPR:i32:$false,
    //           (so_imm:i32 (imm:i32):$true), (imm:i32):$cc)
    // Pattern complexity = 10  cost = 1  size = 0
    if (Subtarget->isThumb()) {
      SDNode *Res = SelectT2CMOVSoImmOp(N, FalseVal, TrueVal,
                                        CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectT2CMOVSoImmOp(N, TrueVal, FalseVal,
                               ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    } else {
      SDNode *Res = SelectARMCMOVSoImmOp(N, FalseVal, TrueVal,
                                         CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectARMCMOVSoImmOp(N, TrueVal, FalseVal,
                               ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    }
  }

  // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
  // Emits: (MOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
  // Pattern complexity = 6  cost = 1  size = 0
  //
  // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
  // Emits: (tMOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
  // Pattern complexity = 6  cost = 11  size = 0
  //
  // Also FCPYScc and FCPYDcc.
1454 SDValue Tmp2 = CurDAG->getTargetConstant(CCVal, MVT::i32); 1455 SDValue Ops[] = { FalseVal, TrueVal, Tmp2, CCR, InFlag }; 1456 unsigned Opc = 0; 1457 switch (VT.getSimpleVT().SimpleTy) { 1458 default: assert(false && "Illegal conditional move type!"); 1459 break; 1460 case MVT::i32: 1461 Opc = Subtarget->isThumb() 1462 ? (Subtarget->hasThumb2() ? ARM::t2MOVCCr : ARM::tMOVCCr_pseudo) 1463 : ARM::MOVCCr; 1464 break; 1465 case MVT::f32: 1466 Opc = ARM::VMOVScc; 1467 break; 1468 case MVT::f64: 1469 Opc = ARM::VMOVDcc; 1470 break; 1471 } 1472 return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5); 1473} 1474 1475SDNode *ARMDAGToDAGISel::Select(SDNode *N) { 1476 DebugLoc dl = N->getDebugLoc(); 1477 1478 if (N->isMachineOpcode()) 1479 return NULL; // Already selected. 1480 1481 switch (N->getOpcode()) { 1482 default: break; 1483 case ISD::Constant: { 1484 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue(); 1485 bool UseCP = true; 1486 if (Subtarget->hasThumb2()) 1487 // Thumb2-aware targets have the MOVT instruction, so all immediates can 1488 // be done with MOV + MOVT, at worst. 1489 UseCP = 0; 1490 else { 1491 if (Subtarget->isThumb()) { 1492 UseCP = (Val > 255 && // MOV 1493 ~Val > 255 && // MOV + MVN 1494 !ARM_AM::isThumbImmShiftedVal(Val)); // MOV + LSL 1495 } else 1496 UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV 1497 ARM_AM::getSOImmVal(~Val) == -1 && // MVN 1498 !ARM_AM::isSOImmTwoPartVal(Val)); // two instrs. 
1499 } 1500 1501 if (UseCP) { 1502 SDValue CPIdx = 1503 CurDAG->getTargetConstantPool(ConstantInt::get( 1504 Type::getInt32Ty(*CurDAG->getContext()), Val), 1505 TLI.getPointerTy()); 1506 1507 SDNode *ResNode; 1508 if (Subtarget->isThumb1Only()) { 1509 SDValue Pred = getAL(CurDAG); 1510 SDValue PredReg = CurDAG->getRegister(0, MVT::i32); 1511 SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() }; 1512 ResNode = CurDAG->getMachineNode(ARM::tLDRcp, dl, MVT::i32, MVT::Other, 1513 Ops, 4); 1514 } else { 1515 SDValue Ops[] = { 1516 CPIdx, 1517 CurDAG->getRegister(0, MVT::i32), 1518 CurDAG->getTargetConstant(0, MVT::i32), 1519 getAL(CurDAG), 1520 CurDAG->getRegister(0, MVT::i32), 1521 CurDAG->getEntryNode() 1522 }; 1523 ResNode=CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other, 1524 Ops, 6); 1525 } 1526 ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0)); 1527 return NULL; 1528 } 1529 1530 // Other cases are autogenerated. 1531 break; 1532 } 1533 case ISD::FrameIndex: { 1534 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm. 1535 int FI = cast<FrameIndexSDNode>(N)->getIndex(); 1536 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); 1537 if (Subtarget->isThumb1Only()) { 1538 return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI, 1539 CurDAG->getTargetConstant(0, MVT::i32)); 1540 } else { 1541 unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ? 
1542 ARM::t2ADDri : ARM::ADDri); 1543 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32), 1544 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), 1545 CurDAG->getRegister(0, MVT::i32) }; 1546 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5); 1547 } 1548 } 1549 case ISD::SRL: 1550 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false)) 1551 return I; 1552 break; 1553 case ISD::SRA: 1554 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true)) 1555 return I; 1556 break; 1557 case ISD::MUL: 1558 if (Subtarget->isThumb1Only()) 1559 break; 1560 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) { 1561 unsigned RHSV = C->getZExtValue(); 1562 if (!RHSV) break; 1563 if (isPowerOf2_32(RHSV-1)) { // 2^n+1? 1564 unsigned ShImm = Log2_32(RHSV-1); 1565 if (ShImm >= 32) 1566 break; 1567 SDValue V = N->getOperand(0); 1568 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm); 1569 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32); 1570 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); 1571 if (Subtarget->isThumb()) { 1572 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 }; 1573 return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6); 1574 } else { 1575 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 }; 1576 return CurDAG->SelectNodeTo(N, ARM::ADDrs, MVT::i32, Ops, 7); 1577 } 1578 } 1579 if (isPowerOf2_32(RHSV+1)) { // 2^n-1? 
1580 unsigned ShImm = Log2_32(RHSV+1); 1581 if (ShImm >= 32) 1582 break; 1583 SDValue V = N->getOperand(0); 1584 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm); 1585 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32); 1586 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); 1587 if (Subtarget->isThumb()) { 1588 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0 }; 1589 return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 5); 1590 } else { 1591 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 }; 1592 return CurDAG->SelectNodeTo(N, ARM::RSBrs, MVT::i32, Ops, 7); 1593 } 1594 } 1595 } 1596 break; 1597 case ISD::AND: { 1598 // Check for unsigned bitfield extract 1599 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false)) 1600 return I; 1601 1602 // (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits 1603 // of c1 are 0xffff, and lower 16-bit of c2 are 0. That is, the top 16-bits 1604 // are entirely contributed by c2 and lower 16-bits are entirely contributed 1605 // by x. That's equal to (or (and x, 0xffff), (and c1, 0xffff0000)). 1606 // Select it to: "movt x, ((c1 & 0xffff) >> 16) 1607 EVT VT = N->getValueType(0); 1608 if (VT != MVT::i32) 1609 break; 1610 unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2()) 1611 ? ARM::t2MOVTi16 1612 : (Subtarget->hasV6T2Ops() ? 
ARM::MOVTi16 : 0); 1613 if (!Opc) 1614 break; 1615 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); 1616 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1617 if (!N1C) 1618 break; 1619 if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) { 1620 SDValue N2 = N0.getOperand(1); 1621 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 1622 if (!N2C) 1623 break; 1624 unsigned N1CVal = N1C->getZExtValue(); 1625 unsigned N2CVal = N2C->getZExtValue(); 1626 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) && 1627 (N1CVal & 0xffffU) == 0xffffU && 1628 (N2CVal & 0xffffU) == 0x0U) { 1629 SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16, 1630 MVT::i32); 1631 SDValue Ops[] = { N0.getOperand(0), Imm16, 1632 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) }; 1633 return CurDAG->getMachineNode(Opc, dl, VT, Ops, 4); 1634 } 1635 } 1636 break; 1637 } 1638 case ARMISD::VMOVRRD: 1639 return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32, 1640 N->getOperand(0), getAL(CurDAG), 1641 CurDAG->getRegister(0, MVT::i32)); 1642 case ISD::UMUL_LOHI: { 1643 if (Subtarget->isThumb1Only()) 1644 break; 1645 if (Subtarget->isThumb()) { 1646 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 1647 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), 1648 CurDAG->getRegister(0, MVT::i32) }; 1649 return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops,4); 1650 } else { 1651 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 1652 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), 1653 CurDAG->getRegister(0, MVT::i32) }; 1654 return CurDAG->getMachineNode(ARM::UMULL, dl, MVT::i32, MVT::i32, Ops, 5); 1655 } 1656 } 1657 case ISD::SMUL_LOHI: { 1658 if (Subtarget->isThumb1Only()) 1659 break; 1660 if (Subtarget->isThumb()) { 1661 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 1662 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) }; 1663 return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops,4); 1664 } else { 1665 
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 1666 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), 1667 CurDAG->getRegister(0, MVT::i32) }; 1668 return CurDAG->getMachineNode(ARM::SMULL, dl, MVT::i32, MVT::i32, Ops, 5); 1669 } 1670 } 1671 case ISD::LOAD: { 1672 SDNode *ResNode = 0; 1673 if (Subtarget->isThumb() && Subtarget->hasThumb2()) 1674 ResNode = SelectT2IndexedLoad(N); 1675 else 1676 ResNode = SelectARMIndexedLoad(N); 1677 if (ResNode) 1678 return ResNode; 1679 1680 // VLDMQ must be custom-selected for "v2f64 load" to set the AM5Opc value. 1681 if (Subtarget->hasVFP2() && 1682 N->getValueType(0).getSimpleVT().SimpleTy == MVT::v2f64) { 1683 SDValue Chain = N->getOperand(0); 1684 SDValue AM5Opc = 1685 CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::ia, 4), MVT::i32); 1686 SDValue Pred = getAL(CurDAG); 1687 SDValue PredReg = CurDAG->getRegister(0, MVT::i32); 1688 SDValue Ops[] = { N->getOperand(1), AM5Opc, Pred, PredReg, Chain }; 1689 return CurDAG->getMachineNode(ARM::VLDMQ, dl, MVT::v2f64, MVT::Other, 1690 Ops, 5); 1691 } 1692 // Other cases are autogenerated. 1693 break; 1694 } 1695 case ISD::STORE: { 1696 // VSTMQ must be custom-selected for "v2f64 store" to set the AM5Opc value. 1697 if (Subtarget->hasVFP2() && 1698 N->getOperand(1).getValueType().getSimpleVT().SimpleTy == MVT::v2f64) { 1699 SDValue Chain = N->getOperand(0); 1700 SDValue AM5Opc = 1701 CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::ia, 4), MVT::i32); 1702 SDValue Pred = getAL(CurDAG); 1703 SDValue PredReg = CurDAG->getRegister(0, MVT::i32); 1704 SDValue Ops[] = { N->getOperand(1), N->getOperand(2), 1705 AM5Opc, Pred, PredReg, Chain }; 1706 return CurDAG->getMachineNode(ARM::VSTMQ, dl, MVT::Other, Ops, 6); 1707 } 1708 // Other cases are autogenerated. 
1709 break; 1710 } 1711 case ARMISD::BRCOND: { 1712 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc) 1713 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc) 1714 // Pattern complexity = 6 cost = 1 size = 0 1715 1716 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc) 1717 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc) 1718 // Pattern complexity = 6 cost = 1 size = 0 1719 1720 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc) 1721 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc) 1722 // Pattern complexity = 6 cost = 1 size = 0 1723 1724 unsigned Opc = Subtarget->isThumb() ? 1725 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc; 1726 SDValue Chain = N->getOperand(0); 1727 SDValue N1 = N->getOperand(1); 1728 SDValue N2 = N->getOperand(2); 1729 SDValue N3 = N->getOperand(3); 1730 SDValue InFlag = N->getOperand(4); 1731 assert(N1.getOpcode() == ISD::BasicBlock); 1732 assert(N2.getOpcode() == ISD::Constant); 1733 assert(N3.getOpcode() == ISD::Register); 1734 1735 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned) 1736 cast<ConstantSDNode>(N2)->getZExtValue()), 1737 MVT::i32); 1738 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag }; 1739 SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, 1740 MVT::Flag, Ops, 5); 1741 Chain = SDValue(ResNode, 0); 1742 if (N->getNumValues() == 2) { 1743 InFlag = SDValue(ResNode, 1); 1744 ReplaceUses(SDValue(N, 1), InFlag); 1745 } 1746 ReplaceUses(SDValue(N, 0), 1747 SDValue(Chain.getNode(), Chain.getResNo())); 1748 return NULL; 1749 } 1750 case ARMISD::CMOV: 1751 return SelectCMOVOp(N); 1752 case ARMISD::CNEG: { 1753 EVT VT = N->getValueType(0); 1754 SDValue N0 = N->getOperand(0); 1755 SDValue N1 = N->getOperand(1); 1756 SDValue N2 = N->getOperand(2); 1757 SDValue N3 = N->getOperand(3); 1758 SDValue InFlag = N->getOperand(4); 1759 assert(N2.getOpcode() == ISD::Constant); 1760 assert(N3.getOpcode() == ISD::Register); 1761 1762 SDValue Tmp2 = 
CurDAG->getTargetConstant(((unsigned)
                       cast<ConstantSDNode>(N2)->getZExtValue()),
                          MVT::i32);
    // NOTE(review): this is the tail of a conditional-op case whose opening
    // (including the declarations of N0..N3, Tmp2 and InFlag) lies above this
    // excerpt; Tmp2 presumably carries the ARM condition code as an i32
    // immediate -- confirm against the case header.
    SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag };
    unsigned Opc = 0;
    // Pick the predicated VFP negate matching the result type.
    switch (VT.getSimpleVT().SimpleTy) {
    default: assert(false && "Illegal conditional move type!");
      break;
    case MVT::f32:
      Opc = ARM::VNEGScc;
      break;
    case MVT::f64:
      Opc = ARM::VNEGDcc;
      break;
    }
    return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5);
  }

  case ARMISD::VZIP: {
    // Select a NEON VZIP instruction: choose the d- or q-register opcode
    // from the vector type, then emit it with an always-executed (AL)
    // predicate and a zero predicate register.
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
    switch (VT.getSimpleVT().SimpleTy) {
    default: return NULL;   // Unsupported vector type: let tblgen patterns try.
    case MVT::v8i8:  Opc = ARM::VZIPd8; break;
    case MVT::v4i16: Opc = ARM::VZIPd16; break;
    case MVT::v2f32:
    case MVT::v2i32: Opc = ARM::VZIPd32; break;
    case MVT::v16i8: Opc = ARM::VZIPq8; break;
    case MVT::v8i16: Opc = ARM::VZIPq16; break;
    case MVT::v4f32:
    case MVT::v4i32: Opc = ARM::VZIPq32; break;
    }
    SDValue Pred = getAL(CurDAG);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
    // VZIP writes both of its register operands, hence the two VT results.
    return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
  }
  case ARMISD::VUZP: {
    // Same structure as VZIP above, but selecting the VUZP opcodes.
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
    switch (VT.getSimpleVT().SimpleTy) {
    default: return NULL;
    case MVT::v8i8:  Opc = ARM::VUZPd8; break;
    case MVT::v4i16: Opc = ARM::VUZPd16; break;
    case MVT::v2f32:
    case MVT::v2i32: Opc = ARM::VUZPd32; break;
    case MVT::v16i8: Opc = ARM::VUZPq8; break;
    case MVT::v8i16: Opc = ARM::VUZPq16; break;
    case MVT::v4f32:
    case MVT::v4i32: Opc = ARM::VUZPq32; break;
    }
    SDValue Pred = getAL(CurDAG);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
    return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
  }
  case ARMISD::VTRN: {
    // Same structure as VZIP above, but selecting the VTRN opcodes.
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
    switch (VT.getSimpleVT().SimpleTy) {
    default: return NULL;
    case MVT::v8i8:  Opc = ARM::VTRNd8; break;
    case MVT::v4i16: Opc = ARM::VTRNd16; break;
    case MVT::v2f32:
    case MVT::v2i32: Opc = ARM::VTRNd32; break;
    case MVT::v16i8: Opc = ARM::VTRNq8; break;
    case MVT::v8i16: Opc = ARM::VTRNq16; break;
    case MVT::v4f32:
    case MVT::v4i32: Opc = ARM::VTRNq32; break;
    }
    SDValue Pred = getAL(CurDAG);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
    return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
  }

  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN: {
    // Chained NEON load/store intrinsics.  Each case passes opcode tables
    // indexed by element size (8/16/32[/64]-bit) to a SelectVLD / SelectVST /
    // SelectVLDSTLane helper; D tables cover 64-bit vectors, Q tables cover
    // 128-bit vectors.
    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default:
      break;    // Not a NEON memory intrinsic; fall through to SelectCode.

    case Intrinsic::arm_neon_vld1: {
      unsigned DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
                              ARM::VLD1d32, ARM::VLD1d64 };
      unsigned QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
                              ARM::VLD1q32, ARM::VLD1q64 };
      return SelectVLD(N, 1, DOpcodes, QOpcodes, 0);
    }

    case Intrinsic::arm_neon_vld2: {
      // 64-bit elements use VLD1q64: presumably a vld2 of 1x64 is the same
      // memory access as a 128-bit vld1 -- see SelectVLD for how it is used.
      unsigned DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
                              ARM::VLD2d32, ARM::VLD1q64 };
      unsigned QOpcodes[] = { ARM::VLD2q8, ARM::VLD2q16, ARM::VLD2q32 };
      return SelectVLD(N, 2, DOpcodes, QOpcodes, 0);
    }

    case Intrinsic::arm_neon_vld3: {
      // Quad-register vld3 is supplied as a pair of opcode tables
      // (base "_UPD" and "odd_UPD" variants); presumably SelectVLD emits the
      // Q-register operation as two D-register halves -- confirm there.
      unsigned DOpcodes[] = { ARM::VLD3d8, ARM::VLD3d16,
                              ARM::VLD3d32, ARM::VLD1d64T };
      unsigned QOpcodes0[] = { ARM::VLD3q8_UPD,
                               ARM::VLD3q16_UPD,
                               ARM::VLD3q32_UPD };
      unsigned QOpcodes1[] = { ARM::VLD3q8odd_UPD,
                               ARM::VLD3q16odd_UPD,
                               ARM::VLD3q32odd_UPD };
      return SelectVLD(N, 3, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vld4: {
      // Same two-table scheme as vld3 above.
      unsigned DOpcodes[] = { ARM::VLD4d8, ARM::VLD4d16,
                              ARM::VLD4d32, ARM::VLD1d64Q };
      unsigned QOpcodes0[] = { ARM::VLD4q8_UPD,
                               ARM::VLD4q16_UPD,
                               ARM::VLD4q32_UPD };
      unsigned QOpcodes1[] = { ARM::VLD4q8odd_UPD,
                               ARM::VLD4q16odd_UPD,
                               ARM::VLD4q32odd_UPD };
      return SelectVLD(N, 4, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vld2lane: {
      // Lane loads: the Q tables only cover 16- and 32-bit elements
      // (8-bit lanes exist only in the D-register form).
      unsigned DOpcodes[] = { ARM::VLD2LNd8, ARM::VLD2LNd16, ARM::VLD2LNd32 };
      unsigned QOpcodes0[] = { ARM::VLD2LNq16, ARM::VLD2LNq32 };
      unsigned QOpcodes1[] = { ARM::VLD2LNq16odd, ARM::VLD2LNq32odd };
      return SelectVLDSTLane(N, true, 2, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vld3lane: {
      unsigned DOpcodes[] = { ARM::VLD3LNd8, ARM::VLD3LNd16, ARM::VLD3LNd32 };
      unsigned QOpcodes0[] = { ARM::VLD3LNq16, ARM::VLD3LNq32 };
      unsigned QOpcodes1[] = { ARM::VLD3LNq16odd, ARM::VLD3LNq32odd };
      return SelectVLDSTLane(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vld4lane: {
      unsigned DOpcodes[] = { ARM::VLD4LNd8, ARM::VLD4LNd16, ARM::VLD4LNd32 };
      unsigned QOpcodes0[] = { ARM::VLD4LNq16, ARM::VLD4LNq32 };
      unsigned QOpcodes1[] = { ARM::VLD4LNq16odd, ARM::VLD4LNq32odd };
      return SelectVLDSTLane(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vst1: {
      unsigned DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
                              ARM::VST1d32, ARM::VST1d64 };
      unsigned QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
                              ARM::VST1q32, ARM::VST1q64 };
      return SelectVST(N, 1, DOpcodes, QOpcodes, 0);
    }

    case Intrinsic::arm_neon_vst2: {
      // Mirrors vld2 above, including the VST1q64 substitution for
      // 64-bit elements.
      unsigned DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
                              ARM::VST2d32, ARM::VST1q64 };
      unsigned QOpcodes[] = { ARM::VST2q8, ARM::VST2q16, ARM::VST2q32 };
      return SelectVST(N, 2, DOpcodes, QOpcodes, 0);
    }

    case Intrinsic::arm_neon_vst3: {
      // Mirrors vld3 above: two Q-opcode tables ("_UPD" / "odd_UPD").
      unsigned DOpcodes[] = { ARM::VST3d8, ARM::VST3d16,
                              ARM::VST3d32, ARM::VST1d64T };
      unsigned QOpcodes0[] = { ARM::VST3q8_UPD,
                               ARM::VST3q16_UPD,
                               ARM::VST3q32_UPD };
      unsigned QOpcodes1[] = { ARM::VST3q8odd_UPD,
                               ARM::VST3q16odd_UPD,
                               ARM::VST3q32odd_UPD };
      return SelectVST(N, 3, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vst4: {
      unsigned DOpcodes[] = { ARM::VST4d8, ARM::VST4d16,
                              ARM::VST4d32, ARM::VST1d64Q };
      unsigned QOpcodes0[] = { ARM::VST4q8_UPD,
                               ARM::VST4q16_UPD,
                               ARM::VST4q32_UPD };
      unsigned QOpcodes1[] = { ARM::VST4q8odd_UPD,
                               ARM::VST4q16odd_UPD,
                               ARM::VST4q32odd_UPD };
      return SelectVST(N, 4, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vst2lane: {
      // Lane stores: same table shape as the vldNlane cases above.
      unsigned DOpcodes[] = { ARM::VST2LNd8, ARM::VST2LNd16, ARM::VST2LNd32 };
      unsigned QOpcodes0[] = { ARM::VST2LNq16, ARM::VST2LNq32 };
      unsigned QOpcodes1[] = { ARM::VST2LNq16odd, ARM::VST2LNq32odd };
      return SelectVLDSTLane(N, false, 2, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vst3lane: {
      unsigned DOpcodes[] = { ARM::VST3LNd8, ARM::VST3LNd16, ARM::VST3LNd32 };
      unsigned QOpcodes0[] = { ARM::VST3LNq16, ARM::VST3LNq32 };
      unsigned QOpcodes1[] = { ARM::VST3LNq16odd, ARM::VST3LNq32odd };
      return SelectVLDSTLane(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vst4lane: {
      unsigned DOpcodes[] = { ARM::VST4LNd8, ARM::VST4LNd16, ARM::VST4LNd32 };
      unsigned QOpcodes0[] = { ARM::VST4LNq16, ARM::VST4LNq32 };
      unsigned QOpcodes1[] = { ARM::VST4LNq16odd, ARM::VST4LNq32odd };
      return SelectVLDSTLane(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
    }
    }
  }
  }

  // No manual selection matched: defer to the tblgen-generated matcher.
  return SelectCode(N);
}

/// SelectInlineAsmMemoryOperand - Expand an inline-asm 'm' (memory)
/// constraint by passing the address operand through unchanged, i.e.
/// requiring it to be in a register.  Returns false to signal success.
bool ARMDAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
  // Require the address to be in a register.  That is safe for all ARM
  // variants and it is hard to do anything much smarter without knowing
  // how the operand is used.
  OutOps.push_back(Op);
  return false;
}

/// createARMISelDag - This pass converts a legalized DAG into a
/// ARM-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new ARMDAGToDAGISel(TM, OptLevel);
}