//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM target.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

static cl::opt<bool>
DisableShifterOp("disable-shifter-op", cl::Hidden,
  cl::desc("Disable isel of shifter-op"),
  cl::init(false));

static cl::opt<bool>
CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
  cl::desc("Check fp vmla / vmls hazard at isel time"),
  cl::init(true));

//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
///
namespace {

enum AddrMode2Type {
  AM2_BASE, // Simple AM2 (+-imm12)
  AM2_SHOP  // Shifter-op AM2
};

class ARMDAGToDAGISel : public SelectionDAGISel {
  ARMBaseTargetMachine &TM;

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

public:
  explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
                           CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(tm, OptLevel), TM(tm),
      Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
  }

  virtual const char *getPassName() const {
    return "ARM Instruction Selection";
  }

  virtual void PreprocessISelDAG();

  /// getI32Imm - Return a target constant of type i32 with the specified
  /// value.
  inline SDValue getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);
  }

  SDNode *Select(SDNode *N);


  bool hasNoVMLxHazardUse(SDNode *N) const;
  bool isShifterOpProfitable(const SDValue &Shift,
                             ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
  bool SelectRegShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, SDValue &C,
                               bool CheckProfitability = true);
  bool SelectImmShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, bool CheckProfitability = true);
  bool SelectShiftRegShifterOperand(SDValue N, SDValue &A,
                                    SDValue &B, SDValue &C) {
    // Don't apply the profitability check
    return SelectRegShifterOperand(N, A, B, C, false);
  }
  bool SelectShiftImmShifterOperand(SDValue N, SDValue &A,
                                    SDValue &B) {
    // Don't apply the profitability check
    return SelectImmShifterOperand(N, A, B, false);
  }

  bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);

  AddrMode2Type SelectAddrMode2Worker(SDValue N, SDValue &Base,
                                      SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2Base(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_BASE;
  }

  bool SelectAddrMode2ShOp(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_SHOP;
  }

  bool SelectAddrMode2(SDValue N, SDValue &Base, SDValue &Offset,
                       SDValue &Opc) {
    SelectAddrMode2Worker(N, Base, Offset, Opc);
//    return SelectAddrMode2ShOp(N, Base, Offset, Opc);
    // This always matches one way or another.
    return true;
  }

  bool SelectCMOVPred(SDValue N, SDValue &Pred, SDValue &Reg) {
    const ConstantSDNode *CN = cast<ConstantSDNode>(N);
    Pred = CurDAG->getTargetConstant(CN->getZExtValue(), MVT::i32);
    Reg = CurDAG->getRegister(ARM::CPSR, MVT::i32);
    return true;
  }

  bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                   SDValue &Offset, SDValue &Opc);
  bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
  bool SelectAddrMode3(SDValue N, SDValue &Base,
                       SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode5(SDValue N, SDValue &Base,
                       SDValue &Offset);
  bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,SDValue &Align);
  bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);

  bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);

  // Thumb Addressing Modes:
  bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI(SDValue N, SDValue &Base, SDValue &Offset,
                             unsigned Scale);
  bool SelectThumbAddrModeRI5S1(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S2(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S4(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
                                SDValue &OffImm);
  bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);

  // Thumb 2 Addressing Modes:
  bool SelectT2ShifterOperandReg(SDValue N,
                                 SDValue &BaseReg, SDValue &Opc);
  bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
                            SDValue &OffImm);
  bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                  SDValue &OffImm);
  bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
                             SDValue &OffReg, SDValue &ShImm);
  bool SelectT2AddrModeExclusive(SDValue N, SDValue &Base, SDValue &OffImm);

  inline bool is_so_imm(unsigned Imm) const {
    return ARM_AM::getSOImmVal(Imm) != -1;
  }

  inline bool is_so_imm_not(unsigned Imm) const {
    return ARM_AM::getSOImmVal(~Imm) != -1;
  }

  inline bool is_t2_so_imm(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(Imm) != -1;
  }

  inline bool is_t2_so_imm_not(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(~Imm) != -1;
  }

  // Include the pieces autogenerated from the target description.
#include "ARMGenDAGISel.inc"

private:
  /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
  /// ARM.
  SDNode *SelectARMIndexedLoad(SDNode *N);
  SDNode *SelectT2IndexedLoad(SDNode *N);

  /// SelectVLD - Select NEON load intrinsics.  NumVecs should be
  /// 1, 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// loads of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                    const uint16_t *DOpcodes,
                    const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);

  /// SelectVST - Select NEON store intrinsics.  NumVecs should
  /// be 1, 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// stores of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                    const uint16_t *DOpcodes,
                    const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);

  /// SelectVLDSTLane - Select NEON load/store lane intrinsics.  NumVecs should
  /// be 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// load/store of D registers and Q registers.
  SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
                          bool isUpdating, unsigned NumVecs,
                          const uint16_t *DOpcodes, const uint16_t *QOpcodes);

  /// SelectVLDDup - Select NEON load-duplicate intrinsics.  NumVecs
  /// should be 2, 3 or 4.  The opcode array specifies the instructions used
  /// for loading D registers.  (Q registers are not supported.)
  SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
                       const uint16_t *Opcodes);

  /// SelectVTBL - Select NEON VTBL and VTBX intrinsics.  NumVecs should be 2,
  /// 3 or 4.  These are custom-selected so that a REG_SEQUENCE can be
  /// generated to force the table registers to be consecutive.
  SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);

  /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
  SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);

  // Select special operations if node forms integer ABS pattern
  SDNode *SelectABSOp(SDNode *N);

  SDNode *SelectInlineAsm(SDNode *N);

  SDNode *SelectConcatVector(SDNode *N);

  SDNode *SelectAtomic(SDNode *N, unsigned Op8, unsigned Op16, unsigned Op32,
                       unsigned Op64);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            char ConstraintCode,
                                            std::vector<SDValue> &OutOps);

  // Form pairs of consecutive R, S, D, or Q registers.
  SDNode *createGPRPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createSRegPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createDRegPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createQRegPairNode(EVT VT, SDValue V0, SDValue V1);

  // Form sequences of 4 consecutive S, D, or Q registers.
  SDNode *createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
                              SDValue V2, SDValue V3);
  SDNode *createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
                              SDValue V2, SDValue V3);
  SDNode *createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
                              SDValue V2, SDValue V3);

  // Get the alignment operand for a NEON VLD or VST instruction.
  SDValue GetVLDSTAlign(SDValue Align, unsigned NumVecs, bool is64BitVector);
};
}

/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

// isInt32Immediate - This method tests to see if the value is a 32-bit
// constant operand. If so Imm will receive the 32 bit value.
static bool isInt32Immediate(SDValue N, unsigned &Imm) {
  return isInt32Immediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so Imm will receive the 32 bit value.
static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
  return N->getOpcode() == Opc &&
         isInt32Immediate(N->getOperand(1).getNode(), Imm);
}

/// \brief Check whether a particular node is a constant value representable as
/// (N * Scale) where N is in [\p RangeMin, \p RangeMax).
///
/// \param ScaledConstant [out] - On success, the pre-scaled constant value.
static bool isScaledConstantInRange(SDValue Node, int Scale,
                                    int RangeMin, int RangeMax,
                                    int &ScaledConstant) {
  assert(Scale > 0 && "Invalid scale!");

  // Check that this is a constant.
  const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
  if (!C)
    return false;

  ScaledConstant = (int) C->getZExtValue();
  if ((ScaledConstant % Scale) != 0)
    return false;

  ScaledConstant /= Scale;
  return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
}

void ARMDAGToDAGISel::PreprocessISelDAG() {
  if (!Subtarget->hasV6T2Ops())
    return;

  bool isThumb2 = Subtarget->isThumb();
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (N->getOpcode() != ISD::ADD)
      continue;

    // Look for (add X1, (and (srl X2, c1), c2)) where c2 is constant with
    // leading zeros, followed by consecutive set bits, followed by 1 or 2
    // trailing zeros, e.g. 1020.
    // Transform the expression to
    // (add X1, (shl (and (srl X2, c1), (c2>>tz)), tz)) where tz is the number
    // of trailing zeros of c2.  The left shift would be folded as a shifter
    // operand of 'add' and the 'and' and 'srl' would become a bits extraction
    // node (UBFX).

    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    unsigned And_imm = 0;
    if (!isOpcWithIntImmediate(N1.getNode(), ISD::AND, And_imm)) {
      if (isOpcWithIntImmediate(N0.getNode(), ISD::AND, And_imm))
        std::swap(N0, N1);
    }
    if (!And_imm)
      continue;

    // Check if the AND mask is an immediate of the form: 000.....1111111100
    unsigned TZ = countTrailingZeros(And_imm);
    if (TZ != 1 && TZ != 2)
      // Be conservative here. Shifter operands aren't always free. e.g. On
      // Swift, a left shifter operand of 1 or 2 is free but others are not.
      // e.g.
      //  ubfx   r3, r1, #16, #8
      //  ldr.w  r3, [r0, r3, lsl #2]
      // vs.
      //  mov.w  r9, #1020
      //  and.w  r2, r9, r1, lsr #14
      //  ldr    r2, [r0, r2]
      continue;
    And_imm >>= TZ;
    if (And_imm & (And_imm + 1))
      continue;

    // Look for (and (srl X, c1), c2).
    SDValue Srl = N1.getOperand(0);
    unsigned Srl_imm = 0;
    if (!isOpcWithIntImmediate(Srl.getNode(), ISD::SRL, Srl_imm) ||
        (Srl_imm <= 2))
      continue;

    // Make sure first operand is not a shifter operand which would prevent
    // folding of the left shift.
    SDValue CPTmp0;
    SDValue CPTmp1;
    SDValue CPTmp2;
    if (isThumb2) {
      if (SelectT2ShifterOperandReg(N0, CPTmp0, CPTmp1))
        continue;
    } else {
      if (SelectImmShifterOperand(N0, CPTmp0, CPTmp1) ||
          SelectRegShifterOperand(N0, CPTmp0, CPTmp1, CPTmp2))
        continue;
    }

    // Now make the transformation.
    Srl = CurDAG->getNode(ISD::SRL, SDLoc(Srl), MVT::i32,
                          Srl.getOperand(0),
                          CurDAG->getConstant(Srl_imm + TZ, MVT::i32));
    N1 = CurDAG->getNode(ISD::AND, SDLoc(N1), MVT::i32,
                         Srl, CurDAG->getConstant(And_imm, MVT::i32));
    N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32,
                         N1, CurDAG->getConstant(TZ, MVT::i32));
    CurDAG->UpdateNodeOperands(N, N0, N1);
  }
}
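
// Illustrative sketch (not from the original source): with c2 = 1020
// (0b1111111100, tz = 2) and c1 = 14, the preprocessing above rewrites
//   (add X1, (and (srl X2, 14), 1020))
// into
//   (add X1, (shl (and (srl X2, 16), 255), 2))
// so that instruction selection can emit roughly
//   ubfx  r3, r2, #16, #8
//   add   r0, r1, r3, lsl #2
// instead of materializing the 1020 mask in a register. The registers used
// here are hypothetical and only meant to show the shape of the rewrite.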

/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
  if (OptLevel == CodeGenOpt::None)
    return true;

  if (!CheckVMLxHazard)
    return true;

  if (!Subtarget->isCortexA8() && !Subtarget->isCortexA9() &&
      !Subtarget->isSwift())
    return true;

  if (!N->hasOneUse())
    return false;

  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() == ISD::CopyToReg)
    return true;
  if (Use->isMachineOpcode()) {
    const ARMBaseInstrInfo *TII =
      static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo());

    const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
    if (MCID.mayStore())
      return true;
    unsigned Opcode = MCID.getOpcode();
    if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
      return true;
    // vmlx feeding into another vmlx. We actually want to unfold
    // the use later in the MLxExpansion pass. e.g.
    // vmla
    // vmla (stall 8 cycles)
    //
    // vmul (5 cycles)
    // vadd (5 cycles)
    // vmla
    // This adds up to about 18 - 19 cycles.
    //
    // vmla
    // vmul (stall 4 cycles)
    // vadd adds up to about 14 cycles.
    return TII->isFpMLxInstruction(Opcode);
  }

  return false;
}

bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
                                            ARM_AM::ShiftOpc ShOpcVal,
                                            unsigned ShAmt) {
  if (!Subtarget->isLikeA9() && !Subtarget->isSwift())
    return true;
  if (Shift.hasOneUse())
    return true;
  // R << 2 is free.
  return ShOpcVal == ARM_AM::lsl &&
         (ShAmt == 2 || (Subtarget->isSwift() && ShAmt == 1));
}

bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!RHS) return false;
  ShImmVal = RHS->getZExtValue() & 31;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &ShReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (RHS) return false;

  ShReg = N.getOperand(1);
  if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
    return false;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  MVT::i32);
  return true;
}
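
// Illustrative note (not part of the original source): the "shifter operand"
// selected above is the second operand of ARM data-processing instructions,
// e.g. the "r2, lsl #3" in
//   add r0, r1, r2, lsl #3
// SelectImmShifterOperand matches a shift-by-constant, while
// SelectRegShifterOperand matches a shift-by-register such as "r2, lsl r3".
// getSORegOpc() packs the shift kind and amount into the single immediate
// operand consumed by the instruction patterns.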

bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
                                          SDValue &Base,
                                          SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI,
                                           getTargetLowering()->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}



bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      ((!Subtarget->isLikeA9() && !Subtarget->isSwift()) || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return true;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ISD::ADD.
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave simple R +/- imm12 operands for LDRi12
  if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) // 12 bits.
      return false;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
        N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
            dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}


//-----
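
// Illustrative note (not part of the original source): SelectLdStSOReg feeds
// the register-offset form of ARM loads/stores, e.g.
//   ldr r0, [r1, r2, lsl #2]
// and the X * [3,5,9] special case above lets an address computed as x * 5
// be matched as [x, x, lsl #2] without a separate multiply. getAM2Opc()
// encodes the add/sub direction, the shift amount and the shift kind into
// one target constant consumed by the LDR/STR patterns.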

AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
                                                     SDValue &Base,
                                                     SDValue &Offset,
                                                     SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      (!(Subtarget->isLikeA9() || Subtarget->isSwift()) || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return AM2_SHOP;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ADD.
      !CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return AM2_BASE;
  }

  // Match simple R +/- imm12 operands.
  if (N.getOpcode() != ISD::SUB) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) { // 12 bits.
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI,
                                           getTargetLowering()->getPointerTy());
      }
      Offset = CurDAG->getRegister(0, MVT::i32);

      ARM_AM::AddrOpc AddSub = ARM_AM::add;
      if (RHSC < 0) {
        AddSub = ARM_AM::sub;
        RHSC = - RHSC;
      }
      Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
                                                        ARM_AM::no_shift),
                                      MVT::i32);
      return AM2_BASE;
    }
  }

  if ((Subtarget->isLikeA9() || Subtarget->isSwift()) && !N.hasOneUse()) {
    // Compute R +/- (R << N) and reuse it.
    Base = N;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return AM2_BASE;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() != ISD::SUB ? ARM_AM::add:ARM_AM::sub;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
        N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
            dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return AM2_SHOP;
}

bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                               SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
    return false;

  Offset = N;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
  unsigned ShAmt = 0;
  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
        Offset = N.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}
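
// Illustrative note (not part of the original source): the *Offset* selectors
// above and below handle the pre/post-indexed forms of ARM loads and stores,
// where the base register is written back, e.g.
//   ldr r0, [r1, r2, lsl #2]!   ; pre-indexed, register offset
//   ldr r0, [r1], #4            ; post-indexed, immediate offset
// The addressing-mode direction (add vs. sub) is derived from whether the
// indexed mode increments or decrements the base.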

bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                                  SDValue &Offset,
                                                  SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    if (AddSub == ARM_AM::sub) Val *= -1;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(Val, MVT::i32);
    return true;
  }

  return false;
}


bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                               SDValue &Offset,
                                               SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
  Base = N;
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
                                      SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::SUB) {
    // X - C is canonicalized to X + -C, no need to handle it here.
    Base = N.getOperand(0);
    Offset = N.getOperand(1);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                              -256 + 1, 256, RHSC)) { // 8 bits.
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
    }
    Offset = CurDAG->getRegister(0, MVT::i32);

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
  return true;
}
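
// Illustrative note (not part of the original source): addrmode3 is used by
// the halfword and signed-byte loads/stores (LDRH, LDRSH, LDRSB, STRH, ...).
// Unlike addrmode2 it only allows an unshifted register or an 8-bit
// immediate offset, e.g.
//   ldrh  r0, [r1, #-4]
//   ldrsb r0, [r1, r2]
// which is why the immediate range checked above is [-255, 255] rather than
// the 12-bit range used for plain LDR/STR.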

bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
    return true;
  }

  Offset = N;
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
                                      SDValue &Base, SDValue &Offset) {
  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                       MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4,
                              -256 + 1, 256, RHSC)) {
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
    }

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
                                       MVT::i32);
    return true;
  }

  Base = N;
  Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                     MVT::i32);
  return true;
}
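
// Illustrative note (not part of the original source): addrmode5 feeds the
// VFP load/store instructions (VLDR/VSTR), whose offset is an 8-bit
// immediate scaled by 4, giving a reach of +/-1020 bytes, e.g.
//   vldr d0, [r1, #256]
//   vstr s0, [r1, #-1020]
// That is why isScaledConstantInRange is called with Scale = 4 above.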

bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                                      SDValue &Align) {
  Addr = N;

  unsigned Alignment = 0;
  if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(Parent)) {
    // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
    // The maximum alignment is equal to the memory size being referenced.
    unsigned LSNAlign = LSN->getAlignment();
    unsigned MemSize = LSN->getMemoryVT().getSizeInBits() / 8;
    if (LSNAlign >= MemSize && MemSize > 1)
      Alignment = MemSize;
  } else {
    // All other uses of addrmode6 are for intrinsics.  For now just record
    // the raw alignment value; it will be refined later based on the legal
    // alignment operands for the intrinsic.
    Alignment = cast<MemIntrinsicSDNode>(Parent)->getAlignment();
  }

  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset) {
  LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
  ISD::MemIndexedMode AM = LdSt->getAddressingMode();
  if (AM != ISD::POST_INC)
    return false;
  Offset = N;
  if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
    if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
      Offset = CurDAG->getRegister(0, MVT::i32);
  }
  return true;
}

bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
                                       SDValue &Offset, SDValue &Label) {
  if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
    Offset = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
                                      MVT::i32);
    return true;
  }

  return false;
}


//===----------------------------------------------------------------------===//
//                         Thumb Addressing Modes
//===----------------------------------------------------------------------===//

bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N,
                                            SDValue &Base, SDValue &Offset){
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
    ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
    if (!NC || !NC->isNullValue())
      return false;

    Base = Offset = N;
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}
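
// Illustrative note (not part of the original source): 16-bit Thumb loads and
// stores are much more restricted than ARM ones. The RR form is a plain
// [reg, reg] address with no shift, e.g.
//   ldr r0, [r1, r2]
// and SP is not a valid base for it, which is why the RI selector below
// explicitly rejects operands involving ARM::SP and defers them to the
// dedicated SP-relative addressing mode (tLDRspi / tSTRspi).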

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI(SDValue N, SDValue &Base,
                                       SDValue &Offset, unsigned Scale) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
      return false;  // We want to select tLDRspi / tSTRspi instead.

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false;  // We want to select tLDRpci instead.
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Thumb does not have [sp, r] address mode.
  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP))
    return false;

  // FIXME: Why do we explicitly check for a match here and then return false?
  // Presumably to allow something else to match, but shouldn't this be
  // documented?
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC))
    return false;

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S1(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 1);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S2(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 2);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S4(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 4);
}
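
// Illustrative note (not part of the original source): returning false on an
// in-range scaled constant above appears to be how the register-register form
// yields to the immediate form. For example, for a halfword load of base + 8
// the selector can then prefer
//   ldrh r0, [r1, #8]       ; imm5 = 4, scale = 2
// over materializing the offset in a register and using
//   ldrh r0, [r1, r2]
// The Imm5S selectors below implement that immediate form for scales 1, 2
// and 4.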

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
                                          SDValue &Base, SDValue &OffImm) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
      return false;  // We want to select tLDRspi / tSTRspi instead.

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false;  // We want to select tLDRpci instead.
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    } else {
      Base = N;
    }

    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP)) {
    ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(N.getOperand(0));
    ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
    unsigned LHSC = LHS ? LHS->getZExtValue() : 0;
    unsigned RHSC = RHS ? RHS->getZExtValue() : 0;

    // Thumb does not have [sp, #imm5] address mode for non-zero imm5.
    if (LHSC != 0 || RHSC != 0) return false;

    Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  // If the RHS is + imm5 * scale, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
    Base = N.getOperand(0);
    OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
    return true;
  }

  Base = N.getOperand(0);
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
}

bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI,
                                       getTargetLowering()->getPointerTy());
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
      (LHSR && LHSR->getReg() == ARM::SP)) {
    // If the RHS is + imm8 * scale, fold into addr mode.
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI,
                                           getTargetLowering()->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  return false;
}


//===----------------------------------------------------------------------===//
//                        Thumb 2 Addressing Modes
//===----------------------------------------------------------------------===//


bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
                                                SDValue &Opc) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    ShImmVal = RHS->getZExtValue() & 31;
    Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
    return true;
  }

  return false;
}
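
// Illustrative note (not part of the original source): Thumb2 splits its
// load/store offsets across several selectors. Roughly:
//   ldr.w r0, [r1, #4092]        ; t2LDRi12, positive 12-bit immediate
//   ldr.w r0, [r1, #-128]        ; t2LDRi8, negative 8-bit immediate
//   ldr.w r0, [r1, r2, lsl #2]   ; t2 so_reg, shift amount limited to 0-3
// The selectors below are written so each form declines addresses that a
// more specific (or cheaper) form should handle.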

bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::TargetConstantPool)
        return false;  // We want to select t2LDRpci instead.
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    if (SelectT2AddrModeImm8(N, Base, OffImm))
      // Let t2LDRi8 handle (R - imm8).
      return false;

    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI,
                                           getTargetLowering()->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
                                           SDValue &Base, SDValue &OffImm) {
  // Match simple R - imm8 operands.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI,
                                           getTargetLowering()->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm){
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  int RHSC;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
    OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
      ? CurDAG->getTargetConstant(RHSC, MVT::i32)
      : CurDAG->getTargetConstant(-RHSC, MVT::i32);
    return true;
  }

  return false;
}
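
// Illustrative note (not part of the original source): SelectT2AddrModeImm8
// only accepts strictly negative offsets in [-255, -1]; non-negative offsets
// fall through to the imm12 selector above. So, for example,
//   [r1, #-8]  -> t2LDRi8
//   [r1, #8]   -> t2LDRi12
// The Imm8Offset variant handles the pre/post-indexed forms and flips the
// sign when the indexed mode is a decrement.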

bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
                                            SDValue &Base,
                                            SDValue &OffReg, SDValue &ShImm) {
  // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
      return false;
    else if (RHSC < 0 && RHSC >= -255) // 8 bits
      return false;
  }

  // Look for (R + R) or (R + (R << [1,2,3])).
  unsigned ShAmt = 0;
  Base   = N.getOperand(0);
  OffReg = N.getOperand(1);

  // Swap if it is ((R << c) + R).
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
  if (ShOpcVal != ARM_AM::lsl) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
    if (ShOpcVal == ARM_AM::lsl)
      std::swap(Base, OffReg);
  }

  if (ShOpcVal == ARM_AM::lsl) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
        OffReg = OffReg.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);

  return true;
}

bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
                                                SDValue &OffImm) {
  // This *must* succeed since it's used for the irreplaceable ldrex and strex
  // instructions.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);

  if (N.getOpcode() != ISD::ADD || !CurDAG->isBaseWithConstantOffset(N))
    return true;

  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!RHS)
    return true;

  uint32_t RHSC = (int)RHS->getZExtValue();
  if (RHSC > 1020 || RHSC % 4 != 0)
    return true;

  Base = N.getOperand(0);
  if (Base.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(Base)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, getTargetLowering()->getPointerTy());
  }

  OffImm = CurDAG->getTargetConstant(RHSC / 4, MVT::i32);
  return true;
}
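
// Illustrative note (not part of the original source): the Thumb2 exclusive
// loads/stores (LDREX/STREX and friends) encode a word-aligned offset of at
// most 1020 bytes, stored as offset/4, e.g.
//   ldrex r0, [r1, #516]
// Addresses that do not fit that form are still accepted above, just with the
// full address as the base and a zero offset, because exclusive accesses have
// no other addressing mode to fall back on.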

//===--------------------------------------------------------------------===//

/// getAL - Returns a ARMCC::AL immediate node.
static inline SDValue getAL(SelectionDAG *CurDAG) {
  return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
}

SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  SDValue Offset, AMOpc;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (LoadedVT == MVT::i32 && isPre &&
      SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_PRE_IMM;
    Match = true;
  } else if (LoadedVT == MVT::i32 && !isPre &&
      SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_POST_IMM;
    Match = true;
  } else if (LoadedVT == MVT::i32 &&
      SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
    Match = true;

  } else if (LoadedVT == MVT::i16 &&
             SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
    Match = true;
    Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
      ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
      : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
  } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
    if (LD->getExtensionType() == ISD::SEXTLOAD) {
      if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
      }
    } else {
      if (isPre &&
          SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = ARM::LDRB_PRE_IMM;
      } else if (!isPre &&
                  SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = ARM::LDRB_POST_IMM;
      } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
      }
    }
  }

  if (Match) {
    if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
      SDValue Chain = LD->getChain();
      SDValue Base = LD->getBasePtr();
      SDValue Ops[]= { Base, AMOpc, getAL(CurDAG),
                       CurDAG->getRegister(0, MVT::i32), Chain };
      return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
                                    MVT::i32, MVT::Other, Ops);
    } else {
      SDValue Chain = LD->getChain();
      SDValue Base = LD->getBasePtr();
      SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
                       CurDAG->getRegister(0, MVT::i32), Chain };
      return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
                                    MVT::i32, MVT::Other, Ops);
    }
  }

  return NULL;
}

SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
  SDValue Offset;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
    switch (LoadedVT.getSimpleVT().SimpleTy) {
    case MVT::i32:
      Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
      break;
    case MVT::i16:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
      else
        Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
      break;
    case MVT::i8:
    case MVT::i1:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
      else
        Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
      break;
    default:
      return NULL;
    }
    Match = true;
  }

  if (Match) {
    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[]= { Base, Offset, getAL(CurDAG),
                     CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
                                  MVT::Other, Ops);
  }

  return NULL;
}
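
// Illustrative note (not part of the original source): the create*Node
// helpers below wrap TargetOpcode::REG_SEQUENCE to glue independently
// selected values into one super-register. For example, a GPRPair built from
// {Rt, Rt2} is what LDREXD/STREXD operate on, and the D/Q register tuples are
// what the NEON VLDn/VSTn pseudo-instructions expect, guaranteeing that the
// register allocator assigns consecutive registers.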

/// \brief Form a GPRPair pseudo register from a pair of GPR regs.
SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) {
  SDLoc dl(V0.getNode());
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::GPRPairRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form a D register from a pair of S registers.
SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) {
  SDLoc dl(V0.getNode());
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form a quad register from a pair of D registers.
SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) {
  SDLoc dl(V0.getNode());
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form 4 consecutive D registers from a pair of Q registers.
SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) {
  SDLoc dl(V0.getNode());
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form 4 consecutive S registers.
SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
                                             SDValue V2, SDValue V3) {
  SDLoc dl(V0.getNode());
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                                    V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form 4 consecutive D registers.
SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
                                             SDValue V2, SDValue V3) {
  SDLoc dl(V0.getNode());
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                                    V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form 4 consecutive Q registers.
SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
                                             SDValue V2, SDValue V3) {
  SDLoc dl(V0.getNode());
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                                    V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
/// of a NEON VLD or VST instruction.  The supported values depend on the
/// number of registers being loaded.
SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
                                       bool is64BitVector) {
  unsigned NumRegs = NumVecs;
  if (!is64BitVector && NumVecs < 3)
    NumRegs *= 2;

  unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
  if (Alignment >= 32 && NumRegs == 4)
    Alignment = 32;
  else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
    Alignment = 16;
  else if (Alignment >= 8)
    Alignment = 8;
  else
    Alignment = 0;

  return CurDAG->getTargetConstant(Alignment, MVT::i32);
}
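
// Illustrative note (not part of the original source): NEON loads/stores with
// writeback come in two flavours, a "fixed" form that post-increments the
// base by the access size and a "register" form that adds an arbitrary
// register, e.g.
//   vld1.32 {d0, d1}, [r0]!      ; fixed stride (base += 16)
//   vld1.32 {d0, d1}, [r0], r2   ; register stride
// The tables below identify the fixed-stride opcodes and map each one to its
// register-stride twin so the selector can switch forms when the increment is
// not simply the access size.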
ARM::VST1d64wb_fixed : return true; 1711 case ARM::VST1q8wb_fixed : return true; 1712 case ARM::VST1q16wb_fixed : return true; 1713 case ARM::VST1q32wb_fixed : return true; 1714 case ARM::VST1q64wb_fixed : return true; 1715 case ARM::VST1d64TPseudoWB_fixed : return true; 1716 case ARM::VST1d64QPseudoWB_fixed : return true; 1717 case ARM::VST2d8wb_fixed : return true; 1718 case ARM::VST2d16wb_fixed : return true; 1719 case ARM::VST2d32wb_fixed : return true; 1720 case ARM::VST2q8PseudoWB_fixed : return true; 1721 case ARM::VST2q16PseudoWB_fixed : return true; 1722 case ARM::VST2q32PseudoWB_fixed : return true; 1723 } 1724} 1725 1726// Get the register stride update opcode of a VLD/VST instruction that 1727// is otherwise equivalent to the given fixed stride updating instruction. 1728static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) { 1729 assert((isVLDfixed(Opc) || isVSTfixed(Opc)) 1730 && "Incorrect fixed stride updating instruction."); 1731 switch (Opc) { 1732 default: break; 1733 case ARM::VLD1d8wb_fixed: return ARM::VLD1d8wb_register; 1734 case ARM::VLD1d16wb_fixed: return ARM::VLD1d16wb_register; 1735 case ARM::VLD1d32wb_fixed: return ARM::VLD1d32wb_register; 1736 case ARM::VLD1d64wb_fixed: return ARM::VLD1d64wb_register; 1737 case ARM::VLD1q8wb_fixed: return ARM::VLD1q8wb_register; 1738 case ARM::VLD1q16wb_fixed: return ARM::VLD1q16wb_register; 1739 case ARM::VLD1q32wb_fixed: return ARM::VLD1q32wb_register; 1740 case ARM::VLD1q64wb_fixed: return ARM::VLD1q64wb_register; 1741 case ARM::VLD1d64Twb_fixed: return ARM::VLD1d64Twb_register; 1742 case ARM::VLD1d64Qwb_fixed: return ARM::VLD1d64Qwb_register; 1743 case ARM::VLD1d64TPseudoWB_fixed: return ARM::VLD1d64TPseudoWB_register; 1744 case ARM::VLD1d64QPseudoWB_fixed: return ARM::VLD1d64QPseudoWB_register; 1745 1746 case ARM::VST1d8wb_fixed: return ARM::VST1d8wb_register; 1747 case ARM::VST1d16wb_fixed: return ARM::VST1d16wb_register; 1748 case ARM::VST1d32wb_fixed: return ARM::VST1d32wb_register; 1749 case ARM::VST1d64wb_fixed: return ARM::VST1d64wb_register; 1750 case ARM::VST1q8wb_fixed: return ARM::VST1q8wb_register; 1751 case ARM::VST1q16wb_fixed: return ARM::VST1q16wb_register; 1752 case ARM::VST1q32wb_fixed: return ARM::VST1q32wb_register; 1753 case ARM::VST1q64wb_fixed: return ARM::VST1q64wb_register; 1754 case ARM::VST1d64TPseudoWB_fixed: return ARM::VST1d64TPseudoWB_register; 1755 case ARM::VST1d64QPseudoWB_fixed: return ARM::VST1d64QPseudoWB_register; 1756 1757 case ARM::VLD2d8wb_fixed: return ARM::VLD2d8wb_register; 1758 case ARM::VLD2d16wb_fixed: return ARM::VLD2d16wb_register; 1759 case ARM::VLD2d32wb_fixed: return ARM::VLD2d32wb_register; 1760 case ARM::VLD2q8PseudoWB_fixed: return ARM::VLD2q8PseudoWB_register; 1761 case ARM::VLD2q16PseudoWB_fixed: return ARM::VLD2q16PseudoWB_register; 1762 case ARM::VLD2q32PseudoWB_fixed: return ARM::VLD2q32PseudoWB_register; 1763 1764 case ARM::VST2d8wb_fixed: return ARM::VST2d8wb_register; 1765 case ARM::VST2d16wb_fixed: return ARM::VST2d16wb_register; 1766 case ARM::VST2d32wb_fixed: return ARM::VST2d32wb_register; 1767 case ARM::VST2q8PseudoWB_fixed: return ARM::VST2q8PseudoWB_register; 1768 case ARM::VST2q16PseudoWB_fixed: return ARM::VST2q16PseudoWB_register; 1769 case ARM::VST2q32PseudoWB_fixed: return ARM::VST2q32PseudoWB_register; 1770 1771 case ARM::VLD2DUPd8wb_fixed: return ARM::VLD2DUPd8wb_register; 1772 case ARM::VLD2DUPd16wb_fixed: return ARM::VLD2DUPd16wb_register; 1773 case ARM::VLD2DUPd32wb_fixed: return ARM::VLD2DUPd32wb_register; 1774 } 1775 return Opc; // If not 
one we handle, return it unchanged. 1776} 1777 1778SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs, 1779 const uint16_t *DOpcodes, 1780 const uint16_t *QOpcodes0, 1781 const uint16_t *QOpcodes1) { 1782 assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range"); 1783 SDLoc dl(N); 1784 1785 SDValue MemAddr, Align; 1786 unsigned AddrOpIdx = isUpdating ? 1 : 2; 1787 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align)) 1788 return NULL; 1789 1790 SDValue Chain = N->getOperand(0); 1791 EVT VT = N->getValueType(0); 1792 bool is64BitVector = VT.is64BitVector(); 1793 Align = GetVLDSTAlign(Align, NumVecs, is64BitVector); 1794 1795 unsigned OpcodeIndex; 1796 switch (VT.getSimpleVT().SimpleTy) { 1797 default: llvm_unreachable("unhandled vld type"); 1798 // Double-register operations: 1799 case MVT::v8i8: OpcodeIndex = 0; break; 1800 case MVT::v4i16: OpcodeIndex = 1; break; 1801 case MVT::v2f32: 1802 case MVT::v2i32: OpcodeIndex = 2; break; 1803 case MVT::v1i64: OpcodeIndex = 3; break; 1804 // Quad-register operations: 1805 case MVT::v16i8: OpcodeIndex = 0; break; 1806 case MVT::v8i16: OpcodeIndex = 1; break; 1807 case MVT::v4f32: 1808 case MVT::v4i32: OpcodeIndex = 2; break; 1809 case MVT::v2i64: OpcodeIndex = 3; 1810 assert(NumVecs == 1 && "v2i64 type only supported for VLD1"); 1811 break; 1812 } 1813 1814 EVT ResTy; 1815 if (NumVecs == 1) 1816 ResTy = VT; 1817 else { 1818 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs; 1819 if (!is64BitVector) 1820 ResTyElts *= 2; 1821 ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts); 1822 } 1823 std::vector<EVT> ResTys; 1824 ResTys.push_back(ResTy); 1825 if (isUpdating) 1826 ResTys.push_back(MVT::i32); 1827 ResTys.push_back(MVT::Other); 1828 1829 SDValue Pred = getAL(CurDAG); 1830 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); 1831 SDNode *VLd; 1832 SmallVector<SDValue, 7> Ops; 1833 1834 // Double registers and VLD1/VLD2 quad registers are directly supported. 1835 if (is64BitVector || NumVecs <= 2) { 1836 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] : 1837 QOpcodes0[OpcodeIndex]); 1838 Ops.push_back(MemAddr); 1839 Ops.push_back(Align); 1840 if (isUpdating) { 1841 SDValue Inc = N->getOperand(AddrOpIdx + 1); 1842 // FIXME: VLD1/VLD2 fixed increment doesn't need Reg0. Remove the reg0 1843 // case entirely when the rest are updated to that form, too. 1844 if ((NumVecs <= 2) && !isa<ConstantSDNode>(Inc.getNode())) 1845 Opc = getVLDSTRegisterUpdateOpcode(Opc); 1846 // FIXME: We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so 1847 // check for that explicitly too. Horribly hacky, but temporary. 1848 if ((NumVecs > 2 && !isVLDfixed(Opc)) || 1849 !isa<ConstantSDNode>(Inc.getNode())) 1850 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc); 1851 } 1852 Ops.push_back(Pred); 1853 Ops.push_back(Reg0); 1854 Ops.push_back(Chain); 1855 VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); 1856 1857 } else { 1858 // Otherwise, quad registers are loaded with two separate instructions, 1859 // where one loads the even registers and the other loads the odd registers. 1860 EVT AddrTy = MemAddr.getValueType(); 1861 1862 // Load the even subregs. This is always an updating load, so that it 1863 // provides the address to the second load for the odd subregs. 
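    // For example, a vld3 of four 32-bit lanes is emitted as two machine
    // instructions along the lines of "vld3.32 {d0, d2, d4}, [r0]!" followed
    // by "vld3.32 {d1, d3, d5}, [r0]"; the post-incremented address produced
    // by the first load feeds the second (register choices are illustrative).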
1864 SDValue ImplDef = 1865 SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0); 1866 const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain }; 1867 SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl, 1868 ResTy, AddrTy, MVT::Other, OpsA); 1869 Chain = SDValue(VLdA, 2); 1870 1871 // Load the odd subregs. 1872 Ops.push_back(SDValue(VLdA, 1)); 1873 Ops.push_back(Align); 1874 if (isUpdating) { 1875 SDValue Inc = N->getOperand(AddrOpIdx + 1); 1876 assert(isa<ConstantSDNode>(Inc.getNode()) && 1877 "only constant post-increment update allowed for VLD3/4"); 1878 (void)Inc; 1879 Ops.push_back(Reg0); 1880 } 1881 Ops.push_back(SDValue(VLdA, 0)); 1882 Ops.push_back(Pred); 1883 Ops.push_back(Reg0); 1884 Ops.push_back(Chain); 1885 VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, Ops); 1886 } 1887 1888 // Transfer memoperands. 1889 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 1890 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand(); 1891 cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1); 1892 1893 if (NumVecs == 1) 1894 return VLd; 1895 1896 // Extract out the subregisters. 1897 SDValue SuperReg = SDValue(VLd, 0); 1898 assert(ARM::dsub_7 == ARM::dsub_0+7 && 1899 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering"); 1900 unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0); 1901 for (unsigned Vec = 0; Vec < NumVecs; ++Vec) 1902 ReplaceUses(SDValue(N, Vec), 1903 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg)); 1904 ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1)); 1905 if (isUpdating) 1906 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2)); 1907 return NULL; 1908} 1909 1910SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs, 1911 const uint16_t *DOpcodes, 1912 const uint16_t *QOpcodes0, 1913 const uint16_t *QOpcodes1) { 1914 assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range"); 1915 SDLoc dl(N); 1916 1917 SDValue MemAddr, Align; 1918 unsigned AddrOpIdx = isUpdating ? 1 : 2; 1919 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 
2 : 1) 1920 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align)) 1921 return NULL; 1922 1923 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 1924 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand(); 1925 1926 SDValue Chain = N->getOperand(0); 1927 EVT VT = N->getOperand(Vec0Idx).getValueType(); 1928 bool is64BitVector = VT.is64BitVector(); 1929 Align = GetVLDSTAlign(Align, NumVecs, is64BitVector); 1930 1931 unsigned OpcodeIndex; 1932 switch (VT.getSimpleVT().SimpleTy) { 1933 default: llvm_unreachable("unhandled vst type"); 1934 // Double-register operations: 1935 case MVT::v8i8: OpcodeIndex = 0; break; 1936 case MVT::v4i16: OpcodeIndex = 1; break; 1937 case MVT::v2f32: 1938 case MVT::v2i32: OpcodeIndex = 2; break; 1939 case MVT::v1i64: OpcodeIndex = 3; break; 1940 // Quad-register operations: 1941 case MVT::v16i8: OpcodeIndex = 0; break; 1942 case MVT::v8i16: OpcodeIndex = 1; break; 1943 case MVT::v4f32: 1944 case MVT::v4i32: OpcodeIndex = 2; break; 1945 case MVT::v2i64: OpcodeIndex = 3; 1946 assert(NumVecs == 1 && "v2i64 type only supported for VST1"); 1947 break; 1948 } 1949 1950 std::vector<EVT> ResTys; 1951 if (isUpdating) 1952 ResTys.push_back(MVT::i32); 1953 ResTys.push_back(MVT::Other); 1954 1955 SDValue Pred = getAL(CurDAG); 1956 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); 1957 SmallVector<SDValue, 7> Ops; 1958 1959 // Double registers and VST1/VST2 quad registers are directly supported. 1960 if (is64BitVector || NumVecs <= 2) { 1961 SDValue SrcReg; 1962 if (NumVecs == 1) { 1963 SrcReg = N->getOperand(Vec0Idx); 1964 } else if (is64BitVector) { 1965 // Form a REG_SEQUENCE to force register allocation. 1966 SDValue V0 = N->getOperand(Vec0Idx + 0); 1967 SDValue V1 = N->getOperand(Vec0Idx + 1); 1968 if (NumVecs == 2) 1969 SrcReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0); 1970 else { 1971 SDValue V2 = N->getOperand(Vec0Idx + 2); 1972 // If it's a vst3, form a quad D-register and leave the last part as 1973 // an undef. 1974 SDValue V3 = (NumVecs == 3) 1975 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0) 1976 : N->getOperand(Vec0Idx + 3); 1977 SrcReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0); 1978 } 1979 } else { 1980 // Form a QQ register. 1981 SDValue Q0 = N->getOperand(Vec0Idx); 1982 SDValue Q1 = N->getOperand(Vec0Idx + 1); 1983 SrcReg = SDValue(createQRegPairNode(MVT::v4i64, Q0, Q1), 0); 1984 } 1985 1986 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] : 1987 QOpcodes0[OpcodeIndex]); 1988 Ops.push_back(MemAddr); 1989 Ops.push_back(Align); 1990 if (isUpdating) { 1991 SDValue Inc = N->getOperand(AddrOpIdx + 1); 1992 // FIXME: VST1/VST2 fixed increment doesn't need Reg0. Remove the reg0 1993 // case entirely when the rest are updated to that form, too. 1994 if (NumVecs <= 2 && !isa<ConstantSDNode>(Inc.getNode())) 1995 Opc = getVLDSTRegisterUpdateOpcode(Opc); 1996 // FIXME: We use a VST1 for v1i64 even if the pseudo says vld2/3/4, so 1997 // check for that explicitly too. Horribly hacky, but temporary. 1998 if (!isa<ConstantSDNode>(Inc.getNode())) 1999 Ops.push_back(Inc); 2000 else if (NumVecs > 2 && !isVSTfixed(Opc)) 2001 Ops.push_back(Reg0); 2002 } 2003 Ops.push_back(SrcReg); 2004 Ops.push_back(Pred); 2005 Ops.push_back(Reg0); 2006 Ops.push_back(Chain); 2007 SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); 2008 2009 // Transfer memoperands. 
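    // Attach the original MachineMemOperand so later machine passes (alias
    // analysis, scheduling) still see the size, alignment and volatility of
    // the underlying memory access.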
2010 cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1); 2011 2012 return VSt; 2013 } 2014 2015 // Otherwise, quad registers are stored with two separate instructions, 2016 // where one stores the even registers and the other stores the odd registers. 2017 2018 // Form the QQQQ REG_SEQUENCE. 2019 SDValue V0 = N->getOperand(Vec0Idx + 0); 2020 SDValue V1 = N->getOperand(Vec0Idx + 1); 2021 SDValue V2 = N->getOperand(Vec0Idx + 2); 2022 SDValue V3 = (NumVecs == 3) 2023 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0) 2024 : N->getOperand(Vec0Idx + 3); 2025 SDValue RegSeq = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0); 2026 2027 // Store the even D registers. This is always an updating store, so that it 2028 // provides the address to the second store for the odd subregs. 2029 const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain }; 2030 SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl, 2031 MemAddr.getValueType(), 2032 MVT::Other, OpsA); 2033 cast<MachineSDNode>(VStA)->setMemRefs(MemOp, MemOp + 1); 2034 Chain = SDValue(VStA, 1); 2035 2036 // Store the odd D registers. 2037 Ops.push_back(SDValue(VStA, 0)); 2038 Ops.push_back(Align); 2039 if (isUpdating) { 2040 SDValue Inc = N->getOperand(AddrOpIdx + 1); 2041 assert(isa<ConstantSDNode>(Inc.getNode()) && 2042 "only constant post-increment update allowed for VST3/4"); 2043 (void)Inc; 2044 Ops.push_back(Reg0); 2045 } 2046 Ops.push_back(RegSeq); 2047 Ops.push_back(Pred); 2048 Ops.push_back(Reg0); 2049 Ops.push_back(Chain); 2050 SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, 2051 Ops); 2052 cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1); 2053 return VStB; 2054} 2055 2056SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, 2057 bool isUpdating, unsigned NumVecs, 2058 const uint16_t *DOpcodes, 2059 const uint16_t *QOpcodes) { 2060 assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range"); 2061 SDLoc dl(N); 2062 2063 SDValue MemAddr, Align; 2064 unsigned AddrOpIdx = isUpdating ? 1 : 2; 2065 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1) 2066 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align)) 2067 return NULL; 2068 2069 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 2070 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand(); 2071 2072 SDValue Chain = N->getOperand(0); 2073 unsigned Lane = 2074 cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue(); 2075 EVT VT = N->getOperand(Vec0Idx).getValueType(); 2076 bool is64BitVector = VT.is64BitVector(); 2077 2078 unsigned Alignment = 0; 2079 if (NumVecs != 3) { 2080 Alignment = cast<ConstantSDNode>(Align)->getZExtValue(); 2081 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8; 2082 if (Alignment > NumBytes) 2083 Alignment = NumBytes; 2084 if (Alignment < 8 && Alignment < NumBytes) 2085 Alignment = 0; 2086 // Alignment must be a power of two; make sure of that. 
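    // (x & -x) isolates the lowest set bit: a power of two is left unchanged,
    // while any other value (e.g. 12 -> 4) is conservatively rounded down.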
2087 Alignment = (Alignment & -Alignment); 2088 if (Alignment == 1) 2089 Alignment = 0; 2090 } 2091 Align = CurDAG->getTargetConstant(Alignment, MVT::i32); 2092 2093 unsigned OpcodeIndex; 2094 switch (VT.getSimpleVT().SimpleTy) { 2095 default: llvm_unreachable("unhandled vld/vst lane type"); 2096 // Double-register operations: 2097 case MVT::v8i8: OpcodeIndex = 0; break; 2098 case MVT::v4i16: OpcodeIndex = 1; break; 2099 case MVT::v2f32: 2100 case MVT::v2i32: OpcodeIndex = 2; break; 2101 // Quad-register operations: 2102 case MVT::v8i16: OpcodeIndex = 0; break; 2103 case MVT::v4f32: 2104 case MVT::v4i32: OpcodeIndex = 1; break; 2105 } 2106 2107 std::vector<EVT> ResTys; 2108 if (IsLoad) { 2109 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs; 2110 if (!is64BitVector) 2111 ResTyElts *= 2; 2112 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), 2113 MVT::i64, ResTyElts)); 2114 } 2115 if (isUpdating) 2116 ResTys.push_back(MVT::i32); 2117 ResTys.push_back(MVT::Other); 2118 2119 SDValue Pred = getAL(CurDAG); 2120 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); 2121 2122 SmallVector<SDValue, 8> Ops; 2123 Ops.push_back(MemAddr); 2124 Ops.push_back(Align); 2125 if (isUpdating) { 2126 SDValue Inc = N->getOperand(AddrOpIdx + 1); 2127 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc); 2128 } 2129 2130 SDValue SuperReg; 2131 SDValue V0 = N->getOperand(Vec0Idx + 0); 2132 SDValue V1 = N->getOperand(Vec0Idx + 1); 2133 if (NumVecs == 2) { 2134 if (is64BitVector) 2135 SuperReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0); 2136 else 2137 SuperReg = SDValue(createQRegPairNode(MVT::v4i64, V0, V1), 0); 2138 } else { 2139 SDValue V2 = N->getOperand(Vec0Idx + 2); 2140 SDValue V3 = (NumVecs == 3) 2141 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0) 2142 : N->getOperand(Vec0Idx + 3); 2143 if (is64BitVector) 2144 SuperReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0); 2145 else 2146 SuperReg = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0); 2147 } 2148 Ops.push_back(SuperReg); 2149 Ops.push_back(getI32Imm(Lane)); 2150 Ops.push_back(Pred); 2151 Ops.push_back(Reg0); 2152 Ops.push_back(Chain); 2153 2154 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] : 2155 QOpcodes[OpcodeIndex]); 2156 SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); 2157 cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1); 2158 if (!IsLoad) 2159 return VLdLn; 2160 2161 // Extract the subregisters. 2162 SuperReg = SDValue(VLdLn, 0); 2163 assert(ARM::dsub_7 == ARM::dsub_0+7 && 2164 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering"); 2165 unsigned Sub0 = is64BitVector ? 
ARM::dsub_0 : ARM::qsub_0; 2166 for (unsigned Vec = 0; Vec < NumVecs; ++Vec) 2167 ReplaceUses(SDValue(N, Vec), 2168 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg)); 2169 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1)); 2170 if (isUpdating) 2171 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2)); 2172 return NULL; 2173} 2174 2175SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating, 2176 unsigned NumVecs, 2177 const uint16_t *Opcodes) { 2178 assert(NumVecs >=2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range"); 2179 SDLoc dl(N); 2180 2181 SDValue MemAddr, Align; 2182 if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align)) 2183 return NULL; 2184 2185 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 2186 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand(); 2187 2188 SDValue Chain = N->getOperand(0); 2189 EVT VT = N->getValueType(0); 2190 2191 unsigned Alignment = 0; 2192 if (NumVecs != 3) { 2193 Alignment = cast<ConstantSDNode>(Align)->getZExtValue(); 2194 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8; 2195 if (Alignment > NumBytes) 2196 Alignment = NumBytes; 2197 if (Alignment < 8 && Alignment < NumBytes) 2198 Alignment = 0; 2199 // Alignment must be a power of two; make sure of that. 2200 Alignment = (Alignment & -Alignment); 2201 if (Alignment == 1) 2202 Alignment = 0; 2203 } 2204 Align = CurDAG->getTargetConstant(Alignment, MVT::i32); 2205 2206 unsigned OpcodeIndex; 2207 switch (VT.getSimpleVT().SimpleTy) { 2208 default: llvm_unreachable("unhandled vld-dup type"); 2209 case MVT::v8i8: OpcodeIndex = 0; break; 2210 case MVT::v4i16: OpcodeIndex = 1; break; 2211 case MVT::v2f32: 2212 case MVT::v2i32: OpcodeIndex = 2; break; 2213 } 2214 2215 SDValue Pred = getAL(CurDAG); 2216 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); 2217 SDValue SuperReg; 2218 unsigned Opc = Opcodes[OpcodeIndex]; 2219 SmallVector<SDValue, 6> Ops; 2220 Ops.push_back(MemAddr); 2221 Ops.push_back(Align); 2222 if (isUpdating) { 2223 // fixed-stride update instructions don't have an explicit writeback 2224 // operand. It's implicit in the opcode itself. 2225 SDValue Inc = N->getOperand(2); 2226 if (!isa<ConstantSDNode>(Inc.getNode())) 2227 Ops.push_back(Inc); 2228 // FIXME: VLD3 and VLD4 haven't been updated to that form yet. 2229 else if (NumVecs > 2) 2230 Ops.push_back(Reg0); 2231 } 2232 Ops.push_back(Pred); 2233 Ops.push_back(Reg0); 2234 Ops.push_back(Chain); 2235 2236 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs; 2237 std::vector<EVT> ResTys; 2238 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,ResTyElts)); 2239 if (isUpdating) 2240 ResTys.push_back(MVT::i32); 2241 ResTys.push_back(MVT::Other); 2242 SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); 2243 cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1); 2244 SuperReg = SDValue(VLdDup, 0); 2245 2246 // Extract the subregisters. 
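  // The dup load defines a single super-register covering all NumVecs D
  // registers; each vector result of the intrinsic is rewritten below as an
  // extract_subreg (dsub_0, dsub_1, ...) of that super-register.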
2247 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering"); 2248 unsigned SubIdx = ARM::dsub_0; 2249 for (unsigned Vec = 0; Vec < NumVecs; ++Vec) 2250 ReplaceUses(SDValue(N, Vec), 2251 CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg)); 2252 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1)); 2253 if (isUpdating) 2254 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2)); 2255 return NULL; 2256} 2257 2258SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, 2259 unsigned Opc) { 2260 assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range"); 2261 SDLoc dl(N); 2262 EVT VT = N->getValueType(0); 2263 unsigned FirstTblReg = IsExt ? 2 : 1; 2264 2265 // Form a REG_SEQUENCE to force register allocation. 2266 SDValue RegSeq; 2267 SDValue V0 = N->getOperand(FirstTblReg + 0); 2268 SDValue V1 = N->getOperand(FirstTblReg + 1); 2269 if (NumVecs == 2) 2270 RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0); 2271 else { 2272 SDValue V2 = N->getOperand(FirstTblReg + 2); 2273 // If it's a vtbl3, form a quad D-register and leave the last part as 2274 // an undef. 2275 SDValue V3 = (NumVecs == 3) 2276 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0) 2277 : N->getOperand(FirstTblReg + 3); 2278 RegSeq = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0); 2279 } 2280 2281 SmallVector<SDValue, 6> Ops; 2282 if (IsExt) 2283 Ops.push_back(N->getOperand(1)); 2284 Ops.push_back(RegSeq); 2285 Ops.push_back(N->getOperand(FirstTblReg + NumVecs)); 2286 Ops.push_back(getAL(CurDAG)); // predicate 2287 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register 2288 return CurDAG->getMachineNode(Opc, dl, VT, Ops); 2289} 2290 2291SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N, 2292 bool isSigned) { 2293 if (!Subtarget->hasV6T2Ops()) 2294 return NULL; 2295 2296 unsigned Opc = isSigned 2297 ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX) 2298 : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX); 2299 2300 // For unsigned extracts, check for a shift right and mask 2301 unsigned And_imm = 0; 2302 if (N->getOpcode() == ISD::AND) { 2303 if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) { 2304 2305 // The immediate is a mask of the low bits iff imm & (imm+1) == 0 2306 if (And_imm & (And_imm + 1)) 2307 return NULL; 2308 2309 unsigned Srl_imm = 0; 2310 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL, 2311 Srl_imm)) { 2312 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!"); 2313 2314 // Note: The width operand is encoded as width-1. 2315 unsigned Width = CountTrailingOnes_32(And_imm) - 1; 2316 unsigned LSB = Srl_imm; 2317 2318 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); 2319 2320 if ((LSB + Width + 1) == N->getValueType(0).getSizeInBits()) { 2321 // It's cheaper to use a right shift to extract the top bits. 2322 if (Subtarget->isThumb()) { 2323 Opc = isSigned ? ARM::t2ASRri : ARM::t2LSRri; 2324 SDValue Ops[] = { N->getOperand(0).getOperand(0), 2325 CurDAG->getTargetConstant(LSB, MVT::i32), 2326 getAL(CurDAG), Reg0, Reg0 }; 2327 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5); 2328 } 2329 2330 // ARM models shift instructions as MOVsi with shifter operand. 
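        // For example, (and (srl x, 24), 0xff) extracts the top byte, so a
        // single "mov rD, rN, lsr #24" (MOVsi with an LSR shifter operand) is
        // enough and no UBFX is needed (register names illustrative).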
2331 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(ISD::SRL); 2332 SDValue ShOpc = 2333 CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, LSB), 2334 MVT::i32); 2335 SDValue Ops[] = { N->getOperand(0).getOperand(0), ShOpc, 2336 getAL(CurDAG), Reg0, Reg0 }; 2337 return CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops, 5); 2338 } 2339 2340 SDValue Ops[] = { N->getOperand(0).getOperand(0), 2341 CurDAG->getTargetConstant(LSB, MVT::i32), 2342 CurDAG->getTargetConstant(Width, MVT::i32), 2343 getAL(CurDAG), Reg0 }; 2344 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5); 2345 } 2346 } 2347 return NULL; 2348 } 2349 2350 // Otherwise, we're looking for a shift of a shift 2351 unsigned Shl_imm = 0; 2352 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) { 2353 assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!"); 2354 unsigned Srl_imm = 0; 2355 if (isInt32Immediate(N->getOperand(1), Srl_imm)) { 2356 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!"); 2357 // Note: The width operand is encoded as width-1. 2358 unsigned Width = 32 - Srl_imm - 1; 2359 int LSB = Srl_imm - Shl_imm; 2360 if (LSB < 0) 2361 return NULL; 2362 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); 2363 SDValue Ops[] = { N->getOperand(0).getOperand(0), 2364 CurDAG->getTargetConstant(LSB, MVT::i32), 2365 CurDAG->getTargetConstant(Width, MVT::i32), 2366 getAL(CurDAG), Reg0 }; 2367 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5); 2368 } 2369 } 2370 return NULL; 2371} 2372 2373/// Target-specific DAG combining for ISD::XOR. 2374/// Target-independent combining lowers SELECT_CC nodes of the form 2375/// select_cc setg[ge] X, 0, X, -X 2376/// select_cc setgt X, -1, X, -X 2377/// select_cc setl[te] X, 0, -X, X 2378/// select_cc setlt X, 1, -X, X 2379/// which represent Integer ABS into: 2380/// Y = sra (X, size(X)-1); xor (add (X, Y), Y) 2381/// ARM instruction selection detects the latter and matches it to 2382/// ARM::ABS or ARM::t2ABS machine node. 2383SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){ 2384 SDValue XORSrc0 = N->getOperand(0); 2385 SDValue XORSrc1 = N->getOperand(1); 2386 EVT VT = N->getValueType(0); 2387 2388 if (Subtarget->isThumb1Only()) 2389 return NULL; 2390 2391 if (XORSrc0.getOpcode() != ISD::ADD || XORSrc1.getOpcode() != ISD::SRA) 2392 return NULL; 2393 2394 SDValue ADDSrc0 = XORSrc0.getOperand(0); 2395 SDValue ADDSrc1 = XORSrc0.getOperand(1); 2396 SDValue SRASrc0 = XORSrc1.getOperand(0); 2397 SDValue SRASrc1 = XORSrc1.getOperand(1); 2398 ConstantSDNode *SRAConstant = dyn_cast<ConstantSDNode>(SRASrc1); 2399 EVT XType = SRASrc0.getValueType(); 2400 unsigned Size = XType.getSizeInBits() - 1; 2401 2402 if (ADDSrc1 == XORSrc1 && ADDSrc0 == SRASrc0 && 2403 XType.isInteger() && SRAConstant != NULL && 2404 Size == SRAConstant->getZExtValue()) { 2405 unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS; 2406 return CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0); 2407 } 2408 2409 return NULL; 2410} 2411 2412SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) { 2413 // The only time a CONCAT_VECTORS operation can have legal types is when 2414 // two 64-bit vectors are concatenated to a 128-bit vector. 
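  // For example, concatenating two v2f32 values into a v4f32 just places the
  // two D registers into the dsub_0/dsub_1 halves of a Q register via the
  // REG_SEQUENCE built by createDRegPairNode.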
2415 EVT VT = N->getValueType(0); 2416 if (!VT.is128BitVector() || N->getNumOperands() != 2) 2417 llvm_unreachable("unexpected CONCAT_VECTORS"); 2418 return createDRegPairNode(VT, N->getOperand(0), N->getOperand(1)); 2419} 2420 2421SDNode *ARMDAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8, 2422 unsigned Op16,unsigned Op32, 2423 unsigned Op64) { 2424 // Mostly direct translation to the given operations, except that we preserve 2425 // the AtomicOrdering for use later on. 2426 AtomicSDNode *AN = cast<AtomicSDNode>(Node); 2427 EVT VT = AN->getMemoryVT(); 2428 2429 unsigned Op; 2430 SDVTList VTs = CurDAG->getVTList(AN->getValueType(0), MVT::Other); 2431 if (VT == MVT::i8) 2432 Op = Op8; 2433 else if (VT == MVT::i16) 2434 Op = Op16; 2435 else if (VT == MVT::i32) 2436 Op = Op32; 2437 else if (VT == MVT::i64) { 2438 Op = Op64; 2439 VTs = CurDAG->getVTList(MVT::i32, MVT::i32, MVT::Other); 2440 } else 2441 llvm_unreachable("Unexpected atomic operation"); 2442 2443 SmallVector<SDValue, 6> Ops; 2444 for (unsigned i = 1; i < AN->getNumOperands(); ++i) 2445 Ops.push_back(AN->getOperand(i)); 2446 2447 Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32)); 2448 Ops.push_back(AN->getOperand(0)); // Chain moves to the end 2449 2450 return CurDAG->SelectNodeTo(Node, Op, VTs, &Ops[0], Ops.size()); 2451} 2452 2453SDNode *ARMDAGToDAGISel::Select(SDNode *N) { 2454 SDLoc dl(N); 2455 2456 if (N->isMachineOpcode()) { 2457 N->setNodeId(-1); 2458 return NULL; // Already selected. 2459 } 2460 2461 switch (N->getOpcode()) { 2462 default: break; 2463 case ISD::INLINEASM: { 2464 SDNode *ResNode = SelectInlineAsm(N); 2465 if (ResNode) 2466 return ResNode; 2467 break; 2468 } 2469 case ISD::XOR: { 2470 // Select special operations if XOR node forms integer ABS pattern 2471 SDNode *ResNode = SelectABSOp(N); 2472 if (ResNode) 2473 return ResNode; 2474 // Other cases are autogenerated. 2475 break; 2476 } 2477 case ISD::Constant: { 2478 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue(); 2479 bool UseCP = true; 2480 if (Subtarget->hasThumb2()) 2481 // Thumb2-aware targets have the MOVT instruction, so all immediates can 2482 // be done with MOV + MOVT, at worst. 2483 UseCP = 0; 2484 else { 2485 if (Subtarget->isThumb()) { 2486 UseCP = (Val > 255 && // MOV 2487 ~Val > 255 && // MOV + MVN 2488 !ARM_AM::isThumbImmShiftedVal(Val)); // MOV + LSL 2489 } else 2490 UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV 2491 ARM_AM::getSOImmVal(~Val) == -1 && // MVN 2492 !ARM_AM::isSOImmTwoPartVal(Val)); // two instrs. 2493 } 2494 2495 if (UseCP) { 2496 SDValue CPIdx = 2497 CurDAG->getTargetConstantPool(ConstantInt::get( 2498 Type::getInt32Ty(*CurDAG->getContext()), Val), 2499 getTargetLowering()->getPointerTy()); 2500 2501 SDNode *ResNode; 2502 if (Subtarget->isThumb1Only()) { 2503 SDValue Pred = getAL(CurDAG); 2504 SDValue PredReg = CurDAG->getRegister(0, MVT::i32); 2505 SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() }; 2506 ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other, 2507 Ops); 2508 } else { 2509 SDValue Ops[] = { 2510 CPIdx, 2511 CurDAG->getTargetConstant(0, MVT::i32), 2512 getAL(CurDAG), 2513 CurDAG->getRegister(0, MVT::i32), 2514 CurDAG->getEntryNode() 2515 }; 2516 ResNode=CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other, 2517 Ops); 2518 } 2519 ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0)); 2520 return NULL; 2521 } 2522 2523 // Other cases are autogenerated. 
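    // (Immediates that can be materialized directly, e.g. with MOV/MVN or a
    // MOVW+MOVT pair, fall through to the TableGen'erated patterns rather
    // than being forced into a constant pool.)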
    break;
  }
  case ISD::FrameIndex: {
    // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI,
                                           getTargetLowering()->getPointerTy());
    if (Subtarget->isThumb1Only()) {
      SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, Ops, 4);
    } else {
      unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
                      ARM::t2ADDri : ARM::ADDri);
      SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
    }
  }
  case ISD::SRL:
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
      return I;
    break;
  case ISD::SRA:
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true))
      return I;
    break;
  case ISD::MUL:
    if (Subtarget->isThumb1Only())
      break;
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
      unsigned RHSV = C->getZExtValue();
      if (!RHSV) break;
      if (isPowerOf2_32(RHSV-1)) {  // 2^n+1?
        unsigned ShImm = Log2_32(RHSV-1);
        if (ShImm >= 32)
          break;
        SDValue V = N->getOperand(0);
        ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
        SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
        if (Subtarget->isThumb()) {
          SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6);
        } else {
          SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops, 7);
        }
      }
      if (isPowerOf2_32(RHSV+1)) {  // 2^n-1?
        unsigned ShImm = Log2_32(RHSV+1);
        if (ShImm >= 32)
          break;
        SDValue V = N->getOperand(0);
        ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
        SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
        if (Subtarget->isThumb()) {
          SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 6);
        } else {
          SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops, 7);
        }
      }
    }
    break;
  case ISD::AND: {
    // Check for unsigned bitfield extract
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
      return I;

    // (and (or x, c2), c1), where the top 16 bits of c1 and c2 match, the
    // lower 16 bits of c1 are 0xffff, and the lower 16 bits of c2 are 0. That
    // is, the top 16 bits of the result come entirely from c2 and the lower
    // 16 bits come entirely from x. That's equal to
    // (or (and x, 0xffff), (and c2, 0xffff0000)).
    // Select it to: "movt x, ((c2 & 0xffff0000) >> 16)".
    EVT VT = N->getValueType(0);
    if (VT != MVT::i32)
      break;
    unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
      ? ARM::t2MOVTi16
      : (Subtarget->hasV6T2Ops() ?
ARM::MOVTi16 : 0); 2608 if (!Opc) 2609 break; 2610 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); 2611 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 2612 if (!N1C) 2613 break; 2614 if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) { 2615 SDValue N2 = N0.getOperand(1); 2616 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 2617 if (!N2C) 2618 break; 2619 unsigned N1CVal = N1C->getZExtValue(); 2620 unsigned N2CVal = N2C->getZExtValue(); 2621 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) && 2622 (N1CVal & 0xffffU) == 0xffffU && 2623 (N2CVal & 0xffffU) == 0x0U) { 2624 SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16, 2625 MVT::i32); 2626 SDValue Ops[] = { N0.getOperand(0), Imm16, 2627 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) }; 2628 return CurDAG->getMachineNode(Opc, dl, VT, Ops); 2629 } 2630 } 2631 break; 2632 } 2633 case ARMISD::VMOVRRD: 2634 return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32, 2635 N->getOperand(0), getAL(CurDAG), 2636 CurDAG->getRegister(0, MVT::i32)); 2637 case ISD::UMUL_LOHI: { 2638 if (Subtarget->isThumb1Only()) 2639 break; 2640 if (Subtarget->isThumb()) { 2641 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 2642 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) }; 2643 return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops); 2644 } else { 2645 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 2646 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), 2647 CurDAG->getRegister(0, MVT::i32) }; 2648 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ? 2649 ARM::UMULL : ARM::UMULLv5, 2650 dl, MVT::i32, MVT::i32, Ops); 2651 } 2652 } 2653 case ISD::SMUL_LOHI: { 2654 if (Subtarget->isThumb1Only()) 2655 break; 2656 if (Subtarget->isThumb()) { 2657 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 2658 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) }; 2659 return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops); 2660 } else { 2661 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 2662 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), 2663 CurDAG->getRegister(0, MVT::i32) }; 2664 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ? 2665 ARM::SMULL : ARM::SMULLv5, 2666 dl, MVT::i32, MVT::i32, Ops); 2667 } 2668 } 2669 case ARMISD::UMLAL:{ 2670 if (Subtarget->isThumb()) { 2671 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2), 2672 N->getOperand(3), getAL(CurDAG), 2673 CurDAG->getRegister(0, MVT::i32)}; 2674 return CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops); 2675 }else{ 2676 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2), 2677 N->getOperand(3), getAL(CurDAG), 2678 CurDAG->getRegister(0, MVT::i32), 2679 CurDAG->getRegister(0, MVT::i32) }; 2680 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ? 2681 ARM::UMLAL : ARM::UMLALv5, 2682 dl, MVT::i32, MVT::i32, Ops); 2683 } 2684 } 2685 case ARMISD::SMLAL:{ 2686 if (Subtarget->isThumb()) { 2687 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2), 2688 N->getOperand(3), getAL(CurDAG), 2689 CurDAG->getRegister(0, MVT::i32)}; 2690 return CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops); 2691 }else{ 2692 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2), 2693 N->getOperand(3), getAL(CurDAG), 2694 CurDAG->getRegister(0, MVT::i32), 2695 CurDAG->getRegister(0, MVT::i32) }; 2696 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ? 
2697 ARM::SMLAL : ARM::SMLALv5, 2698 dl, MVT::i32, MVT::i32, Ops); 2699 } 2700 } 2701 case ISD::LOAD: { 2702 SDNode *ResNode = 0; 2703 if (Subtarget->isThumb() && Subtarget->hasThumb2()) 2704 ResNode = SelectT2IndexedLoad(N); 2705 else 2706 ResNode = SelectARMIndexedLoad(N); 2707 if (ResNode) 2708 return ResNode; 2709 // Other cases are autogenerated. 2710 break; 2711 } 2712 case ARMISD::BRCOND: { 2713 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc) 2714 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc) 2715 // Pattern complexity = 6 cost = 1 size = 0 2716 2717 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc) 2718 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc) 2719 // Pattern complexity = 6 cost = 1 size = 0 2720 2721 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc) 2722 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc) 2723 // Pattern complexity = 6 cost = 1 size = 0 2724 2725 unsigned Opc = Subtarget->isThumb() ? 2726 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc; 2727 SDValue Chain = N->getOperand(0); 2728 SDValue N1 = N->getOperand(1); 2729 SDValue N2 = N->getOperand(2); 2730 SDValue N3 = N->getOperand(3); 2731 SDValue InFlag = N->getOperand(4); 2732 assert(N1.getOpcode() == ISD::BasicBlock); 2733 assert(N2.getOpcode() == ISD::Constant); 2734 assert(N3.getOpcode() == ISD::Register); 2735 2736 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned) 2737 cast<ConstantSDNode>(N2)->getZExtValue()), 2738 MVT::i32); 2739 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag }; 2740 SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, 2741 MVT::Glue, Ops); 2742 Chain = SDValue(ResNode, 0); 2743 if (N->getNumValues() == 2) { 2744 InFlag = SDValue(ResNode, 1); 2745 ReplaceUses(SDValue(N, 1), InFlag); 2746 } 2747 ReplaceUses(SDValue(N, 0), 2748 SDValue(Chain.getNode(), Chain.getResNo())); 2749 return NULL; 2750 } 2751 case ARMISD::VZIP: { 2752 unsigned Opc = 0; 2753 EVT VT = N->getValueType(0); 2754 switch (VT.getSimpleVT().SimpleTy) { 2755 default: return NULL; 2756 case MVT::v8i8: Opc = ARM::VZIPd8; break; 2757 case MVT::v4i16: Opc = ARM::VZIPd16; break; 2758 case MVT::v2f32: 2759 // vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm. 2760 case MVT::v2i32: Opc = ARM::VTRNd32; break; 2761 case MVT::v16i8: Opc = ARM::VZIPq8; break; 2762 case MVT::v8i16: Opc = ARM::VZIPq16; break; 2763 case MVT::v4f32: 2764 case MVT::v4i32: Opc = ARM::VZIPq32; break; 2765 } 2766 SDValue Pred = getAL(CurDAG); 2767 SDValue PredReg = CurDAG->getRegister(0, MVT::i32); 2768 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg }; 2769 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops); 2770 } 2771 case ARMISD::VUZP: { 2772 unsigned Opc = 0; 2773 EVT VT = N->getValueType(0); 2774 switch (VT.getSimpleVT().SimpleTy) { 2775 default: return NULL; 2776 case MVT::v8i8: Opc = ARM::VUZPd8; break; 2777 case MVT::v4i16: Opc = ARM::VUZPd16; break; 2778 case MVT::v2f32: 2779 // vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm. 
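      // With only two 32-bit lanes per D register the unzip degenerates into
      // a 2x2 transpose, which is exactly what VTRN.32 performs.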
2780 case MVT::v2i32: Opc = ARM::VTRNd32; break; 2781 case MVT::v16i8: Opc = ARM::VUZPq8; break; 2782 case MVT::v8i16: Opc = ARM::VUZPq16; break; 2783 case MVT::v4f32: 2784 case MVT::v4i32: Opc = ARM::VUZPq32; break; 2785 } 2786 SDValue Pred = getAL(CurDAG); 2787 SDValue PredReg = CurDAG->getRegister(0, MVT::i32); 2788 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg }; 2789 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops); 2790 } 2791 case ARMISD::VTRN: { 2792 unsigned Opc = 0; 2793 EVT VT = N->getValueType(0); 2794 switch (VT.getSimpleVT().SimpleTy) { 2795 default: return NULL; 2796 case MVT::v8i8: Opc = ARM::VTRNd8; break; 2797 case MVT::v4i16: Opc = ARM::VTRNd16; break; 2798 case MVT::v2f32: 2799 case MVT::v2i32: Opc = ARM::VTRNd32; break; 2800 case MVT::v16i8: Opc = ARM::VTRNq8; break; 2801 case MVT::v8i16: Opc = ARM::VTRNq16; break; 2802 case MVT::v4f32: 2803 case MVT::v4i32: Opc = ARM::VTRNq32; break; 2804 } 2805 SDValue Pred = getAL(CurDAG); 2806 SDValue PredReg = CurDAG->getRegister(0, MVT::i32); 2807 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg }; 2808 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops); 2809 } 2810 case ARMISD::BUILD_VECTOR: { 2811 EVT VecVT = N->getValueType(0); 2812 EVT EltVT = VecVT.getVectorElementType(); 2813 unsigned NumElts = VecVT.getVectorNumElements(); 2814 if (EltVT == MVT::f64) { 2815 assert(NumElts == 2 && "unexpected type for BUILD_VECTOR"); 2816 return createDRegPairNode(VecVT, N->getOperand(0), N->getOperand(1)); 2817 } 2818 assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR"); 2819 if (NumElts == 2) 2820 return createSRegPairNode(VecVT, N->getOperand(0), N->getOperand(1)); 2821 assert(NumElts == 4 && "unexpected type for BUILD_VECTOR"); 2822 return createQuadSRegsNode(VecVT, N->getOperand(0), N->getOperand(1), 2823 N->getOperand(2), N->getOperand(3)); 2824 } 2825 2826 case ARMISD::VLD2DUP: { 2827 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16, 2828 ARM::VLD2DUPd32 }; 2829 return SelectVLDDup(N, false, 2, Opcodes); 2830 } 2831 2832 case ARMISD::VLD3DUP: { 2833 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo, 2834 ARM::VLD3DUPd16Pseudo, 2835 ARM::VLD3DUPd32Pseudo }; 2836 return SelectVLDDup(N, false, 3, Opcodes); 2837 } 2838 2839 case ARMISD::VLD4DUP: { 2840 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo, 2841 ARM::VLD4DUPd16Pseudo, 2842 ARM::VLD4DUPd32Pseudo }; 2843 return SelectVLDDup(N, false, 4, Opcodes); 2844 } 2845 2846 case ARMISD::VLD2DUP_UPD: { 2847 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8wb_fixed, 2848 ARM::VLD2DUPd16wb_fixed, 2849 ARM::VLD2DUPd32wb_fixed }; 2850 return SelectVLDDup(N, true, 2, Opcodes); 2851 } 2852 2853 case ARMISD::VLD3DUP_UPD: { 2854 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD, 2855 ARM::VLD3DUPd16Pseudo_UPD, 2856 ARM::VLD3DUPd32Pseudo_UPD }; 2857 return SelectVLDDup(N, true, 3, Opcodes); 2858 } 2859 2860 case ARMISD::VLD4DUP_UPD: { 2861 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD, 2862 ARM::VLD4DUPd16Pseudo_UPD, 2863 ARM::VLD4DUPd32Pseudo_UPD }; 2864 return SelectVLDDup(N, true, 4, Opcodes); 2865 } 2866 2867 case ARMISD::VLD1_UPD: { 2868 static const uint16_t DOpcodes[] = { ARM::VLD1d8wb_fixed, 2869 ARM::VLD1d16wb_fixed, 2870 ARM::VLD1d32wb_fixed, 2871 ARM::VLD1d64wb_fixed }; 2872 static const uint16_t QOpcodes[] = { ARM::VLD1q8wb_fixed, 2873 ARM::VLD1q16wb_fixed, 2874 ARM::VLD1q32wb_fixed, 2875 ARM::VLD1q64wb_fixed }; 2876 return SelectVLD(N, true, 1, DOpcodes, QOpcodes, 0); 
2877 } 2878 2879 case ARMISD::VLD2_UPD: { 2880 static const uint16_t DOpcodes[] = { ARM::VLD2d8wb_fixed, 2881 ARM::VLD2d16wb_fixed, 2882 ARM::VLD2d32wb_fixed, 2883 ARM::VLD1q64wb_fixed}; 2884 static const uint16_t QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed, 2885 ARM::VLD2q16PseudoWB_fixed, 2886 ARM::VLD2q32PseudoWB_fixed }; 2887 return SelectVLD(N, true, 2, DOpcodes, QOpcodes, 0); 2888 } 2889 2890 case ARMISD::VLD3_UPD: { 2891 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo_UPD, 2892 ARM::VLD3d16Pseudo_UPD, 2893 ARM::VLD3d32Pseudo_UPD, 2894 ARM::VLD1d64TPseudoWB_fixed}; 2895 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD, 2896 ARM::VLD3q16Pseudo_UPD, 2897 ARM::VLD3q32Pseudo_UPD }; 2898 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD, 2899 ARM::VLD3q16oddPseudo_UPD, 2900 ARM::VLD3q32oddPseudo_UPD }; 2901 return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1); 2902 } 2903 2904 case ARMISD::VLD4_UPD: { 2905 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo_UPD, 2906 ARM::VLD4d16Pseudo_UPD, 2907 ARM::VLD4d32Pseudo_UPD, 2908 ARM::VLD1d64QPseudoWB_fixed}; 2909 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD, 2910 ARM::VLD4q16Pseudo_UPD, 2911 ARM::VLD4q32Pseudo_UPD }; 2912 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD, 2913 ARM::VLD4q16oddPseudo_UPD, 2914 ARM::VLD4q32oddPseudo_UPD }; 2915 return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1); 2916 } 2917 2918 case ARMISD::VLD2LN_UPD: { 2919 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD, 2920 ARM::VLD2LNd16Pseudo_UPD, 2921 ARM::VLD2LNd32Pseudo_UPD }; 2922 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD, 2923 ARM::VLD2LNq32Pseudo_UPD }; 2924 return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes); 2925 } 2926 2927 case ARMISD::VLD3LN_UPD: { 2928 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD, 2929 ARM::VLD3LNd16Pseudo_UPD, 2930 ARM::VLD3LNd32Pseudo_UPD }; 2931 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD, 2932 ARM::VLD3LNq32Pseudo_UPD }; 2933 return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes); 2934 } 2935 2936 case ARMISD::VLD4LN_UPD: { 2937 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD, 2938 ARM::VLD4LNd16Pseudo_UPD, 2939 ARM::VLD4LNd32Pseudo_UPD }; 2940 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD, 2941 ARM::VLD4LNq32Pseudo_UPD }; 2942 return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes); 2943 } 2944 2945 case ARMISD::VST1_UPD: { 2946 static const uint16_t DOpcodes[] = { ARM::VST1d8wb_fixed, 2947 ARM::VST1d16wb_fixed, 2948 ARM::VST1d32wb_fixed, 2949 ARM::VST1d64wb_fixed }; 2950 static const uint16_t QOpcodes[] = { ARM::VST1q8wb_fixed, 2951 ARM::VST1q16wb_fixed, 2952 ARM::VST1q32wb_fixed, 2953 ARM::VST1q64wb_fixed }; 2954 return SelectVST(N, true, 1, DOpcodes, QOpcodes, 0); 2955 } 2956 2957 case ARMISD::VST2_UPD: { 2958 static const uint16_t DOpcodes[] = { ARM::VST2d8wb_fixed, 2959 ARM::VST2d16wb_fixed, 2960 ARM::VST2d32wb_fixed, 2961 ARM::VST1q64wb_fixed}; 2962 static const uint16_t QOpcodes[] = { ARM::VST2q8PseudoWB_fixed, 2963 ARM::VST2q16PseudoWB_fixed, 2964 ARM::VST2q32PseudoWB_fixed }; 2965 return SelectVST(N, true, 2, DOpcodes, QOpcodes, 0); 2966 } 2967 2968 case ARMISD::VST3_UPD: { 2969 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo_UPD, 2970 ARM::VST3d16Pseudo_UPD, 2971 ARM::VST3d32Pseudo_UPD, 2972 ARM::VST1d64TPseudoWB_fixed}; 2973 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD, 2974 ARM::VST3q16Pseudo_UPD, 2975 
ARM::VST3q32Pseudo_UPD }; 2976 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD, 2977 ARM::VST3q16oddPseudo_UPD, 2978 ARM::VST3q32oddPseudo_UPD }; 2979 return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1); 2980 } 2981 2982 case ARMISD::VST4_UPD: { 2983 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo_UPD, 2984 ARM::VST4d16Pseudo_UPD, 2985 ARM::VST4d32Pseudo_UPD, 2986 ARM::VST1d64QPseudoWB_fixed}; 2987 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD, 2988 ARM::VST4q16Pseudo_UPD, 2989 ARM::VST4q32Pseudo_UPD }; 2990 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD, 2991 ARM::VST4q16oddPseudo_UPD, 2992 ARM::VST4q32oddPseudo_UPD }; 2993 return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1); 2994 } 2995 2996 case ARMISD::VST2LN_UPD: { 2997 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD, 2998 ARM::VST2LNd16Pseudo_UPD, 2999 ARM::VST2LNd32Pseudo_UPD }; 3000 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD, 3001 ARM::VST2LNq32Pseudo_UPD }; 3002 return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes); 3003 } 3004 3005 case ARMISD::VST3LN_UPD: { 3006 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD, 3007 ARM::VST3LNd16Pseudo_UPD, 3008 ARM::VST3LNd32Pseudo_UPD }; 3009 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD, 3010 ARM::VST3LNq32Pseudo_UPD }; 3011 return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes); 3012 } 3013 3014 case ARMISD::VST4LN_UPD: { 3015 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD, 3016 ARM::VST4LNd16Pseudo_UPD, 3017 ARM::VST4LNd32Pseudo_UPD }; 3018 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD, 3019 ARM::VST4LNq32Pseudo_UPD }; 3020 return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes); 3021 } 3022 3023 case ISD::INTRINSIC_VOID: 3024 case ISD::INTRINSIC_W_CHAIN: { 3025 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 3026 switch (IntNo) { 3027 default: 3028 break; 3029 3030 case Intrinsic::arm_ldrexd: { 3031 SDValue MemAddr = N->getOperand(2); 3032 SDLoc dl(N); 3033 SDValue Chain = N->getOperand(0); 3034 3035 bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2(); 3036 unsigned NewOpc = isThumb ? ARM::t2LDREXD :ARM::LDREXD; 3037 3038 // arm_ldrexd returns a i64 value in {i32, i32} 3039 std::vector<EVT> ResTys; 3040 if (isThumb) { 3041 ResTys.push_back(MVT::i32); 3042 ResTys.push_back(MVT::i32); 3043 } else 3044 ResTys.push_back(MVT::Untyped); 3045 ResTys.push_back(MVT::Other); 3046 3047 // Place arguments in the right order. 3048 SmallVector<SDValue, 7> Ops; 3049 Ops.push_back(MemAddr); 3050 Ops.push_back(getAL(CurDAG)); 3051 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); 3052 Ops.push_back(Chain); 3053 SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops); 3054 // Transfer memoperands. 3055 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 3056 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand(); 3057 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1); 3058 3059 // Remap uses. 3060 SDValue OutChain = isThumb ? 
SDValue(Ld, 2) : SDValue(Ld, 1); 3061 if (!SDValue(N, 0).use_empty()) { 3062 SDValue Result; 3063 if (isThumb) 3064 Result = SDValue(Ld, 0); 3065 else { 3066 SDValue SubRegIdx = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32); 3067 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, 3068 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx); 3069 Result = SDValue(ResNode,0); 3070 } 3071 ReplaceUses(SDValue(N, 0), Result); 3072 } 3073 if (!SDValue(N, 1).use_empty()) { 3074 SDValue Result; 3075 if (isThumb) 3076 Result = SDValue(Ld, 1); 3077 else { 3078 SDValue SubRegIdx = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32); 3079 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, 3080 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx); 3081 Result = SDValue(ResNode,0); 3082 } 3083 ReplaceUses(SDValue(N, 1), Result); 3084 } 3085 ReplaceUses(SDValue(N, 2), OutChain); 3086 return NULL; 3087 } 3088 3089 case Intrinsic::arm_strexd: { 3090 SDLoc dl(N); 3091 SDValue Chain = N->getOperand(0); 3092 SDValue Val0 = N->getOperand(2); 3093 SDValue Val1 = N->getOperand(3); 3094 SDValue MemAddr = N->getOperand(4); 3095 3096 // Store exclusive double return a i32 value which is the return status 3097 // of the issued store. 3098 EVT ResTys[] = { MVT::i32, MVT::Other }; 3099 3100 bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2(); 3101 // Place arguments in the right order. 3102 SmallVector<SDValue, 7> Ops; 3103 if (isThumb) { 3104 Ops.push_back(Val0); 3105 Ops.push_back(Val1); 3106 } else 3107 // arm_strexd uses GPRPair. 3108 Ops.push_back(SDValue(createGPRPairNode(MVT::Untyped, Val0, Val1), 0)); 3109 Ops.push_back(MemAddr); 3110 Ops.push_back(getAL(CurDAG)); 3111 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); 3112 Ops.push_back(Chain); 3113 3114 unsigned NewOpc = isThumb ? ARM::t2STREXD : ARM::STREXD; 3115 3116 SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops); 3117 // Transfer memoperands. 
3118 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 3119 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand(); 3120 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1); 3121 3122 return St; 3123 } 3124 3125 case Intrinsic::arm_neon_vld1: { 3126 static const uint16_t DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16, 3127 ARM::VLD1d32, ARM::VLD1d64 }; 3128 static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16, 3129 ARM::VLD1q32, ARM::VLD1q64}; 3130 return SelectVLD(N, false, 1, DOpcodes, QOpcodes, 0); 3131 } 3132 3133 case Intrinsic::arm_neon_vld2: { 3134 static const uint16_t DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16, 3135 ARM::VLD2d32, ARM::VLD1q64 }; 3136 static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo, 3137 ARM::VLD2q32Pseudo }; 3138 return SelectVLD(N, false, 2, DOpcodes, QOpcodes, 0); 3139 } 3140 3141 case Intrinsic::arm_neon_vld3: { 3142 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo, 3143 ARM::VLD3d16Pseudo, 3144 ARM::VLD3d32Pseudo, 3145 ARM::VLD1d64TPseudo }; 3146 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD, 3147 ARM::VLD3q16Pseudo_UPD, 3148 ARM::VLD3q32Pseudo_UPD }; 3149 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo, 3150 ARM::VLD3q16oddPseudo, 3151 ARM::VLD3q32oddPseudo }; 3152 return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1); 3153 } 3154 3155 case Intrinsic::arm_neon_vld4: { 3156 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo, 3157 ARM::VLD4d16Pseudo, 3158 ARM::VLD4d32Pseudo, 3159 ARM::VLD1d64QPseudo }; 3160 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD, 3161 ARM::VLD4q16Pseudo_UPD, 3162 ARM::VLD4q32Pseudo_UPD }; 3163 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo, 3164 ARM::VLD4q16oddPseudo, 3165 ARM::VLD4q32oddPseudo }; 3166 return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1); 3167 } 3168 3169 case Intrinsic::arm_neon_vld2lane: { 3170 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo, 3171 ARM::VLD2LNd16Pseudo, 3172 ARM::VLD2LNd32Pseudo }; 3173 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo, 3174 ARM::VLD2LNq32Pseudo }; 3175 return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes); 3176 } 3177 3178 case Intrinsic::arm_neon_vld3lane: { 3179 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo, 3180 ARM::VLD3LNd16Pseudo, 3181 ARM::VLD3LNd32Pseudo }; 3182 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo, 3183 ARM::VLD3LNq32Pseudo }; 3184 return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes); 3185 } 3186 3187 case Intrinsic::arm_neon_vld4lane: { 3188 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo, 3189 ARM::VLD4LNd16Pseudo, 3190 ARM::VLD4LNd32Pseudo }; 3191 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo, 3192 ARM::VLD4LNq32Pseudo }; 3193 return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes); 3194 } 3195 3196 case Intrinsic::arm_neon_vst1: { 3197 static const uint16_t DOpcodes[] = { ARM::VST1d8, ARM::VST1d16, 3198 ARM::VST1d32, ARM::VST1d64 }; 3199 static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16, 3200 ARM::VST1q32, ARM::VST1q64 }; 3201 return SelectVST(N, false, 1, DOpcodes, QOpcodes, 0); 3202 } 3203 3204 case Intrinsic::arm_neon_vst2: { 3205 static const uint16_t DOpcodes[] = { ARM::VST2d8, ARM::VST2d16, 3206 ARM::VST2d32, ARM::VST1q64 }; 3207 static uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo, 3208 ARM::VST2q32Pseudo }; 3209 return SelectVST(N, false, 2, DOpcodes, QOpcodes, 0); 3210 } 3211 3212 case Intrinsic::arm_neon_vst3: { 3213 
static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo, 3214 ARM::VST3d16Pseudo, 3215 ARM::VST3d32Pseudo, 3216 ARM::VST1d64TPseudo }; 3217 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD, 3218 ARM::VST3q16Pseudo_UPD, 3219 ARM::VST3q32Pseudo_UPD }; 3220 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo, 3221 ARM::VST3q16oddPseudo, 3222 ARM::VST3q32oddPseudo }; 3223 return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1); 3224 } 3225 3226 case Intrinsic::arm_neon_vst4: { 3227 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo, 3228 ARM::VST4d16Pseudo, 3229 ARM::VST4d32Pseudo, 3230 ARM::VST1d64QPseudo }; 3231 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD, 3232 ARM::VST4q16Pseudo_UPD, 3233 ARM::VST4q32Pseudo_UPD }; 3234 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo, 3235 ARM::VST4q16oddPseudo, 3236 ARM::VST4q32oddPseudo }; 3237 return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1); 3238 } 3239 3240 case Intrinsic::arm_neon_vst2lane: { 3241 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo, 3242 ARM::VST2LNd16Pseudo, 3243 ARM::VST2LNd32Pseudo }; 3244 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo, 3245 ARM::VST2LNq32Pseudo }; 3246 return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes); 3247 } 3248 3249 case Intrinsic::arm_neon_vst3lane: { 3250 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo, 3251 ARM::VST3LNd16Pseudo, 3252 ARM::VST3LNd32Pseudo }; 3253 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo, 3254 ARM::VST3LNq32Pseudo }; 3255 return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes); 3256 } 3257 3258 case Intrinsic::arm_neon_vst4lane: { 3259 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo, 3260 ARM::VST4LNd16Pseudo, 3261 ARM::VST4LNd32Pseudo }; 3262 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo, 3263 ARM::VST4LNq32Pseudo }; 3264 return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes); 3265 } 3266 } 3267 break; 3268 } 3269 3270 case ISD::INTRINSIC_WO_CHAIN: { 3271 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 3272 switch (IntNo) { 3273 default: 3274 break; 3275 3276 case Intrinsic::arm_neon_vtbl2: 3277 return SelectVTBL(N, false, 2, ARM::VTBL2); 3278 case Intrinsic::arm_neon_vtbl3: 3279 return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo); 3280 case Intrinsic::arm_neon_vtbl4: 3281 return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo); 3282 3283 case Intrinsic::arm_neon_vtbx2: 3284 return SelectVTBL(N, true, 2, ARM::VTBX2); 3285 case Intrinsic::arm_neon_vtbx3: 3286 return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo); 3287 case Intrinsic::arm_neon_vtbx4: 3288 return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo); 3289 } 3290 break; 3291 } 3292 3293 case ARMISD::VTBL1: { 3294 SDLoc dl(N); 3295 EVT VT = N->getValueType(0); 3296 SmallVector<SDValue, 6> Ops; 3297 3298 Ops.push_back(N->getOperand(0)); 3299 Ops.push_back(N->getOperand(1)); 3300 Ops.push_back(getAL(CurDAG)); // Predicate 3301 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register 3302 return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops); 3303 } 3304 case ARMISD::VTBL2: { 3305 SDLoc dl(N); 3306 EVT VT = N->getValueType(0); 3307 3308 // Form a REG_SEQUENCE to force register allocation. 
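    // VTBL with a two-register table requires adjacent D registers
    // ({dN, dN+1}), so tie the two operands into a D-register pair that the
    // register allocator must assign contiguously.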
  case ISD::ATOMIC_LOAD:
    if (cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64)
      return SelectAtomic(N, 0, 0, 0, ARM::ATOMIC_LOAD_I64);
    else
      break;

  case ISD::ATOMIC_STORE:
    if (cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64)
      return SelectAtomic(N, 0, 0, 0, ARM::ATOMIC_STORE_I64);
    else
      break;

  case ISD::ATOMIC_LOAD_ADD:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_ADD_I8,
                        ARM::ATOMIC_LOAD_ADD_I16,
                        ARM::ATOMIC_LOAD_ADD_I32,
                        ARM::ATOMIC_LOAD_ADD_I64);
  case ISD::ATOMIC_LOAD_SUB:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_SUB_I8,
                        ARM::ATOMIC_LOAD_SUB_I16,
                        ARM::ATOMIC_LOAD_SUB_I32,
                        ARM::ATOMIC_LOAD_SUB_I64);
  case ISD::ATOMIC_LOAD_AND:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_AND_I8,
                        ARM::ATOMIC_LOAD_AND_I16,
                        ARM::ATOMIC_LOAD_AND_I32,
                        ARM::ATOMIC_LOAD_AND_I64);
  case ISD::ATOMIC_LOAD_OR:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_OR_I8,
                        ARM::ATOMIC_LOAD_OR_I16,
                        ARM::ATOMIC_LOAD_OR_I32,
                        ARM::ATOMIC_LOAD_OR_I64);
  case ISD::ATOMIC_LOAD_XOR:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_XOR_I8,
                        ARM::ATOMIC_LOAD_XOR_I16,
                        ARM::ATOMIC_LOAD_XOR_I32,
                        ARM::ATOMIC_LOAD_XOR_I64);
  case ISD::ATOMIC_LOAD_NAND:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_NAND_I8,
                        ARM::ATOMIC_LOAD_NAND_I16,
                        ARM::ATOMIC_LOAD_NAND_I32,
                        ARM::ATOMIC_LOAD_NAND_I64);
  case ISD::ATOMIC_LOAD_MIN:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_MIN_I8,
                        ARM::ATOMIC_LOAD_MIN_I16,
                        ARM::ATOMIC_LOAD_MIN_I32,
                        ARM::ATOMIC_LOAD_MIN_I64);
  case ISD::ATOMIC_LOAD_MAX:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_MAX_I8,
                        ARM::ATOMIC_LOAD_MAX_I16,
                        ARM::ATOMIC_LOAD_MAX_I32,
                        ARM::ATOMIC_LOAD_MAX_I64);
  case ISD::ATOMIC_LOAD_UMIN:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_UMIN_I8,
                        ARM::ATOMIC_LOAD_UMIN_I16,
                        ARM::ATOMIC_LOAD_UMIN_I32,
                        ARM::ATOMIC_LOAD_UMIN_I64);
  case ISD::ATOMIC_LOAD_UMAX:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_UMAX_I8,
                        ARM::ATOMIC_LOAD_UMAX_I16,
                        ARM::ATOMIC_LOAD_UMAX_I32,
                        ARM::ATOMIC_LOAD_UMAX_I64);
  case ISD::ATOMIC_SWAP:
    return SelectAtomic(N,
                        ARM::ATOMIC_SWAP_I8,
                        ARM::ATOMIC_SWAP_I16,
                        ARM::ATOMIC_SWAP_I32,
                        ARM::ATOMIC_SWAP_I64);
  case ISD::ATOMIC_CMP_SWAP:
    return SelectAtomic(N,
                        ARM::ATOMIC_CMP_SWAP_I8,
                        ARM::ATOMIC_CMP_SWAP_I16,
                        ARM::ATOMIC_CMP_SWAP_I32,
                        ARM::ATOMIC_CMP_SWAP_I64);
  }

  return SelectCode(N);
}

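/// SelectInlineAsm - Rewrite inline asm operands that hold 64-bit "r"-class
/// values in two i32 GPRs so that they use a single GPRPair operand instead,
/// allowing the register allocator to assign the even/odd register pair
/// required by instructions such as ldrexd/strexd. As an illustration only
/// (Val and Ptr are placeholder names, not taken from this file), an asm
/// statement such as
///   asm volatile("ldrexd %0, %H0, [%1]" : "=&r"(Val) : "r"(Ptr));
/// with a 64-bit Val reaches this point with two separate i32 register
/// operands for %0. Returns the rewritten INLINEASM node, or NULL if
/// nothing was changed.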
SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N) {
  std::vector<SDValue> AsmNodeOperands;
  unsigned Flag, Kind;
  bool Changed = false;
  unsigned NumOps = N->getNumOperands();

  // Normally, i64 data is bound to two arbitrary GPRs for the "%r" constraint.
  // However, some instructions (e.g. ldrexd/strexd in ARM mode) require
  // (even/even+1) GPRs and use %n and %Hn to refer to the individual regs
  // respectively. Since there is no constraint to explicitly specify a
  // reg pair, we use the GPRPair reg class for "%r" for 64-bit data. For
  // Thumb, the 64-bit data may be referred to by H, Q, R modifiers, so we
  // still pack them into a GPRPair.

  SDLoc dl(N);
  SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps - 1) : SDValue(0, 0);

  SmallVector<bool, 8> OpChanged;
  // The glue node, if present, will be appended at the end.
  for (unsigned i = 0, e = N->getGluedNode() ? NumOps - 1 : NumOps; i < e; ++i) {
    SDValue op = N->getOperand(i);
    AsmNodeOperands.push_back(op);

    if (i < InlineAsm::Op_FirstOperand)
      continue;

    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(i))) {
      Flag = C->getZExtValue();
      Kind = InlineAsm::getKind(Flag);
    } else
      continue;

    // Immediate operands to inline asm in the SelectionDAG are modeled with
    // two operands: the first is a constant of value InlineAsm::Kind_Imm, and
    // the second is a constant with the value of the immediate. If we get
    // here with a Kind_Imm, skip the next operand and continue.
    if (Kind == InlineAsm::Kind_Imm) {
      SDValue op = N->getOperand(++i);
      AsmNodeOperands.push_back(op);
      continue;
    }

    unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag);
    if (NumRegs)
      OpChanged.push_back(false);

    unsigned DefIdx = 0;
    bool IsTiedToChangedOp = false;
    // If this is a use tied to a previous def, it has no reg class
    // constraint of its own.
    if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx))
      IsTiedToChangedOp = OpChanged[DefIdx];

    if (Kind != InlineAsm::Kind_RegUse && Kind != InlineAsm::Kind_RegDef
        && Kind != InlineAsm::Kind_RegDefEarlyClobber)
      continue;

    unsigned RC;
    bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC);
    if ((!IsTiedToChangedOp && (!HasRC || RC != ARM::GPRRegClassID))
        || NumRegs != 2)
      continue;

    assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
    SDValue V0 = N->getOperand(i+1);
    SDValue V1 = N->getOperand(i+2);
    unsigned Reg0 = cast<RegisterSDNode>(V0)->getReg();
    unsigned Reg1 = cast<RegisterSDNode>(V1)->getReg();
    SDValue PairedReg;
    MachineRegisterInfo &MRI = MF->getRegInfo();

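    // A def (including an early-clobber def) is replaced by a GPRPair def
    // whose halves are copied back into the original i32 registers; a use is
    // handled by first packing the two i32 registers into a GPRPair.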
    if (Kind == InlineAsm::Kind_RegDef ||
        Kind == InlineAsm::Kind_RegDefEarlyClobber) {
      // Replace the two GPRs with one GPRPair and copy values from the
      // GPRPair back to the original GPRs.

      unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
      PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
      SDValue Chain = SDValue(N, 0);

      SDNode *GU = N->getGluedUser();
      SDValue RegCopy = CurDAG->getCopyFromReg(Chain, dl, GPVR, MVT::Untyped,
                                               Chain.getValue(1));

      // Extract values from the GPRPair reg and copy them to the original
      // GPR regs.
      SDValue Sub0 = CurDAG->getTargetExtractSubreg(ARM::gsub_0, dl, MVT::i32,
                                                    RegCopy);
      SDValue Sub1 = CurDAG->getTargetExtractSubreg(ARM::gsub_1, dl, MVT::i32,
                                                    RegCopy);
      SDValue T0 = CurDAG->getCopyToReg(Sub0, dl, Reg0, Sub0,
                                        RegCopy.getValue(1));
      SDValue T1 = CurDAG->getCopyToReg(Sub1, dl, Reg1, Sub1, T0.getValue(1));

      // Update the original glue user.
      std::vector<SDValue> Ops(GU->op_begin(), GU->op_end()-1);
      Ops.push_back(T1.getValue(1));
      CurDAG->UpdateNodeOperands(GU, &Ops[0], Ops.size());
      GU = T1.getNode();
    } else {
      // For Kind == InlineAsm::Kind_RegUse, we first copy the two GPRs into a
      // GPRPair and then pass the GPRPair to the inline asm.
      SDValue Chain = AsmNodeOperands[InlineAsm::Op_InputChain];

      // As REG_SEQUENCE doesn't take RegisterSDNode operands, copy the GPRs
      // into virtual registers first.
      SDValue T0 = CurDAG->getCopyFromReg(Chain, dl, Reg0, MVT::i32,
                                          Chain.getValue(1));
      SDValue T1 = CurDAG->getCopyFromReg(Chain, dl, Reg1, MVT::i32,
                                          T0.getValue(1));
      SDValue Pair = SDValue(createGPRPairNode(MVT::Untyped, T0, T1), 0);

      // Copy the REG_SEQUENCE into a GPRPair-typed VR and replace the
      // original two i32 VRs of the inline asm with it.
      unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
      PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
      Chain = CurDAG->getCopyToReg(T1, dl, GPVR, Pair, T1.getValue(1));

      AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
      Glue = Chain.getValue(1);
    }

    Changed = true;

    if (PairedReg.getNode()) {
      OpChanged[OpChanged.size() - 1] = true;
      Flag = InlineAsm::getFlagWord(Kind, 1 /* RegNum */);
      if (IsTiedToChangedOp)
        Flag = InlineAsm::getFlagWordForMatchingOp(Flag, DefIdx);
      else
        Flag = InlineAsm::getFlagWordForRegClass(Flag, ARM::GPRPairRegClassID);
      // Replace the current flag.
      AsmNodeOperands[AsmNodeOperands.size() - 1] = CurDAG->getTargetConstant(
          Flag, MVT::i32);
      // Add the new register node and skip the original two GPRs.
      AsmNodeOperands.push_back(PairedReg);
      i += 2;
    }
  }

  if (Glue.getNode())
    AsmNodeOperands.push_back(Glue);
  if (!Changed)
    return NULL;

  SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N),
      CurDAG->getVTList(MVT::Other, MVT::Glue), &AsmNodeOperands[0],
      AsmNodeOperands.size());
  New->setNodeId(-1);
  return New.getNode();
}


bool ARMDAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
  // Require the address to be in a register. That is safe for all ARM
  // variants and it is hard to do anything much smarter without knowing
  // how the operand is used.
  OutOps.push_back(Op);
  return false;
}

/// createARMISelDag - This pass converts a legalized DAG into an
/// ARM-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new ARMDAGToDAGISel(TM, OptLevel);
}