ARMFastISel.cpp revision 263508
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
      : BaseType(RegBase), Offset(0) {
      Base.Reg = 0;
    }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo)
      : FastISel(funcInfo, libInfo),
        TM(funcInfo.MF->getTarget()),
        TII(*TM.getInstrInfo()),
        TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
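    // These wrappers mirror the target-independent FastEmitInst_* emitters in
    // FastISel.cpp; the ARM versions additionally run each emitted
    // instruction through AddOptionalDefs so that predicate and optional
    // CC-def operands are filled in.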
  private:
    unsigned FastEmitInst_(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC);
    unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            unsigned Op0, bool Op0IsKill);
    unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             unsigned Op1, bool Op1IsKill);
    unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              unsigned Op2, bool Op2IsKill);
    unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             uint64_t Imm);
    unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             const ConstantFP *FPImm);
    unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              uint64_t Imm);
    unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            uint64_t Imm);
    unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             uint64_t Imm1, uint64_t Imm2);

    unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                        unsigned Op0, bool Op0IsKill,
                                        uint32_t Idx);

    // Backend specific FastISel code.
  private:
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                     const LoadInst *LI);
    virtual bool FastLowerArguments();
  private:
  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);
    bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);

    // Utility routines.
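    // Address computation, constant materialization, and type-legality
    // helpers shared by the selection routines above.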
  private:
    unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
                                      unsigned OpNum);
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);
    bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                               unsigned Alignment);
    unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
    unsigned ARMMaterializeInt(const Constant *C, MVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
    unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(bool UseReg);
    unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT);

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
                                  bool Return,
                                  bool isVarArg);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes,
                         bool isVarArg);
    unsigned getLibcallReg(const Twine &Name);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes, bool isVarArg);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(MVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're in a Thumb2 function or this isn't a NEON instruction, we'll be
  // handled via isPredicable.
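  // NEON instructions in ARM mode are not predicable via that query, but they
  // may still carry a predicate operand that needs a default value; the scan
  // below checks the operand list for one.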
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
      AFI->isThumb2Function())
    return MI->isPredicable();

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine instruction is predicable, go ahead and add the predicate
// operands; if it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyways.
  if (isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::constrainOperandRegClass(const MCInstrDesc &II,
                                               unsigned Op, unsigned OpNum) {
  if (TargetRegisterInfo::isVirtualRegister(Op)) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      unsigned NewOp = createResultReg(RegClass);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TargetOpcode::COPY), NewOp).addReg(Op));
      return NewOp;
    }
  }
  return Op;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);
  Op2 = constrainOperandRegClass(II, Op2, 3);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return false;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
      &ARM::GPRRegClass;
    unsigned ImmReg = createResultReg(RC);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool. For now 32-bit only.
  if (VT != MVT::i32)
    return false;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else {
    // The extra immediate is for addrmode2.
    DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));
  }

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();
  bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM);
  const TargetRegisterClass *RC = isThumb2 ?
    (const TargetRegisterClass*)&ARM::rGPRRegClass :
    (const TargetRegisterClass*)&ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);

  // FastISel TLS support on non-Darwin is broken, punt to SelectionDAG.
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  if (!Subtarget->isTargetDarwin() && IsThreadLocal) return 0;

  // Use movw+movt when possible, it avoids constant pool entries.
  // Darwin targets don't support movt with Reloc::Static, see
  // ARMTargetLowering::LowerGlobalAddressDarwin. Other targets only support
  // static movt relocations.
  if (Subtarget->useMovt() &&
      Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) {
    unsigned Opc;
    switch (RelocM) {
    case Reloc::PIC_:
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
      break;
    case Reloc::DynamicNoPIC:
      Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn;
      break;
    default:
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
      break;
    }
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg).addGlobalAddress(GV));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = TD.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = TD.getTypeAllocSize(GV->getType());
    }

    if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_)
      return ARMLowerPICELF(GV, Align, VT);

    // Grab index.
    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
      (Subtarget->isThumb() ? 4 : 8);
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = (RelocM != Reloc::PIC_) ?
        ARM::t2LDRpci : ARM::t2LDRpci_pic;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
            .addConstantPoolIndex(Idx);
      if (RelocM == Reloc::PIC_)
        MIB.addImm(Id);
      AddOptionalDefs(MIB);
    } else {
      // The extra immediate is for addrmode2.
      DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                    DestReg)
            .addConstantPoolIndex(Idx)
            .addImm(0);
      AddOptionalDefs(MIB);

      if (RelocM == Reloc::PIC_) {
        unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
        unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));

        MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                          DL, TII.get(Opc), NewDestReg)
                                  .addReg(DestReg)
                                  .addImm(Id);
        AddOptionalDefs(MIB);
        return NewDestReg;
      }
    }
  }

  if (IsIndirect) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple()) return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign- or zero-extended to a basic operation,
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast:
    // Look through bitcasts.
    return ARMComputeAddress(U->getOperand(0), Addr);
  case Instruction::IntToPtr:
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return ARMComputeAddress(U->getOperand(0), Addr);
    break;
  case Instruction::PtrToInt:
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return ARMComputeAddress(U->getOperand(0), Addr);
    break;
  case Instruction::GetElementPtr: {
    Address SavedAddr = Addr;
    int TmpOffset = Addr.Offset;

    // Iterate through the GEP folding the constants into offsets where
    // we can.
    gep_type_iterator GTI = gep_type_begin(U);
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      const Value *Op = *i;
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
        for (;;) {
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
            break;
          }
          if (canFoldAddIntoGEP(U, Op)) {
            // A compatible add with a constant operand. Fold the constant.
            ConstantInt *CI =
              cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Iterate on the other operand.
            Op = cast<AddOperator>(Op)->getOperand(0);
            continue;
          }
          // Unsupported
          goto unsupported_gep;
        }
      }
    }

    // Try to grab the base operand now.
    Addr.Offset = TmpOffset;
    if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

    // We failed, restore everything and try the other options.
    Addr = SavedAddr;

  unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.BaseType = Address::FrameIndexBase;
      Addr.Base.FI = SI->second;
      return true;
    }
    break;
  }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
  bool needsLowering = false;
  switch (VT.SimpleTy) {
  default: llvm_unreachable("Unhandled load/store type!");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    if (!useAM3) {
      // Integer loads/stores handle 12-bit offsets.
      needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
      // Handle negative offsets.
      if (needsLowering && isThumb2)
        needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                          Addr.Offset > -256);
    } else {
      // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
      needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
    }
    break;
  case MVT::f32:
  case MVT::f64:
    // Floating point operands handle 8-bit offsets.
    needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
    break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ?
      (const TargetRegisterClass*)&ARM::tGPRRegClass :
      (const TargetRegisterClass*)&ARM::GPRRegClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        Flags,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.SimpleTy) {
  // This is mostly going to be Neon/vector support.
  default: return false;
  case MVT::i1:
  case MVT::i8:
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
      else
        Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
    } else {
      if (isZExt) {
        Opc = ARM::LDRBi12;
      } else {
        Opc = ARM::LDRSB;
        useAM3 = true;
      }
    }
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    break;
  case MVT::i16:
    if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
      return false;

    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
      else
        Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
    } else {
      Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
      useAM3 = true;
    }
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    break;
  case MVT::i32:
    if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
      return false;

    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        Opc = ARM::t2LDRi8;
      else
        Opc = ARM::t2LDRi12;
    } else {
      Opc = ARM::LDRi12;
    }
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2()) return false;
    // Unaligned loads need special handling. Floats require word-alignment.
    if (Alignment && Alignment < 4) {
      needVMOV = true;
      VT = MVT::i32;
      Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    } else {
      Opc = ARM::VLDRS;
      RC = TLI.getRegClassFor(VT);
    }
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2()) return false;
    // FIXME: Unaligned loads need special handling. Doublewords require
    // word-alignment.
    if (Alignment && Alignment < 4)
      return false;

    Opc = ARM::VLDRD;
    RC = TLI.getRegClassFor(VT);
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load. Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.SimpleTy) {
  // This is mostly going to be Neon/vector support.
  default: return false;
  case MVT::i1: {
    unsigned Res = createResultReg(isThumb2 ?
      (const TargetRegisterClass*)&ARM::tGPRRegClass :
      (const TargetRegisterClass*)&ARM::GPRRegClass);
    unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
    SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), Res)
                    .addReg(SrcReg).addImm(1));
    SrcReg = Res;
  } // Fallthrough here.
  case MVT::i8:
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        StrOpc = ARM::t2STRBi8;
      else
        StrOpc = ARM::t2STRBi12;
    } else {
      StrOpc = ARM::STRBi12;
    }
    break;
  case MVT::i16:
    if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
      return false;

    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        StrOpc = ARM::t2STRHi8;
      else
        StrOpc = ARM::t2STRHi12;
    } else {
      StrOpc = ARM::STRH;
      useAM3 = true;
    }
    break;
  case MVT::i32:
    if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
      return false;

    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        StrOpc = ARM::t2STRi8;
      else
        StrOpc = ARM::t2STRi12;
    } else {
      StrOpc = ARM::STRi12;
    }
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2()) return false;
    // Unaligned stores need special handling. Floats require word-alignment.
    if (Alignment && Alignment < 4) {
      unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRS), MoveReg)
                      .addReg(SrcReg));
      SrcReg = MoveReg;
      VT = MVT::i32;
      StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
    } else {
      StrOpc = ARM::VSTRS;
    }
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2()) return false;
    // FIXME: Unaligned stores need special handling. Doublewords require
    // word-alignment.
    if (Alignment && Alignment < 4)
      return false;

    StrOpc = ARM::VSTRD;
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
          .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
          .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
                  .addReg(AddrReg));

  const IndirectBrInst *IB = cast<IndirectBrInst>(I);
  for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]);

  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcEVT = TLI.getValueType(Ty, true);
  if (!SrcEVT.isSimple()) return false;
  MVT SrcVT = SrcEVT.getSimpleVT();

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
      // than a cmn, because there is no way to represent 2147483648 as a
      // signed 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.SimpleTy) {
  default: return false;
  // TODO: Verify compares.
  case MVT::f32:
    isICmp = false;
    CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
    break;
  case MVT::f64:
    isICmp = false;
    CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    needsExt = true;
  // Intentional fall-through.
  case MVT::i32:
    if (isThumb2) {
      if (!UseImm)
        CmpOpc = ARM::t2CMPrr;
      else
        CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
    } else {
      if (!UseImm)
        CmpOpc = ARM::CMPrr;
      else
        CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
    }
    break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
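  // Both operands get the same extension kind (isZExt) so the 32-bit compare
  // sees consistently extended values.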
  if (needsExt) {
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
    if (!UseImm) {
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
    }
  }

  const MCInstrDesc &II = TII.get(CmpOpc);
  SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0);
  if (!UseImm) {
    SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  const TargetRegisterClass *RC = isThumb2 ?
    (const TargetRegisterClass*)&ARM::rGPRRegClass :
    (const TargetRegisterClass*)&ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
      .addReg(ZeroReg).addImm(1)
      .addImm(ARMPred).addReg(ARM::CPSR);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::DPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::SPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcEVT = TLI.getValueType(Src->getType(), true);
  if (!SrcEVT.isSimple())
    return false;
  MVT SrcVT = SrcEVT.getSimpleVT();
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
                           /*isZExt*/!isSigned);
    if (SrcReg == 0) return false;
  }

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
  else return false;

  // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
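  // The select is lowered as a compare of the condition against zero followed
  // by a predicated move (MOVCC/MVNCC) that picks one of the two operands.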
1719 if (VT != MVT::i32) return false; 1720 1721 unsigned CondReg = getRegForValue(I->getOperand(0)); 1722 if (CondReg == 0) return false; 1723 unsigned Op1Reg = getRegForValue(I->getOperand(1)); 1724 if (Op1Reg == 0) return false; 1725 1726 // Check to see if we can use an immediate in the conditional move. 1727 int Imm = 0; 1728 bool UseImm = false; 1729 bool isNegativeImm = false; 1730 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) { 1731 assert (VT == MVT::i32 && "Expecting an i32."); 1732 Imm = (int)ConstInt->getValue().getZExtValue(); 1733 if (Imm < 0) { 1734 isNegativeImm = true; 1735 Imm = ~Imm; 1736 } 1737 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 1738 (ARM_AM::getSOImmVal(Imm) != -1); 1739 } 1740 1741 unsigned Op2Reg = 0; 1742 if (!UseImm) { 1743 Op2Reg = getRegForValue(I->getOperand(2)); 1744 if (Op2Reg == 0) return false; 1745 } 1746 1747 unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri; 1748 CondReg = constrainOperandRegClass(TII.get(CmpOpc), CondReg, 0); 1749 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc)) 1750 .addReg(CondReg).addImm(0)); 1751 1752 unsigned MovCCOpc; 1753 const TargetRegisterClass *RC; 1754 if (!UseImm) { 1755 RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass; 1756 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr; 1757 } else { 1758 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass; 1759 if (!isNegativeImm) 1760 MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; 1761 else 1762 MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi; 1763 } 1764 unsigned ResultReg = createResultReg(RC); 1765 if (!UseImm) { 1766 Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1); 1767 Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2); 1768 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg) 1769 .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR); 1770 } else { 1771 Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1); 1772 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg) 1773 .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR); 1774 } 1775 UpdateValueMap(I, ResultReg); 1776 return true; 1777} 1778 1779bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) { 1780 MVT VT; 1781 Type *Ty = I->getType(); 1782 if (!isTypeLegal(Ty, VT)) 1783 return false; 1784 1785 // If we have integer div support we should have selected this automagically. 1786 // In case we have a real miss go ahead and return false and we'll pick 1787 // it up later. 1788 if (Subtarget->hasDivide()) return false; 1789 1790 // Otherwise emit a libcall. 1791 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 1792 if (VT == MVT::i8) 1793 LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8; 1794 else if (VT == MVT::i16) 1795 LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16; 1796 else if (VT == MVT::i32) 1797 LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32; 1798 else if (VT == MVT::i64) 1799 LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64; 1800 else if (VT == MVT::i128) 1801 LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128; 1802 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!"); 1803 1804 return ARMEmitLibcall(I, LC); 1805} 1806 1807bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) { 1808 MVT VT; 1809 Type *Ty = I->getType(); 1810 if (!isTypeLegal(Ty, VT)) 1811 return false; 1812 1813 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 1814 if (VT == MVT::i8) 1815 LC = isSigned ? 
RTLIB::SREM_I8 : RTLIB::UREM_I8; 1816 else if (VT == MVT::i16) 1817 LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16; 1818 else if (VT == MVT::i32) 1819 LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32; 1820 else if (VT == MVT::i64) 1821 LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64; 1822 else if (VT == MVT::i128) 1823 LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128; 1824 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!"); 1825 1826 return ARMEmitLibcall(I, LC); 1827} 1828 1829bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) { 1830 EVT DestVT = TLI.getValueType(I->getType(), true); 1831 1832 // We can get here in the case when we have a binary operation on a non-legal 1833 // type and the target independent selector doesn't know how to handle it. 1834 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) 1835 return false; 1836 1837 unsigned Opc; 1838 switch (ISDOpcode) { 1839 default: return false; 1840 case ISD::ADD: 1841 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr; 1842 break; 1843 case ISD::OR: 1844 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr; 1845 break; 1846 case ISD::SUB: 1847 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr; 1848 break; 1849 } 1850 1851 unsigned SrcReg1 = getRegForValue(I->getOperand(0)); 1852 if (SrcReg1 == 0) return false; 1853 1854 // TODO: Often the 2nd operand is an immediate, which can be encoded directly 1855 // in the instruction, rather than materializing the value in a register. 1856 unsigned SrcReg2 = getRegForValue(I->getOperand(1)); 1857 if (SrcReg2 == 0) return false; 1858 1859 unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass); 1860 SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1); 1861 SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2); 1862 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1863 TII.get(Opc), ResultReg) 1864 .addReg(SrcReg1).addReg(SrcReg2)); 1865 UpdateValueMap(I, ResultReg); 1866 return true; 1867} 1868 1869bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) { 1870 EVT FPVT = TLI.getValueType(I->getType(), true); 1871 if (!FPVT.isSimple()) return false; 1872 MVT VT = FPVT.getSimpleVT(); 1873 1874 // We can get here in the case when we want to use NEON for our fp 1875 // operations, but can't figure out how to. Just use the vfp instructions 1876 // if we have them. 1877 // FIXME: It'd be nice to use NEON instructions. 1878 Type *Ty = I->getType(); 1879 bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy()); 1880 if (isFloat && !Subtarget->hasVFP2()) 1881 return false; 1882 1883 unsigned Opc; 1884 bool is64bit = VT == MVT::f64 || VT == MVT::i64; 1885 switch (ISDOpcode) { 1886 default: return false; 1887 case ISD::FADD: 1888 Opc = is64bit ? ARM::VADDD : ARM::VADDS; 1889 break; 1890 case ISD::FSUB: 1891 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS; 1892 break; 1893 case ISD::FMUL: 1894 Opc = is64bit ? ARM::VMULD : ARM::VMULS; 1895 break; 1896 } 1897 unsigned Op1 = getRegForValue(I->getOperand(0)); 1898 if (Op1 == 0) return false; 1899 1900 unsigned Op2 = getRegForValue(I->getOperand(1)); 1901 if (Op2 == 0) return false; 1902 1903 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy)); 1904 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1905 TII.get(Opc), ResultReg) 1906 .addReg(Op1).addReg(Op2)); 1907 UpdateValueMap(I, ResultReg); 1908 return true; 1909} 1910 1911// Call Handling Code 1912 1913// This is largely taken directly from CCAssignFnForNode 1914// TODO: We may not support all of this.
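// [Editorial example, not in the original source: for a non-variadic
// CallingConv::C call on an AAPCS target with VFP2 and a hard-float ABI,
// this resolves to CC_ARM_AAPCS_VFP (RetCC_ARM_AAPCS_VFP when Return is
// true); the same call on an APCS target resolves to CC_ARM_APCS.]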
1915CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, 1916 bool Return, 1917 bool isVarArg) { 1918 switch (CC) { 1919 default: 1920 llvm_unreachable("Unsupported calling convention"); 1921 case CallingConv::Fast: 1922 if (Subtarget->hasVFP2() && !isVarArg) { 1923 if (!Subtarget->isAAPCS_ABI()) 1924 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); 1925 // For AAPCS ABI targets, just use VFP variant of the calling convention. 1926 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1927 } 1928 // Fallthrough 1929 case CallingConv::C: 1930 // Use target triple & subtarget features to do actual dispatch. 1931 if (Subtarget->isAAPCS_ABI()) { 1932 if (Subtarget->hasVFP2() && 1933 TM.Options.FloatABIType == FloatABI::Hard && !isVarArg) 1934 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1935 else 1936 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1937 } else 1938 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS); 1939 case CallingConv::ARM_AAPCS_VFP: 1940 if (!isVarArg) 1941 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1942 // Fall through to soft float variant, variadic functions don't 1943 // use hard floating point ABI. 1944 case CallingConv::ARM_AAPCS: 1945 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1946 case CallingConv::ARM_APCS: 1947 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS); 1948 case CallingConv::GHC: 1949 if (Return) 1950 llvm_unreachable("Can't return in GHC call convention"); 1951 else 1952 return CC_ARM_APCS_GHC; 1953 } 1954} 1955 1956bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args, 1957 SmallVectorImpl<unsigned> &ArgRegs, 1958 SmallVectorImpl<MVT> &ArgVTs, 1959 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags, 1960 SmallVectorImpl<unsigned> &RegArgs, 1961 CallingConv::ID CC, 1962 unsigned &NumBytes, 1963 bool isVarArg) { 1964 SmallVector<CCValAssign, 16> ArgLocs; 1965 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context); 1966 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, 1967 CCAssignFnForCall(CC, false, isVarArg)); 1968 1969 // Check that we can handle all of the arguments. If we can't, then bail out 1970 // now before we add code to the MBB. 1971 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1972 CCValAssign &VA = ArgLocs[i]; 1973 MVT ArgVT = ArgVTs[VA.getValNo()]; 1974 1975 // We don't handle NEON/vector parameters yet. 1976 if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64) 1977 return false; 1978 1979 // Now copy/store arg to correct locations. 1980 if (VA.isRegLoc() && !VA.needsCustom()) { 1981 continue; 1982 } else if (VA.needsCustom()) { 1983 // TODO: We need custom lowering for vector (v2f64) args. 1984 if (VA.getLocVT() != MVT::f64 || 1985 // TODO: Only handle register args for now. 1986 !VA.isRegLoc() || !ArgLocs[++i].isRegLoc()) 1987 return false; 1988 } else { 1989 switch (ArgVT.SimpleTy) { 1990 default: 1991 return false; 1992 case MVT::i1: 1993 case MVT::i8: 1994 case MVT::i16: 1995 case MVT::i32: 1996 break; 1997 case MVT::f32: 1998 if (!Subtarget->hasVFP2()) 1999 return false; 2000 break; 2001 case MVT::f64: 2002 if (!Subtarget->hasVFP2()) 2003 return false; 2004 break; 2005 } 2006 } 2007 } 2008 2009 // At this point, we are able to handle the call's arguments in fast isel. 2010 2011 // Get a count of how many bytes are to be pushed on the stack.
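// [Editorial note, not in the original source: e.g. six i32 arguments
// under AAPCS leave two on the stack, so getNextStackOffset() yields 8
// and the CALLSEQ_START/CALLSEQ_END pseudos bracket the call with that
// byte count.]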
2012 NumBytes = CCInfo.getNextStackOffset(); 2013 2014 // Issue CALLSEQ_START 2015 unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); 2016 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2017 TII.get(AdjStackDown)) 2018 .addImm(NumBytes)); 2019 2020 // Process the args. 2021 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2022 CCValAssign &VA = ArgLocs[i]; 2023 unsigned Arg = ArgRegs[VA.getValNo()]; 2024 MVT ArgVT = ArgVTs[VA.getValNo()]; 2025 2026 assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) && 2027 "We don't handle NEON/vector parameters yet."); 2028 2029 // Handle arg promotion, etc. 2030 switch (VA.getLocInfo()) { 2031 case CCValAssign::Full: break; 2032 case CCValAssign::SExt: { 2033 MVT DestVT = VA.getLocVT(); 2034 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false); 2035 assert (Arg != 0 && "Failed to emit a sext"); 2036 ArgVT = DestVT; 2037 break; 2038 } 2039 case CCValAssign::AExt: 2040 // Intentional fall-through. Handle AExt and ZExt. 2041 case CCValAssign::ZExt: { 2042 MVT DestVT = VA.getLocVT(); 2043 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true); 2044 assert (Arg != 0 && "Failed to emit a zext"); 2045 ArgVT = DestVT; 2046 break; 2047 } 2048 case CCValAssign::BCvt: { 2049 unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg, 2050 /*TODO: Kill=*/false); 2051 assert(BC != 0 && "Failed to emit a bitcast!"); 2052 Arg = BC; 2053 ArgVT = VA.getLocVT(); 2054 break; 2055 } 2056 default: llvm_unreachable("Unknown arg promotion!"); 2057 } 2058 2059 // Now copy/store arg to correct locations. 2060 if (VA.isRegLoc() && !VA.needsCustom()) { 2061 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 2062 VA.getLocReg()) 2063 .addReg(Arg); 2064 RegArgs.push_back(VA.getLocReg()); 2065 } else if (VA.needsCustom()) { 2066 // TODO: We need custom lowering for vector (v2f64) args. 2067 assert(VA.getLocVT() == MVT::f64 && 2068 "Custom lowering for v2f64 args not available"); 2069 2070 CCValAssign &NextVA = ArgLocs[++i]; 2071 2072 assert(VA.isRegLoc() && NextVA.isRegLoc() && 2073 "We only handle register args!"); 2074 2075 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2076 TII.get(ARM::VMOVRRD), VA.getLocReg()) 2077 .addReg(NextVA.getLocReg(), RegState::Define) 2078 .addReg(Arg)); 2079 RegArgs.push_back(VA.getLocReg()); 2080 RegArgs.push_back(NextVA.getLocReg()); 2081 } else { 2082 assert(VA.isMemLoc()); 2083 // Need to store on the stack. 2084 Address Addr; 2085 Addr.BaseType = Address::RegBase; 2086 Addr.Base.Reg = ARM::SP; 2087 Addr.Offset = VA.getLocMemOffset(); 2088 2089 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet; 2090 assert(EmitRet && "Could not emit a store for argument!"); 2091 } 2092 } 2093 2094 return true; 2095} 2096 2097bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs, 2098 const Instruction *I, CallingConv::ID CC, 2099 unsigned &NumBytes, bool isVarArg) { 2100 // Issue CALLSEQ_END 2101 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); 2102 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2103 TII.get(AdjStackUp)) 2104 .addImm(NumBytes).addImm(0)); 2105 2106 // Now the return value. 2107 if (RetVT != MVT::isVoid) { 2108 SmallVector<CCValAssign, 16> RVLocs; 2109 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context); 2110 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); 2111 2112 // Copy all of the result registers out of their specified physreg. 
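// [Editorial sketch, not in the original source: a soft-float-ABI f64
// result comes back split across r0/r1 and is re-joined with VMOVDRR,
// roughly:
//   vmov d0, r0, r1
// The single-location path below is a plain COPY instead.]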
2113 if (RVLocs.size() == 2 && RetVT == MVT::f64) { 2114 // For this move we copy into two registers and then move into the 2115 // double fp reg we want. 2116 MVT DestVT = RVLocs[0].getValVT(); 2117 const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT); 2118 unsigned ResultReg = createResultReg(DstRC); 2119 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2120 TII.get(ARM::VMOVDRR), ResultReg) 2121 .addReg(RVLocs[0].getLocReg()) 2122 .addReg(RVLocs[1].getLocReg())); 2123 2124 UsedRegs.push_back(RVLocs[0].getLocReg()); 2125 UsedRegs.push_back(RVLocs[1].getLocReg()); 2126 2127 // Finally update the result. 2128 UpdateValueMap(I, ResultReg); 2129 } else { 2130 assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!"); 2131 MVT CopyVT = RVLocs[0].getValVT(); 2132 2133 // Special handling for extended integers. 2134 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16) 2135 CopyVT = MVT::i32; 2136 2137 const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT); 2138 2139 unsigned ResultReg = createResultReg(DstRC); 2140 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 2141 ResultReg).addReg(RVLocs[0].getLocReg()); 2142 UsedRegs.push_back(RVLocs[0].getLocReg()); 2143 2144 // Finally update the result. 2145 UpdateValueMap(I, ResultReg); 2146 } 2147 } 2148 2149 return true; 2150} 2151 2152bool ARMFastISel::SelectRet(const Instruction *I) { 2153 const ReturnInst *Ret = cast<ReturnInst>(I); 2154 const Function &F = *I->getParent()->getParent(); 2155 2156 if (!FuncInfo.CanLowerReturn) 2157 return false; 2158 2159 // Build a list of return value registers. 2160 SmallVector<unsigned, 4> RetRegs; 2161 2162 CallingConv::ID CC = F.getCallingConv(); 2163 if (Ret->getNumOperands() > 0) { 2164 SmallVector<ISD::OutputArg, 4> Outs; 2165 GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI); 2166 2167 // Analyze operands of the call, assigning locations to each operand. 2168 SmallVector<CCValAssign, 16> ValLocs; 2169 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,I->getContext()); 2170 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */, 2171 F.isVarArg())); 2172 2173 const Value *RV = Ret->getOperand(0); 2174 unsigned Reg = getRegForValue(RV); 2175 if (Reg == 0) 2176 return false; 2177 2178 // Only handle a single return value for now. 2179 if (ValLocs.size() != 1) 2180 return false; 2181 2182 CCValAssign &VA = ValLocs[0]; 2183 2184 // Don't bother handling odd stuff for now. 2185 if (VA.getLocInfo() != CCValAssign::Full) 2186 return false; 2187 // Only handle register returns for now. 2188 if (!VA.isRegLoc()) 2189 return false; 2190 2191 unsigned SrcReg = Reg + VA.getValNo(); 2192 EVT RVEVT = TLI.getValueType(RV->getType()); 2193 if (!RVEVT.isSimple()) return false; 2194 MVT RVVT = RVEVT.getSimpleVT(); 2195 MVT DestVT = VA.getValVT(); 2196 // Special handling for extended integers. 2197 if (RVVT != DestVT) { 2198 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16) 2199 return false; 2200 2201 assert(DestVT == MVT::i32 && "ARM should always ext to i32"); 2202 2203 // Perform extension if flagged as either zext or sext. Otherwise, do 2204 // nothing. 2205 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) { 2206 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt()); 2207 if (SrcReg == 0) return false; 2208 } 2209 } 2210 2211 // Make the copy. 
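// [Editorial note, not in the original source: the return value is copied
// into the convention-assigned physreg (typically r0 for an i32), which is
// then listed as an implicit use on the BX_RET/tBX_RET built below so the
// return instruction keeps it live.]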
2212 unsigned DstReg = VA.getLocReg(); 2213 const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg); 2214 // Avoid a cross-class copy. This is very unlikely. 2215 if (!SrcRC->contains(DstReg)) 2216 return false; 2217 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 2218 DstReg).addReg(SrcReg); 2219 2220 // Add register to return instruction. 2221 RetRegs.push_back(VA.getLocReg()); 2222 } 2223 2224 unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET; 2225 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2226 TII.get(RetOpc)); 2227 AddOptionalDefs(MIB); 2228 for (unsigned i = 0, e = RetRegs.size(); i != e; ++i) 2229 MIB.addReg(RetRegs[i], RegState::Implicit); 2230 return true; 2231} 2232 2233unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) { 2234 if (UseReg) 2235 return isThumb2 ? ARM::tBLXr : ARM::BLX; 2236 else 2237 return isThumb2 ? ARM::tBL : ARM::BL; 2238} 2239 2240unsigned ARMFastISel::getLibcallReg(const Twine &Name) { 2241 // Manually compute the global's type to avoid building it when unnecessary. 2242 Type *GVTy = Type::getInt32PtrTy(*Context, /*AS=*/0); 2243 EVT LCREVT = TLI.getValueType(GVTy); 2244 if (!LCREVT.isSimple()) return 0; 2245 2246 GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false, 2247 GlobalValue::ExternalLinkage, 0, Name); 2248 assert(GV->getType() == GVTy && "We miscomputed the type for the global!"); 2249 return ARMMaterializeGV(GV, LCREVT.getSimpleVT()); 2250} 2251 2252// A quick function that will emit a call for a named libcall in F with the 2253// vector of passed arguments for the Instruction in I. We can assume that we 2254// can emit a call for any libcall we can produce. This is an abridged version 2255// of the full call infrastructure since we won't need to worry about things 2256// like computed function pointers or strange arguments at call sites. 2257// TODO: Try to unify this and the normal call bits for ARM, then try to unify 2258// with X86. 2259bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { 2260 CallingConv::ID CC = TLI.getLibcallCallingConv(Call); 2261 2262 // Handle *simple* calls for now. 2263 Type *RetTy = I->getType(); 2264 MVT RetVT; 2265 if (RetTy->isVoidTy()) 2266 RetVT = MVT::isVoid; 2267 else if (!isTypeLegal(RetTy, RetVT)) 2268 return false; 2269 2270 // Can't handle non-double multi-reg retvals. 2271 if (RetVT != MVT::isVoid && RetVT != MVT::i32) { 2272 SmallVector<CCValAssign, 16> RVLocs; 2273 CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context); 2274 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false)); 2275 if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2276 return false; 2277 } 2278 2279 // Set up the argument vectors. 
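// [Editorial example, not in the original source: for 'srem i32 %a, %b'
// SelectRem picks RTLIB::SREM_I32 and this routine emits, schematically:
//   mov r0, rA
//   mov r1, rB
//   bl  __modsi3     @ or whatever name TLI.getLibcallName() reports
//   mov rD, r0
// where the actual callee name depends on the target's libcall setup.]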
2280 SmallVector<Value*, 8> Args; 2281 SmallVector<unsigned, 8> ArgRegs; 2282 SmallVector<MVT, 8> ArgVTs; 2283 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2284 Args.reserve(I->getNumOperands()); 2285 ArgRegs.reserve(I->getNumOperands()); 2286 ArgVTs.reserve(I->getNumOperands()); 2287 ArgFlags.reserve(I->getNumOperands()); 2288 for (unsigned i = 0; i < I->getNumOperands(); ++i) { 2289 Value *Op = I->getOperand(i); 2290 unsigned Arg = getRegForValue(Op); 2291 if (Arg == 0) return false; 2292 2293 Type *ArgTy = Op->getType(); 2294 MVT ArgVT; 2295 if (!isTypeLegal(ArgTy, ArgVT)) return false; 2296 2297 ISD::ArgFlagsTy Flags; 2298 unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy); 2299 Flags.setOrigAlign(OriginalAlignment); 2300 2301 Args.push_back(Op); 2302 ArgRegs.push_back(Arg); 2303 ArgVTs.push_back(ArgVT); 2304 ArgFlags.push_back(Flags); 2305 } 2306 2307 // Handle the arguments now that we've gotten them. 2308 SmallVector<unsigned, 4> RegArgs; 2309 unsigned NumBytes; 2310 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, 2311 RegArgs, CC, NumBytes, false)) 2312 return false; 2313 2314 unsigned CalleeReg = 0; 2315 if (EnableARMLongCalls) { 2316 CalleeReg = getLibcallReg(TLI.getLibcallName(Call)); 2317 if (CalleeReg == 0) return false; 2318 } 2319 2320 // Issue the call. 2321 unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls); 2322 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2323 DL, TII.get(CallOpc)); 2324 // BL / BLX don't take a predicate, but tBL / tBLX do. 2325 if (isThumb2) 2326 AddDefaultPred(MIB); 2327 if (EnableARMLongCalls) 2328 MIB.addReg(CalleeReg); 2329 else 2330 MIB.addExternalSymbol(TLI.getLibcallName(Call)); 2331 2332 // Add implicit physical register uses to the call. 2333 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 2334 MIB.addReg(RegArgs[i], RegState::Implicit); 2335 2336 // Add a register mask with the call-preserved registers. 2337 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2338 MIB.addRegMask(TRI.getCallPreservedMask(CC)); 2339 2340 // Finish off the call including any return values. 2341 SmallVector<unsigned, 4> UsedRegs; 2342 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false; 2343 2344 // Set all unused physreg defs as dead. 2345 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2346 2347 return true; 2348} 2349 2350bool ARMFastISel::SelectCall(const Instruction *I, 2351 const char *IntrMemName = 0) { 2352 const CallInst *CI = cast<CallInst>(I); 2353 const Value *Callee = CI->getCalledValue(); 2354 2355 // Can't handle inline asm. 2356 if (isa<InlineAsm>(Callee)) return false; 2357 2358 // Allow SelectionDAG isel to handle tail calls. 2359 if (CI->isTailCall()) return false; 2360 2361 // Check the calling convention. 2362 ImmutableCallSite CS(CI); 2363 CallingConv::ID CC = CS.getCallingConv(); 2364 2365 // TODO: Avoid some calling conventions? 2366 2367 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType()); 2368 FunctionType *FTy = cast<FunctionType>(PT->getElementType()); 2369 bool isVarArg = FTy->isVarArg(); 2370 2371 // Handle *simple* calls for now. 2372 Type *RetTy = I->getType(); 2373 MVT RetVT; 2374 if (RetTy->isVoidTy()) 2375 RetVT = MVT::isVoid; 2376 else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 && 2377 RetVT != MVT::i8 && RetVT != MVT::i1) 2378 return false; 2379 2380 // Can't handle non-double multi-reg retvals. 
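// [Editorial note, not in the original source: anything assigned more than
// one result register is rejected here unless it is the f64-in-two-GPRs
// case that FinishCall knows how to re-join with VMOVDRR.]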
2381 if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 && 2382 RetVT != MVT::i16 && RetVT != MVT::i32) { 2383 SmallVector<CCValAssign, 16> RVLocs; 2384 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context); 2385 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); 2386 if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2387 return false; 2388 } 2389 2390 // Set up the argument vectors. 2391 SmallVector<Value*, 8> Args; 2392 SmallVector<unsigned, 8> ArgRegs; 2393 SmallVector<MVT, 8> ArgVTs; 2394 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2395 unsigned arg_size = CS.arg_size(); 2396 Args.reserve(arg_size); 2397 ArgRegs.reserve(arg_size); 2398 ArgVTs.reserve(arg_size); 2399 ArgFlags.reserve(arg_size); 2400 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); 2401 i != e; ++i) { 2402 // If we're lowering a memory intrinsic instead of a regular call, skip the 2403 // last two arguments, which shouldn't be passed to the underlying function. 2404 if (IntrMemName && e-i <= 2) 2405 break; 2406 2407 ISD::ArgFlagsTy Flags; 2408 unsigned AttrInd = i - CS.arg_begin() + 1; 2409 if (CS.paramHasAttr(AttrInd, Attribute::SExt)) 2410 Flags.setSExt(); 2411 if (CS.paramHasAttr(AttrInd, Attribute::ZExt)) 2412 Flags.setZExt(); 2413 2414 // FIXME: Only handle *easy* calls for now. 2415 if (CS.paramHasAttr(AttrInd, Attribute::InReg) || 2416 CS.paramHasAttr(AttrInd, Attribute::StructRet) || 2417 CS.paramHasAttr(AttrInd, Attribute::Nest) || 2418 CS.paramHasAttr(AttrInd, Attribute::ByVal)) 2419 return false; 2420 2421 Type *ArgTy = (*i)->getType(); 2422 MVT ArgVT; 2423 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 && 2424 ArgVT != MVT::i1) 2425 return false; 2426 2427 unsigned Arg = getRegForValue(*i); 2428 if (Arg == 0) 2429 return false; 2430 2431 unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy); 2432 Flags.setOrigAlign(OriginalAlignment); 2433 2434 Args.push_back(*i); 2435 ArgRegs.push_back(Arg); 2436 ArgVTs.push_back(ArgVT); 2437 ArgFlags.push_back(Flags); 2438 } 2439 2440 // Handle the arguments now that we've gotten them. 2441 SmallVector<unsigned, 4> RegArgs; 2442 unsigned NumBytes; 2443 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, 2444 RegArgs, CC, NumBytes, isVarArg)) 2445 return false; 2446 2447 bool UseReg = false; 2448 const GlobalValue *GV = dyn_cast<GlobalValue>(Callee); 2449 if (!GV || EnableARMLongCalls) UseReg = true; 2450 2451 unsigned CalleeReg = 0; 2452 if (UseReg) { 2453 if (IntrMemName) 2454 CalleeReg = getLibcallReg(IntrMemName); 2455 else 2456 CalleeReg = getRegForValue(Callee); 2457 2458 if (CalleeReg == 0) return false; 2459 } 2460 2461 // Issue the call. 2462 unsigned CallOpc = ARMSelectCallOp(UseReg); 2463 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2464 DL, TII.get(CallOpc)); 2465 2466 unsigned char OpFlags = 0; 2467 2468 // Add MO_PLT for global address or external symbol in the PIC relocation 2469 // model. 2470 if (Subtarget->isTargetELF() && TM.getRelocationModel() == Reloc::PIC_) 2471 OpFlags = ARMII::MO_PLT; 2472 2473 // ARM calls don't take a predicate, but tBL / tBLX do. 2474 if(isThumb2) 2475 AddDefaultPred(MIB); 2476 if (UseReg) 2477 MIB.addReg(CalleeReg); 2478 else if (!IntrMemName) 2479 MIB.addGlobalAddress(GV, 0, OpFlags); 2480 else 2481 MIB.addExternalSymbol(IntrMemName, OpFlags); 2482 2483 // Add implicit physical register uses to the call. 
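// [Editorial note, not in the original source: marking the argument
// registers as implicit uses keeps the COPYs into r0-r3/s0-s15 alive;
// without them the register allocator could treat those copies as dead
// code.]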
2484 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 2485 MIB.addReg(RegArgs[i], RegState::Implicit); 2486 2487 // Add a register mask with the call-preserved registers. 2488 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2489 MIB.addRegMask(TRI.getCallPreservedMask(CC)); 2490 2491 // Finish off the call including any return values. 2492 SmallVector<unsigned, 4> UsedRegs; 2493 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg)) 2494 return false; 2495 2496 // Set all unused physreg defs as dead. 2497 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2498 2499 return true; 2500} 2501 2502bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) { 2503 return Len <= 16; 2504} 2505 2506bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, 2507 uint64_t Len, unsigned Alignment) { 2508 // Make sure we don't bloat code by inlining very large memcpy's. 2509 if (!ARMIsMemCpySmall(Len)) 2510 return false; 2511 2512 while (Len) { 2513 MVT VT; 2514 if (!Alignment || Alignment >= 4) { 2515 if (Len >= 4) 2516 VT = MVT::i32; 2517 else if (Len >= 2) 2518 VT = MVT::i16; 2519 else { 2520 assert (Len == 1 && "Expected a length of 1!"); 2521 VT = MVT::i8; 2522 } 2523 } else { 2524 // Bound based on alignment. 2525 if (Len >= 2 && Alignment == 2) 2526 VT = MVT::i16; 2527 else { 2528 VT = MVT::i8; 2529 } 2530 } 2531 2532 bool RV; 2533 unsigned ResultReg; 2534 RV = ARMEmitLoad(VT, ResultReg, Src); 2535 assert (RV == true && "Should be able to handle this load."); 2536 RV = ARMEmitStore(VT, ResultReg, Dest); 2537 assert (RV == true && "Should be able to handle this store."); 2538 (void)RV; 2539 2540 unsigned Size = VT.getSizeInBits()/8; 2541 Len -= Size; 2542 Dest.Offset += Size; 2543 Src.Offset += Size; 2544 } 2545 2546 return true; 2547} 2548 2549bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) { 2550 // FIXME: Handle more intrinsics. 2551 switch (I.getIntrinsicID()) { 2552 default: return false; 2553 case Intrinsic::frameaddress: { 2554 MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo(); 2555 MFI->setFrameAddressIsTaken(true); 2556 2557 unsigned LdrOpc; 2558 const TargetRegisterClass *RC; 2559 if (isThumb2) { 2560 LdrOpc = ARM::t2LDRi12; 2561 RC = (const TargetRegisterClass*)&ARM::tGPRRegClass; 2562 } else { 2563 LdrOpc = ARM::LDRi12; 2564 RC = (const TargetRegisterClass*)&ARM::GPRRegClass; 2565 } 2566 2567 const ARMBaseRegisterInfo *RegInfo = 2568 static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo()); 2569 unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF)); 2570 unsigned SrcReg = FramePtr; 2571 2572 // Recursively load frame address 2573 // ldr r0 [fp] 2574 // ldr r0 [r0] 2575 // ldr r0 [r0] 2576 // ... 2577 unsigned DestReg; 2578 unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue(); 2579 while (Depth--) { 2580 DestReg = createResultReg(RC); 2581 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2582 TII.get(LdrOpc), DestReg) 2583 .addReg(SrcReg).addImm(0)); 2584 SrcReg = DestReg; 2585 } 2586 UpdateValueMap(&I, SrcReg); 2587 return true; 2588 } 2589 case Intrinsic::memcpy: 2590 case Intrinsic::memmove: { 2591 const MemTransferInst &MTI = cast<MemTransferInst>(I); 2592 // Don't handle volatile. 2593 if (MTI.isVolatile()) 2594 return false; 2595 2596 // Disable inlining for memmove before calls to ComputeAddress. Otherwise, 2597 // we would emit dead code because we don't currently handle memmoves. 
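// [Editorial example, not in the original source: ARMTryEmitSmallMemCpy
// (defined above, used just below) unrolls a 6-byte, 4-aligned copy into
// two load/store pairs, roughly:
//   ldr  r2, [rSrc]      ; str  r2, [rDst]
//   ldrh r2, [rSrc, #4]  ; strh r2, [rDst, #4]
// so no call is emitted at all.]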
2598 bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy); 2599 if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) { 2600 // Small memcpy's are common enough that we want to do them without a call 2601 // if possible. 2602 uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue(); 2603 if (ARMIsMemCpySmall(Len)) { 2604 Address Dest, Src; 2605 if (!ARMComputeAddress(MTI.getRawDest(), Dest) || 2606 !ARMComputeAddress(MTI.getRawSource(), Src)) 2607 return false; 2608 unsigned Alignment = MTI.getAlignment(); 2609 if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment)) 2610 return true; 2611 } 2612 } 2613 2614 if (!MTI.getLength()->getType()->isIntegerTy(32)) 2615 return false; 2616 2617 if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255) 2618 return false; 2619 2620 const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove"; 2621 return SelectCall(&I, IntrMemName); 2622 } 2623 case Intrinsic::memset: { 2624 const MemSetInst &MSI = cast<MemSetInst>(I); 2625 // Don't handle volatile. 2626 if (MSI.isVolatile()) 2627 return false; 2628 2629 if (!MSI.getLength()->getType()->isIntegerTy(32)) 2630 return false; 2631 2632 if (MSI.getDestAddressSpace() > 255) 2633 return false; 2634 2635 return SelectCall(&I, "memset"); 2636 } 2637 case Intrinsic::trap: { 2638 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get( 2639 Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP)); 2640 return true; 2641 } 2642 } 2643} 2644 2645bool ARMFastISel::SelectTrunc(const Instruction *I) { 2646 // The high bits for a type smaller than the register size are assumed to be 2647 // undefined. 2648 Value *Op = I->getOperand(0); 2649 2650 EVT SrcVT, DestVT; 2651 SrcVT = TLI.getValueType(Op->getType(), true); 2652 DestVT = TLI.getValueType(I->getType(), true); 2653 2654 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 2655 return false; 2656 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) 2657 return false; 2658 2659 unsigned SrcReg = getRegForValue(Op); 2660 if (!SrcReg) return false; 2661 2662 // Because the high bits are undefined, a truncate doesn't generate 2663 // any code. 2664 UpdateValueMap(I, SrcReg); 2665 return true; 2666} 2667 2668unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, 2669 bool isZExt) { 2670 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8) 2671 return 0; 2672 if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1) 2673 return 0; 2674 2675 // Table of which combinations can be emitted as a single instruction, 2676 // and which will require two. 2677 static const uint8_t isSingleInstrTbl[3][2][2][2] = { 2678 // ARM Thumb 2679 // !hasV6Ops hasV6Ops !hasV6Ops hasV6Ops 2680 // ext: s z s z s z s z 2681 /* 1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } }, 2682 /* 8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }, 2683 /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } } 2684 }; 2685 2686 // Target registers for: 2687 // - For ARM can never be PC. 2688 // - For 16-bit Thumb are restricted to lower 8 registers. 2689 // - For 32-bit Thumb are restricted to non-SP and non-PC. 2690 static const TargetRegisterClass *RCTbl[2][2] = { 2691 // Instructions: Two Single 2692 /* ARM */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass }, 2693 /* Thumb */ { &ARM::tGPRRegClass, &ARM::rGPRRegClass } 2694 }; 2695 2696 // Table governing the instruction(s) to be emitted. 
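// [Editorial example, not in the original source: on pre-ARMv6 ARM there
// is no SXTH, so a 16-bit sext takes the two-instruction rows below, i.e.
//   mov rT, rS, lsl #16
//   mov rD, rT, asr #16
// while ARMv6+ takes the single-instruction row (sxth rD, rS).]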
2697 static const struct InstructionTable { 2698 uint32_t Opc : 16; 2699 uint32_t hasS : 1; // Some instructions have an S bit, always set it to 0. 2700 uint32_t Shift : 7; // For shift operand addressing mode, used by MOVsi. 2701 uint32_t Imm : 8; // All instructions have either a shift or a mask. 2702 } IT[2][2][3][2] = { 2703 { // Two instructions (first is left shift, second is in this table). 2704 { // ARM Opc S Shift Imm 2705 /* 1 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 31 }, 2706 /* 1 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 31 } }, 2707 /* 8 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 24 }, 2708 /* 8 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 24 } }, 2709 /* 16 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 16 }, 2710 /* 16 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 16 } } 2711 }, 2712 { // Thumb Opc S Shift Imm 2713 /* 1 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 31 }, 2714 /* 1 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 31 } }, 2715 /* 8 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 24 }, 2716 /* 8 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 24 } }, 2717 /* 16 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 16 }, 2718 /* 16 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 16 } } 2719 } 2720 }, 2721 { // Single instruction. 2722 { // ARM Opc S Shift Imm 2723 /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 }, 2724 /* 1 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 1 } }, 2725 /* 8 bit sext */ { { ARM::SXTB , 0, ARM_AM::no_shift, 0 }, 2726 /* 8 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 255 } }, 2727 /* 16 bit sext */ { { ARM::SXTH , 0, ARM_AM::no_shift, 0 }, 2728 /* 16 bit zext */ { ARM::UXTH , 0, ARM_AM::no_shift, 0 } } 2729 }, 2730 { // Thumb Opc S Shift Imm 2731 /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 }, 2732 /* 1 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 1 } }, 2733 /* 8 bit sext */ { { ARM::t2SXTB , 0, ARM_AM::no_shift, 0 }, 2734 /* 8 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 255 } }, 2735 /* 16 bit sext */ { { ARM::t2SXTH , 0, ARM_AM::no_shift, 0 }, 2736 /* 16 bit zext */ { ARM::t2UXTH , 0, ARM_AM::no_shift, 0 } } 2737 } 2738 } 2739 }; 2740 2741 unsigned SrcBits = SrcVT.getSizeInBits(); 2742 unsigned DestBits = DestVT.getSizeInBits(); 2743 (void) DestBits; 2744 assert((SrcBits < DestBits) && "can only extend to larger types"); 2745 assert((DestBits == 32 || DestBits == 16 || DestBits == 8) && 2746 "other sizes unimplemented"); 2747 assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) && 2748 "other sizes unimplemented"); 2749 2750 bool hasV6Ops = Subtarget->hasV6Ops(); 2751 unsigned Bitness = SrcBits / 8; // {1,8,16}=>{0,1,2} 2752 assert((Bitness < 3) && "sanity-check table bounds"); 2753 2754 bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt]; 2755 const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr]; 2756 const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt]; 2757 unsigned Opc = ITP->Opc; 2758 assert(ARM::KILL != Opc && "Invalid table entry"); 2759 unsigned hasS = ITP->hasS; 2760 ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc) ITP->Shift; 2761 assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) && 2762 "only MOVsi has shift operand addressing mode"); 2763 unsigned Imm = ITP->Imm; 2764 2765 // 16-bit Thumb instructions always set CPSR (unless they're in an IT block). 2766 bool setsCPSR = &ARM::tGPRRegClass == RC; 2767 unsigned LSLOpc = isThumb2 ? 
ARM::tLSLri : ARM::MOVsi; 2768 unsigned ResultReg; 2769 // MOVsi encodes shift and immediate in shift operand addressing mode. 2770 // The following condition has the same value when emitting two 2771 // instruction sequences: both are shifts. 2772 bool ImmIsSO = (Shift != ARM_AM::no_shift); 2773 2774 // Either one or two instructions are emitted. 2775 // They're always of the form: 2776 // dst = in OP imm 2777 // CPSR is set only by 16-bit Thumb instructions. 2778 // Predicate, if any, is AL. 2779 // S bit, if available, is always 0. 2780 // When two are emitted the first's result will feed as the second's input, 2781 // that value is then dead. 2782 unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2; 2783 for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) { 2784 ResultReg = createResultReg(RC); 2785 bool isLsl = (0 == Instr) && !isSingleInstr; 2786 unsigned Opcode = isLsl ? LSLOpc : Opc; 2787 ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift; 2788 unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm; 2789 bool isKill = 1 == Instr; 2790 MachineInstrBuilder MIB = BuildMI( 2791 *FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opcode), ResultReg); 2792 if (setsCPSR) 2793 MIB.addReg(ARM::CPSR, RegState::Define); 2794 SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR); 2795 AddDefaultPred(MIB.addReg(SrcReg, isKill * RegState::Kill).addImm(ImmEnc)); 2796 if (hasS) 2797 AddDefaultCC(MIB); 2798 // Second instruction consumes the first's result. 2799 SrcReg = ResultReg; 2800 } 2801 2802 return ResultReg; 2803} 2804 2805bool ARMFastISel::SelectIntExt(const Instruction *I) { 2806 // On ARM, in general, integer casts don't involve legal types; this code 2807 // handles promotable integers. 2808 Type *DestTy = I->getType(); 2809 Value *Src = I->getOperand(0); 2810 Type *SrcTy = Src->getType(); 2811 2812 bool isZExt = isa<ZExtInst>(I); 2813 unsigned SrcReg = getRegForValue(Src); 2814 if (!SrcReg) return false; 2815 2816 EVT SrcEVT, DestEVT; 2817 SrcEVT = TLI.getValueType(SrcTy, true); 2818 DestEVT = TLI.getValueType(DestTy, true); 2819 if (!SrcEVT.isSimple()) return false; 2820 if (!DestEVT.isSimple()) return false; 2821 2822 MVT SrcVT = SrcEVT.getSimpleVT(); 2823 MVT DestVT = DestEVT.getSimpleVT(); 2824 unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt); 2825 if (ResultReg == 0) return false; 2826 UpdateValueMap(I, ResultReg); 2827 return true; 2828} 2829 2830bool ARMFastISel::SelectShift(const Instruction *I, 2831 ARM_AM::ShiftOpc ShiftTy) { 2832 // We handle thumb2 mode by target independent selector 2833 // or SelectionDAG ISel. 2834 if (isThumb2) 2835 return false; 2836 2837 // Only handle i32 now. 2838 EVT DestVT = TLI.getValueType(I->getType(), true); 2839 if (DestVT != MVT::i32) 2840 return false; 2841 2842 unsigned Opc = ARM::MOVsr; 2843 unsigned ShiftImm; 2844 Value *Src2Value = I->getOperand(1); 2845 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) { 2846 ShiftImm = CI->getZExtValue(); 2847 2848 // Fall back to selection DAG isel if the shift amount 2849 // is zero or greater than the width of the value type. 
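// [Editorial note, not in the original source: ARM's shift-operand
// encoding treats amounts 0 and 32 specially, so those cases are left to
// the DAG selector. For an in-range constant, e.g. 'shl i32 %a, 3', the
// result is a single shifted move:
//   mov rD, rA, lsl #3]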
2850 if (ShiftImm == 0 || ShiftImm >=32) 2851 return false; 2852 2853 Opc = ARM::MOVsi; 2854 } 2855 2856 Value *Src1Value = I->getOperand(0); 2857 unsigned Reg1 = getRegForValue(Src1Value); 2858 if (Reg1 == 0) return false; 2859 2860 unsigned Reg2 = 0; 2861 if (Opc == ARM::MOVsr) { 2862 Reg2 = getRegForValue(Src2Value); 2863 if (Reg2 == 0) return false; 2864 } 2865 2866 unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass); 2867 if(ResultReg == 0) return false; 2868 2869 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2870 TII.get(Opc), ResultReg) 2871 .addReg(Reg1); 2872 2873 if (Opc == ARM::MOVsi) 2874 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm)); 2875 else if (Opc == ARM::MOVsr) { 2876 MIB.addReg(Reg2); 2877 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0)); 2878 } 2879 2880 AddOptionalDefs(MIB); 2881 UpdateValueMap(I, ResultReg); 2882 return true; 2883} 2884 2885// TODO: SoftFP support. 2886bool ARMFastISel::TargetSelectInstruction(const Instruction *I) { 2887 2888 switch (I->getOpcode()) { 2889 case Instruction::Load: 2890 return SelectLoad(I); 2891 case Instruction::Store: 2892 return SelectStore(I); 2893 case Instruction::Br: 2894 return SelectBranch(I); 2895 case Instruction::IndirectBr: 2896 return SelectIndirectBr(I); 2897 case Instruction::ICmp: 2898 case Instruction::FCmp: 2899 return SelectCmp(I); 2900 case Instruction::FPExt: 2901 return SelectFPExt(I); 2902 case Instruction::FPTrunc: 2903 return SelectFPTrunc(I); 2904 case Instruction::SIToFP: 2905 return SelectIToFP(I, /*isSigned*/ true); 2906 case Instruction::UIToFP: 2907 return SelectIToFP(I, /*isSigned*/ false); 2908 case Instruction::FPToSI: 2909 return SelectFPToI(I, /*isSigned*/ true); 2910 case Instruction::FPToUI: 2911 return SelectFPToI(I, /*isSigned*/ false); 2912 case Instruction::Add: 2913 return SelectBinaryIntOp(I, ISD::ADD); 2914 case Instruction::Or: 2915 return SelectBinaryIntOp(I, ISD::OR); 2916 case Instruction::Sub: 2917 return SelectBinaryIntOp(I, ISD::SUB); 2918 case Instruction::FAdd: 2919 return SelectBinaryFPOp(I, ISD::FADD); 2920 case Instruction::FSub: 2921 return SelectBinaryFPOp(I, ISD::FSUB); 2922 case Instruction::FMul: 2923 return SelectBinaryFPOp(I, ISD::FMUL); 2924 case Instruction::SDiv: 2925 return SelectDiv(I, /*isSigned*/ true); 2926 case Instruction::UDiv: 2927 return SelectDiv(I, /*isSigned*/ false); 2928 case Instruction::SRem: 2929 return SelectRem(I, /*isSigned*/ true); 2930 case Instruction::URem: 2931 return SelectRem(I, /*isSigned*/ false); 2932 case Instruction::Call: 2933 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 2934 return SelectIntrinsicCall(*II); 2935 return SelectCall(I); 2936 case Instruction::Select: 2937 return SelectSelect(I); 2938 case Instruction::Ret: 2939 return SelectRet(I); 2940 case Instruction::Trunc: 2941 return SelectTrunc(I); 2942 case Instruction::ZExt: 2943 case Instruction::SExt: 2944 return SelectIntExt(I); 2945 case Instruction::Shl: 2946 return SelectShift(I, ARM_AM::lsl); 2947 case Instruction::LShr: 2948 return SelectShift(I, ARM_AM::lsr); 2949 case Instruction::AShr: 2950 return SelectShift(I, ARM_AM::asr); 2951 default: break; 2952 } 2953 return false; 2954} 2955 2956namespace { 2957// This table describes sign- and zero-extend instructions which can be 2958// folded into a preceding load. All of these extends have an immediate 2959// (sometimes a mask and sometimes a shift) that's applied after 2960// extension. 2961const struct FoldableLoadExtendsStruct { 2962 uint16_t Opc[2]; // ARM, Thumb. 
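 // [Editorial comment: value the extend's immediate operand must have to
 // be foldable — the mask (255) for the ANDri forms, 0 (no rotate) for
 // the SXT/UXT forms.]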
2963 uint8_t ExpectedImm; 2964 uint8_t isZExt : 1; 2965 uint8_t ExpectedVT : 7; 2966} FoldableLoadExtends[] = { 2967 { { ARM::SXTH, ARM::t2SXTH }, 0, 0, MVT::i16 }, 2968 { { ARM::UXTH, ARM::t2UXTH }, 0, 1, MVT::i16 }, 2969 { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 }, 2970 { { ARM::SXTB, ARM::t2SXTB }, 0, 0, MVT::i8 }, 2971 { { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 } 2972}; 2973} 2974 2975/// \brief The specified machine instr operand is a vreg, and that 2976/// vreg is being provided by the specified load instruction. If possible, 2977/// try to fold the load as an operand to the instruction, returning true if 2978/// successful. 2979bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, 2980 const LoadInst *LI) { 2981 // Verify we have a legal type before going any further. 2982 MVT VT; 2983 if (!isLoadTypeLegal(LI->getType(), VT)) 2984 return false; 2985 2986 // Combine load followed by zero- or sign-extend. 2987 // ldrb r1, [r0] ldrb r1, [r0] 2988 // uxtb r2, r1 => 2989 // mov r3, r2 mov r3, r1 2990 if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm()) 2991 return false; 2992 const uint64_t Imm = MI->getOperand(2).getImm(); 2993 2994 bool Found = false; 2995 bool isZExt; 2996 for (unsigned i = 0, e = array_lengthof(FoldableLoadExtends); 2997 i != e; ++i) { 2998 if (FoldableLoadExtends[i].Opc[isThumb2] == MI->getOpcode() && 2999 (uint64_t)FoldableLoadExtends[i].ExpectedImm == Imm && 3000 MVT((MVT::SimpleValueType)FoldableLoadExtends[i].ExpectedVT) == VT) { 3001 Found = true; 3002 isZExt = FoldableLoadExtends[i].isZExt; 3003 } 3004 } 3005 if (!Found) return false; 3006 3007 // See if we can handle this address. 3008 Address Addr; 3009 if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false; 3010 3011 unsigned ResultReg = MI->getOperand(0).getReg(); 3012 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false)) 3013 return false; 3014 MI->eraseFromParent(); 3015 return true; 3016} 3017 3018unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, 3019 unsigned Align, MVT VT) { 3020 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 3021 ARMConstantPoolConstant *CPV = 3022 ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); 3023 unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); 3024 3025 unsigned Opc; 3026 unsigned DestReg1 = createResultReg(TLI.getRegClassFor(VT)); 3027 // Load value. 3028 if (isThumb2) { 3029 DestReg1 = constrainOperandRegClass(TII.get(ARM::t2LDRpci), DestReg1, 0); 3030 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 3031 TII.get(ARM::t2LDRpci), DestReg1) 3032 .addConstantPoolIndex(Idx)); 3033 Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs; 3034 } else { 3035 // The extra immediate is for addrmode2. 3036 DestReg1 = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg1, 0); 3037 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 3038 DL, TII.get(ARM::LDRcp), DestReg1) 3039 .addConstantPoolIndex(Idx).addImm(0)); 3040 Opc = UseGOTOFF ? 
ARM::ADDrr : ARM::LDRrs; 3041 } 3042 3043 unsigned GlobalBaseReg = AFI->getGlobalBaseReg(); 3044 if (GlobalBaseReg == 0) { 3045 GlobalBaseReg = MRI.createVirtualRegister(TLI.getRegClassFor(VT)); 3046 AFI->setGlobalBaseReg(GlobalBaseReg); 3047 } 3048 3049 unsigned DestReg2 = createResultReg(TLI.getRegClassFor(VT)); 3050 DestReg2 = constrainOperandRegClass(TII.get(Opc), DestReg2, 0); 3051 DestReg1 = constrainOperandRegClass(TII.get(Opc), DestReg1, 1); 3052 GlobalBaseReg = constrainOperandRegClass(TII.get(Opc), GlobalBaseReg, 2); 3053 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 3054 DL, TII.get(Opc), DestReg2) 3055 .addReg(DestReg1) 3056 .addReg(GlobalBaseReg); 3057 if (!UseGOTOFF) 3058 MIB.addImm(0); 3059 AddOptionalDefs(MIB); 3060 3061 return DestReg2; 3062} 3063 3064bool ARMFastISel::FastLowerArguments() { 3065 if (!FuncInfo.CanLowerReturn) 3066 return false; 3067 3068 const Function *F = FuncInfo.Fn; 3069 if (F->isVarArg()) 3070 return false; 3071 3072 CallingConv::ID CC = F->getCallingConv(); 3073 switch (CC) { 3074 default: 3075 return false; 3076 case CallingConv::Fast: 3077 case CallingConv::C: 3078 case CallingConv::ARM_AAPCS_VFP: 3079 case CallingConv::ARM_AAPCS: 3080 case CallingConv::ARM_APCS: 3081 break; 3082 } 3083 3084 // Only handle simple cases. i.e. Up to 4 i8/i16/i32 scalar arguments 3085 // which are passed in r0 - r3. 3086 unsigned Idx = 1; 3087 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 3088 I != E; ++I, ++Idx) { 3089 if (Idx > 4) 3090 return false; 3091 3092 if (F->getAttributes().hasAttribute(Idx, Attribute::InReg) || 3093 F->getAttributes().hasAttribute(Idx, Attribute::StructRet) || 3094 F->getAttributes().hasAttribute(Idx, Attribute::ByVal)) 3095 return false; 3096 3097 Type *ArgTy = I->getType(); 3098 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) 3099 return false; 3100 3101 EVT ArgVT = TLI.getValueType(ArgTy); 3102 if (!ArgVT.isSimple()) return false; 3103 switch (ArgVT.getSimpleVT().SimpleTy) { 3104 case MVT::i8: 3105 case MVT::i16: 3106 case MVT::i32: 3107 break; 3108 default: 3109 return false; 3110 } 3111 } 3112 3113 3114 static const uint16_t GPRArgRegs[] = { 3115 ARM::R0, ARM::R1, ARM::R2, ARM::R3 3116 }; 3117 3118 const TargetRegisterClass *RC = &ARM::rGPRRegClass; 3119 Idx = 0; 3120 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 3121 I != E; ++I, ++Idx) { 3122 unsigned SrcReg = GPRArgRegs[Idx]; 3123 unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC); 3124 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy. 3125 // Without this, EmitLiveInCopies may eliminate the livein if its only 3126 // use is a bitcast (which isn't turned into an instruction). 3127 unsigned ResultReg = createResultReg(RC); 3128 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 3129 ResultReg).addReg(DstReg, getKillRegState(true)); 3130 UpdateValueMap(I, ResultReg); 3131 } 3132 3133 return true; 3134} 3135 3136namespace llvm { 3137 FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo, 3138 const TargetLibraryInfo *libInfo) { 3139 const TargetMachine &TM = funcInfo.MF->getTarget(); 3140 3141 const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>(); 3142 // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl. 
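// [Editorial note, not in the original source: the three |= tests below
// encode exactly that support matrix; any other subtarget/mode combination
// returns null and the caller falls back to SelectionDAG instruction
// selection.]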
3143 bool UseFastISel = false; 3144 UseFastISel |= Subtarget->isTargetIOS() && !Subtarget->isThumb1Only(); 3145 UseFastISel |= Subtarget->isTargetLinux() && !Subtarget->isThumb(); 3146 UseFastISel |= Subtarget->isTargetNaCl() && !Subtarget->isThumb(); 3147 3148 if (UseFastISel) { 3149 // iOS always has a FP for backtracking, force other targets 3150 // to keep their FP when doing FastISel. The emitted code is 3151 // currently superior, and in cases like test-suite's lencod 3152 // FastISel isn't quite correct when FP is eliminated. 3153 TM.Options.NoFramePointerElim = true; 3154 return new ARMFastISel(funcInfo, libInfo); 3155 } 3156 return 0; 3157 } 3158} 3159
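// [Editorial usage sketch, not in the original source: this factory is the
// hook the common fast-isel machinery uses at -O0, along the lines of:
//
//   // FuncInfo/LibInfo setup is hypothetical, for illustration only.
//   if (FastISel *FIS = ARM::createFastISel(FuncInfo, LibInfo)) {
//     // Selects whole IR instructions directly to MachineInstrs, falling
//     // back to the DAG path for anything TargetSelectInstruction rejects.
//   }
//
// Names outside this file are assumptions, not verified against the tree.]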