ARMFastISel.cpp revision 218893
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                   cl::desc("Turn off experimental ARM fast-isel support"),
                   cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;
    unsigned Scale;
    unsigned PlusReg;

    // Innocuous defaults for our address.
    Address()
      : BaseType(RegBase), Offset(0), Scale(0), PlusReg(0) {
      Base.Reg = 0;
    }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
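    // Each of these overrides routes the newly built MachineInstr through
    // AddOptionalDefs so that it picks up ARM's predicate and optional
    // CC-def operands before being returned.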
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectSIToFP(const Instruction *I);
    bool SelectFPToSI(const Instruction *I);
    bool SelectSDiv(const Instruction *I);
    bool SelectSRem(const Instruction *I);
    bool SelectCall(const Instruction *I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(const Type *Ty, MVT &VT);
    bool isLoadTypeLegal(const Type *Ty, MVT &VT);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);

    // Call handling routines.
  private:
    bool FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                        unsigned &ResultReg);
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

// If the machine instruction is predicable, go ahead and add the predicate
// operands; if it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate?
  if (TII.isPredicable(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate? Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    unsigned Opc = is64bit ? ARM::FCONSTD : ARM::FCONSTS;
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addFPImm(CFP));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getSExtValue())) {
    unsigned Opc = isThumb ? ARM::t2MOVi16 : ARM::MOVi16;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), DestReg)
                    .addImm(CI->getSExtValue()));
    return DestReg;
  }

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: No external globals for now.
  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) return 0;

  // TODO: Need more magic for ARM PIC.
  if (!isThumb && (RelocM == Reloc::PIC_)) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(GV->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(GV->getType());
  }

  // Grab index.
  unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
  unsigned Id = AFI->createPICLabelUId();
  ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, Id,
                                                       ARMCP::CPValue, PCAdj);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  // Load value.
  MachineInstrBuilder MIB;
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb) {
    unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx);
    if (RelocM == Reloc::PIC_)
      MIB.addImm(Id);
  } else {
    // The extra immediate is for addrmode2.
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                  DestReg)
          .addConstantPoolIndex(Idx)
          .addImm(0);
  }
  AddOptionalDefs(MIB);
  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(const Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(const Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
      break;
    case Instruction::BitCast: {
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    }
    case Instruction::IntToPtr: {
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::PtrToInt: {
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          SmallVector<const Value *, 4> Worklist;
          Worklist.push_back(Op);
          do {
            Op = Worklist.pop_back_val();
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
            } else if (isa<AddOperator>(Op) &&
                       isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
              // An add with a constant operand. Fold the constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Add the other operand back to the work list.
              Worklist.push_back(cast<AddOperator>(Op)->getOperand(0));
            } else
              goto unsupported_gep;
          } while (!Worklist.empty());
        }
      }

      // Try to grab the base operand now.
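      // Commit the folded constant offset, then recurse on the GEP's pointer
      // operand; on failure the saved Address is restored below.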
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Materialize the global variable's address into a reg which can
  // then be used later to load the variable.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
    if (Tmp == 0) return false;

    Addr.Base.Reg = Tmp;
    return true;
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
    default:
      assert(false && "Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      // Integer loads/stores handle 12-bit offsets.
      needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
                                        ARM::GPRRegisterClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    ARMCC::CondCodes Pred = ARMCC::AL;
    unsigned PredReg = 0;

    TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
                                        ARM::GPRRegisterClass;
    unsigned BaseReg = createResultReg(RC);

    if (!isThumb)
      emitARMRegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              BaseReg, Addr.Base.Reg, Addr.Offset,
                              Pred, PredReg,
                              static_cast<const ARMBaseInstrInfo&>(TII));
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                             BaseReg, Addr.Base.Reg, Addr.Offset, Pred, PredReg,
                             static_cast<const ARMBaseInstrInfo&>(TII));
    }
    Addr.Offset = 0;
    Addr.Base.Reg = BaseReg;
  }
}

void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
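  // (addrmode5 immediates are expressed in words, so byte offsets must be
  // divided by 4 before they are encoded.)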
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
                                  MachinePointerInfo::getFixedStack(FI, Offset),
                                  MachineMemOperand::MOLoad,
                                  MFI.getObjectSize(FI),
                                  MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores need an additional operand.
    if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);

    MIB.addImm(Addr.Offset);
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores need an additional operand.
    if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);

    MIB.addImm(Addr.Offset);
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i16:
      Opc = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::i8:
      Opc = isThumb ? ARM::t2LDRBi12 : ARM::LDRBi12;
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::i32:
      Opc = isThumb ? ARM::t2LDRi12 : ARM::LDRi12;
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::f32:
      Opc = ARM::VLDRS;
      RC = TLI.getRegClassFor(VT);
      break;
    case MVT::f64:
      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT);

  // Create the base instruction, then add the operands.
  ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB);
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr)) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
  unsigned StrOpc;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb ? ARM::tGPRRegisterClass :
                                               ARM::GPRRegisterClass);
      unsigned Opc = isThumb ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      StrOpc = isThumb ? ARM::t2STRBi12 : ARM::STRBi12;
      break;
    case MVT::i16:
      StrOpc = isThumb ? ARM::t2STRHi12 : ARM::STRH;
      break;
    case MVT::i32:
      StrOpc = isThumb ? ARM::t2STRi12 : ARM::STRi12;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRS;
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg, getKillRegState(true));
  AddLoadStoreOperands(VT, Addr, MIB);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr)) return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  // TODO: Factor this out.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
      MVT VT;
      const Type *Ty = CI->getOperand(0)->getType();
      if (!isTypeLegal(Ty, VT))
        return false;

      bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
      if (isFloat && !Subtarget->hasVFP2())
        return false;

      unsigned CmpOpc;
      switch (VT.SimpleTy) {
        default: return false;
        // TODO: Verify compares.
        case MVT::f32:
          CmpOpc = ARM::VCMPES;
          break;
        case MVT::f64:
          CmpOpc = ARM::VCMPED;
          break;
        case MVT::i32:
          CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
          break;
      }

      // Get the compare predicate.
      ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      unsigned Arg1 = getRegForValue(CI->getOperand(0));
      if (Arg1 == 0) return false;

      unsigned Arg2 = getRegForValue(CI->getOperand(1));
      if (Arg2 == 0) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(CmpOpc))
                      .addReg(Arg1).addReg(Arg2));

      // For floating point we need to move the result to a comparison register
      // that we can then use for branches.
      if (isFloat)
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                TII.get(ARM::FMSTAT)));

      unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // Re-set the flags just in case.
  unsigned CmpOpc = isThumb ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CmpReg).addImm(0));

  unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  MVT VT;
  const Type *Ty = CI->getOperand(0)->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned CmpOpc;
  unsigned CondReg;
  switch (VT.SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      CmpOpc = ARM::VCMPES;
      CondReg = ARM::FPSCR;
      break;
    case MVT::f64:
      CmpOpc = ARM::VCMPED;
      CondReg = ARM::FPSCR;
      break;
    case MVT::i32:
      CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
      CondReg = ARM::CPSR;
      break;
  }

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  unsigned Arg1 = getRegForValue(CI->getOperand(0));
  if (Arg1 == 0) return false;

  unsigned Arg2 = getRegForValue(CI->getOperand(1));
  if (Arg2 == 0) return false;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(Arg1).addReg(Arg2));

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (isFloat)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCi : ARM::MOVCCi;
  TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass
                                    : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero
    = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectSIToFP(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, Op);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = ARM::VSITOS;
  else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToSI(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  const Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  const Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
  else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
  else return false;

  // f64->s32 or f32->s32 both need an intermediate f32 reg.
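  // (VTOSIZS/VTOSIZD leave the integer result in an f32/SPR register;
  // ARMMoveToIntReg below copies it into a GPR.)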
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  unsigned CmpOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(1));
  unsigned ResultReg = createResultReg(RC);
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCr : ARM::MOVCCr;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
    .addReg(Op1Reg).addReg(Op2Reg)
    .addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectSDiv(const Instruction *I) {
  MVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SDIV_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectSRem(const Instruction *I) {
  MVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SREM_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SREM_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SREM_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SREM_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  const Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src,
                                 EVT SrcVT, unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                           Src, /*TODO: Kill=*/false);

  if (RR != 0) {
    ResultReg = RR;
    return true;
  } else
    return false;
}

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
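  // Walk the assigned locations: promote or extend each value as required,
  // then either copy it into its physical register or store it to its stack
  // slot at [sp + offset].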
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        bool Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                      Arg, ArgVT, Arg);
        assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
        Emitted = true;
        ArgVT = VA.getLocVT();
        break;
      }
      case CCValAssign::ZExt: {
        bool Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                      Arg, ArgVT, Arg);
        assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
        Emitted = true;
        ArgVT = VA.getLocVT();
        break;
      }
      case CCValAssign::AExt: {
        bool Emitted = FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
                                      Arg, ArgVT, Arg);
        if (!Emitted)
          Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                   Arg, ArgVT, Arg);
        if (!Emitted)
          Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                   Arg, ArgVT, Arg);

        assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
        ArgVT = VA.getLocVT();
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
    }
  }
  return true;
}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
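    // (An f64 returned under a soft-float ABI comes back split across two
    // GPRs; the VMOVDRR below reassembles it into a single D register.)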
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;
    // TODO: For now, don't try to handle cases where getLocInfo()
    // says Full but the types don't match.
    if (TLI.getValueType(RV->getType()) != VA.getValVT())
      return false;

    // Make the copy.
    unsigned SrcReg = Reg + VA.getValNo();
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

// A quick function that will emit a call for a named libcall in F with the
// vector of passed arguments for the Instruction in I. We can assume that we
// can emit a call for any libcall we can produce. This is an abridged version
// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  const Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // For now we're using BLX etc on the assumption that we have v5t ops.
  if (!Subtarget->hasV5TOps()) return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    const Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLXr9 for darwin, BLX otherwise. This uses V5 ops.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc;
  if (isThumb) {
    CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addExternalSymbol(TLI.getLibcallName(Call));
  } else {
    CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
                         .addExternalSymbol(TLI.getLibcallName(Call)));
  }

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectCall(const Instruction *I) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm or worry about intrinsics yet.
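  // (Returning false here sends these back to the SelectionDAG path.)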
bool ARMFastISel::SelectCall(const Instruction *I) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm or worry about intrinsics yet.
  if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;

  // Only handle global variable Callees that are direct calls.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel()))
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  const Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // For now we're using BLX etc on the assumption that we have v5t ops.
  // TODO: Maybe?
  if (!Subtarget->hasV5TOps()) return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;
    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    const Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT))
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call: tBLXi_r9/BLr9 for Darwin, tBLXi/BL otherwise. This uses
  // V5 ops.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc;
  if (isThumb) {
    CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
                         .addGlobalAddress(GV, 0, 0);
  } else {
    CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
                         .addGlobalAddress(GV, 0, 0));
  }
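
  // Illustrative sketch (hypothetical example, not from the original
  // source): for a direct call such as
  //   %r = call i32 @foo(i32 %a, i32 %b)
  // ProcessCallArgs will already have moved %a and %b into R0 and R1, the
  // BL/tBLXi built above references @foo directly through its global
  // address, and FinishCall below copies the R0 result into a fresh virtual
  // register for %r.
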
  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectSIToFP(I);
    case Instruction::FPToSI:
      return SelectFPToSI(I);
    case Instruction::FAdd:
      return SelectBinaryOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectSDiv(I);
    case Instruction::SRem:
      return SelectSRem(I);
    case Instruction::Call:
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    default: break;
  }
  return false;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-darwin.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // Darwin only for now, and only when not in Thumb1 mode.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}
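
// Note on usage (an assumption about the surrounding FastISel framework, not
// stated in this file): the -O0 SelectionDAG driver asks the target for a
// FastISel instance and is expected to reach ARM::createFastISel above;
// returning 0, as for non-Darwin or Thumb1 targets, simply makes instruction
// selection fall back to the full SelectionDAG path.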