//===- ARMFastISel.cpp - ARM FastISel implementation ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

namespace {

  // All possible address modes, plus some.
  struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType = RegBase;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset = 0;

    // Innocuous defaults for our address.
    Address() {
      Base.Reg = 0;
    }
  };

class ARMFastISel final : public FastISel {
  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  Module &M;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo)
        : FastISel(funcInfo, libInfo),
          Subtarget(
              &static_cast<const ARMSubtarget &>(funcInfo.MF->getSubtarget())),
          M(const_cast<Module &>(*funcInfo.Fn->getParent())),
          TM(funcInfo.MF->getTarget()), TII(*Subtarget->getInstrInfo()),
          TLI(*Subtarget->getTargetLowering()) {
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

  private:
    // Code from FastISel.cpp.

    unsigned fastEmitInst_r(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            unsigned Op0, bool Op0IsKill);
    unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             unsigned Op1, bool Op1IsKill);
    unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             uint64_t Imm);
    unsigned fastEmitInst_i(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            uint64_t Imm);

    // Backend specific FastISel code.

    bool fastSelectInstruction(const Instruction *I) override;
    unsigned fastMaterializeConstant(const Constant *C) override;
    unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
    bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                             const LoadInst *LI) override;
    bool fastLowerArguments() override;

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.

    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);
    bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);

    // Utility routines.

    bool isPositionIndependent() const;
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);
    bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                               unsigned Alignment);
    unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
    unsigned ARMMaterializeInt(const Constant *C, MVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
    unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(bool UseReg);
    unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT);

    const TargetLowering *getTargetLowering() { return &TLI; }

    // Call handling routines.

    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
                                  bool Return,
                                  bool isVarArg);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<Register> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<Register> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes,
                         bool isVarArg);
    unsigned getLibcallReg(const Twine &Name);
    bool FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes, bool isVarArg);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.

    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(MVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              MachineMemOperand::Flags Flags, bool useAM3);
};

} // end anonymous namespace

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're a thumb2 or not NEON function we'll be handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
       AFI->isThumb2Function())
    return MI->isPredicable();

  for (const MCOperandInfo &opInfo : MCID.operands())
    if (opInfo.isPredicate())
      return true;

  return false;
}

// If the machine is predicable go ahead and add the predicate operands, if
// it needs default CC operands add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyways.
  if (isARMNEONPred(MI))
    MIB.add(predOps(ARMCC::AL));

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR))
    MIB.add(CPSR ? t1CondCodeOp() : condCodeOp());
  return MIB;
}

unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
                            ResultReg).addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                   TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
                            ResultReg).addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), DestReg).addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2Base()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = DL.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx)
          .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return 0;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
      &ARM::GPRRegClass;
    unsigned ImmReg = createResultReg(RC);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
                                                 &ARM::GPRRegClass;
      unsigned ImmReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  unsigned ResultReg = 0;
  if (Subtarget->useMovt())
    ResultReg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());

  if (ResultReg)
    return ResultReg;

  // Load from constant pool.  For now 32-bit only.
  if (VT != MVT::i32)
    return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = DL.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);
  ResultReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::t2LDRpci), ResultReg)
                      .addConstantPoolIndex(Idx));
  else {
    // The extra immediate is for addrmode2.
    ResultReg = constrainOperandRegClass(TII.get(ARM::LDRcp), ResultReg, 0);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::LDRcp), ResultReg)
                      .addConstantPoolIndex(Idx)
                      .addImm(0));
  }
  return ResultReg;
}

bool ARMFastISel::isPositionIndependent() const {
  return TLI.isPositionIndependent();
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32 || GV->isThreadLocal()) return 0;

  // ROPI/RWPI not currently supported.
  if (Subtarget->isROPI() || Subtarget->isRWPI())
    return 0;

  bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
  const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                           : &ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);

  // FastISel TLS support on non-MachO is broken, punt to SelectionDAG.
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  if (!Subtarget->isTargetMachO() && IsThreadLocal) return 0;

  bool IsPositionIndependent = isPositionIndependent();
  // Use movw+movt when possible, it avoids constant pool entries.
  // Non-darwin targets only support static movt relocations in FastISel.
  if (Subtarget->useMovt() &&
      (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
    unsigned Opc;
    unsigned char TF = 0;
    if (Subtarget->isTargetMachO())
      TF = ARMII::MO_NONLAZY;

    if (IsPositionIndependent)
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
    else
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = DL.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = DL.getTypeAllocSize(GV->getType());
    }

    if (Subtarget->isTargetELF() && IsPositionIndependent)
      return ARMLowerPICELF(GV, Align, VT);

    // Grab index.
    unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                    DestReg).addConstantPoolIndex(Idx);
      if (IsPositionIndependent)
        MIB.addImm(Id);
      AddOptionalDefs(MIB);
    } else {
      // The extra immediate is for addrmode2.
      DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::LDRcp), DestReg)
                .addConstantPoolIndex(Idx)
                .addImm(0);
      AddOptionalDefs(MIB);

      if (IsPositionIndependent) {
        unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
        unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));

        MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                          DbgLoc, TII.get(Opc), NewDestReg)
                                      .addReg(DestReg)
                                      .addImm(Id);
        AddOptionalDefs(MIB);
        return NewDestReg;
      }
    }
  }

  if (IsIndirect) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::t2LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple()) return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)
                      .addFrameIndex(SI->second)
                      .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(DL, Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    break;
    case Instruction::BitCast:
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    case Instruction::IntToPtr:
      // Look past no-op inttoptrs.
      if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
          TLI.getPointerTy(DL))
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::PtrToInt:
      // Look past no-op ptrtoints.
      if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = GTI.getStructTypeOrNull()) {
          const StructLayout *SL = DL.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
          while (true) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (canFoldAddIntoGEP(U, Op)) {
              // A compatible add with a constant operand. Fold the constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
  bool needsLowering = false;
  switch (VT.SimpleTy) {
    default: llvm_unreachable("Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
                                             : &ARM::GPRRegClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)
                      .addFrameIndex(Addr.Base.FI)
                      .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       MachineMemOperand::Flags Flags,
                                       bool useAM3) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*FuncInfo.MF, FI, Offset), Flags,
        MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2Base()) return false;
      // Unaligned loads need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        needVMOV = true;
        VT = MVT::i32;
        Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
        RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      } else {
        Opc = ARM::VLDRS;
        RC = TLI.getRegClassFor(VT);
      }
      break;
    case MVT::f64:
      // Can load and store double precision even without FeatureFP64
      if (!Subtarget->hasVFP2Base()) return false;
      // FIXME: Unaligned loads need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load.  Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  const Value *SV = I->getOperand(0);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  Register ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  updateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
                                              : &ARM::GPRRegClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
      LLVM_FALLTHROUGH;
    }
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2Base()) return false;
      // Unaligned stores need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                TII.get(ARM::VMOVRS), MoveReg)
                        .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      } else {
        StrOpc = ARM::VSTRS;
      }
      break;
    case MVT::f64:
      // Can load and store double precision even without FeatureFP64
      if (!Subtarget->hasVFP2Base()) return false;
      // FIXME: Unaligned stores need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  const Value *PtrV = I->getOperand(1);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
      .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    fastEmitBranch(Target, DbgLoc);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
          .addReg(CmpReg)
          .addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
  .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  finishCondBranch(BI->getParent(), TBB, FBB);
  return true;
}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
ARM::tBRIND : ARM::BX; 1338326496Sdim assert(isThumb2 || Subtarget->hasV4TOps()); 1339326496Sdim 1340276479Sdim AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1341276479Sdim TII.get(Opc)).addReg(AddrReg)); 1342243830Sdim 1343243830Sdim const IndirectBrInst *IB = cast<IndirectBrInst>(I); 1344296417Sdim for (const BasicBlock *SuccBB : IB->successors()) 1345296417Sdim FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[SuccBB]); 1346243830Sdim 1347239462Sdim return true; 1348234353Sdim} 1349218893Sdim 1350234353Sdimbool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, 1351360784Sdim bool isZExt) { 1352234353Sdim Type *Ty = Src1Value->getType(); 1353288943Sdim EVT SrcEVT = TLI.getValueType(DL, Ty, true); 1354249423Sdim if (!SrcEVT.isSimple()) return false; 1355249423Sdim MVT SrcVT = SrcEVT.getSimpleVT(); 1356234353Sdim 1357353358Sdim if (Ty->isFloatTy() && !Subtarget->hasVFP2Base()) 1358218893Sdim return false; 1359218893Sdim 1360353358Sdim if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64())) 1361321369Sdim return false; 1362321369Sdim 1363234353Sdim // Check to see if the 2nd operand is a constant that we can encode directly 1364234353Sdim // in the compare. 1365234353Sdim int Imm = 0; 1366234353Sdim bool UseImm = false; 1367234353Sdim bool isNegativeImm = false; 1368234353Sdim // FIXME: At -O0 we don't have anything that canonicalizes operand order. 1369234353Sdim // Thus, Src1Value may be a ConstantInt, but we're missing it. 1370234353Sdim if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) { 1371234353Sdim if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 || 1372234353Sdim SrcVT == MVT::i1) { 1373234353Sdim const APInt &CIVal = ConstInt->getValue(); 1374234353Sdim Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue(); 1375234353Sdim // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather 1376276479Sdim // then a cmn, because there is no way to represent 2147483648 as a 1377234353Sdim // signed 32-bit int. 1378234353Sdim if (Imm < 0 && Imm != (int)0x80000000) { 1379234353Sdim isNegativeImm = true; 1380234353Sdim Imm = -Imm; 1381234353Sdim } 1382234353Sdim UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 1383234353Sdim (ARM_AM::getSOImmVal(Imm) != -1); 1384234353Sdim } 1385234353Sdim } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) { 1386234353Sdim if (SrcVT == MVT::f32 || SrcVT == MVT::f64) 1387234353Sdim if (ConstFP->isZero() && !ConstFP->isNegative()) 1388234353Sdim UseImm = true; 1389234353Sdim } 1390234353Sdim 1391218893Sdim unsigned CmpOpc; 1392234353Sdim bool isICmp = true; 1393234353Sdim bool needsExt = false; 1394249423Sdim switch (SrcVT.SimpleTy) { 1395218893Sdim default: return false; 1396218893Sdim // TODO: Verify compares. 1397218893Sdim case MVT::f32: 1398234353Sdim isICmp = false; 1399360784Sdim CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS; 1400218893Sdim break; 1401218893Sdim case MVT::f64: 1402234353Sdim isICmp = false; 1403360784Sdim CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD; 1404218893Sdim break; 1405234353Sdim case MVT::i1: 1406234353Sdim case MVT::i8: 1407234353Sdim case MVT::i16: 1408234353Sdim needsExt = true; 1409327952Sdim LLVM_FALLTHROUGH; 1410218893Sdim case MVT::i32: 1411234353Sdim if (isThumb2) { 1412234353Sdim if (!UseImm) 1413234353Sdim CmpOpc = ARM::t2CMPrr; 1414234353Sdim else 1415239462Sdim CmpOpc = isNegativeImm ? 
ARM::t2CMNri : ARM::t2CMPri; 1416234353Sdim } else { 1417234353Sdim if (!UseImm) 1418234353Sdim CmpOpc = ARM::CMPrr; 1419234353Sdim else 1420239462Sdim CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri; 1421234353Sdim } 1422218893Sdim break; 1423218893Sdim } 1424218893Sdim 1425234353Sdim unsigned SrcReg1 = getRegForValue(Src1Value); 1426234353Sdim if (SrcReg1 == 0) return false; 1427218893Sdim 1428234353Sdim unsigned SrcReg2 = 0; 1429234353Sdim if (!UseImm) { 1430234353Sdim SrcReg2 = getRegForValue(Src2Value); 1431234353Sdim if (SrcReg2 == 0) return false; 1432234353Sdim } 1433218893Sdim 1434234353Sdim // We have i1, i8, or i16, we need to either zero extend or sign extend. 1435234353Sdim if (needsExt) { 1436234353Sdim SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt); 1437234353Sdim if (SrcReg1 == 0) return false; 1438234353Sdim if (!UseImm) { 1439234353Sdim SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt); 1440234353Sdim if (SrcReg2 == 0) return false; 1441234353Sdim } 1442234353Sdim } 1443218893Sdim 1444261991Sdim const MCInstrDesc &II = TII.get(CmpOpc); 1445261991Sdim SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0); 1446234353Sdim if (!UseImm) { 1447261991Sdim SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1); 1448276479Sdim AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 1449234353Sdim .addReg(SrcReg1).addReg(SrcReg2)); 1450234353Sdim } else { 1451234353Sdim MachineInstrBuilder MIB; 1452276479Sdim MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 1453234353Sdim .addReg(SrcReg1); 1454218893Sdim 1455234353Sdim // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0. 1456234353Sdim if (isICmp) 1457234353Sdim MIB.addImm(Imm); 1458234353Sdim AddOptionalDefs(MIB); 1459234353Sdim } 1460218893Sdim 1461218893Sdim // For floating point we need to move the result to a comparison register 1462218893Sdim // that we can then use for branches. 1463234353Sdim if (Ty->isFloatTy() || Ty->isDoubleTy()) 1464276479Sdim AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1465218893Sdim TII.get(ARM::FMSTAT))); 1466234353Sdim return true; 1467234353Sdim} 1468218893Sdim 1469234353Sdimbool ARMFastISel::SelectCmp(const Instruction *I) { 1470234353Sdim const CmpInst *CI = cast<CmpInst>(I); 1471234353Sdim 1472234353Sdim // Get the compare predicate. 1473234353Sdim ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate()); 1474234353Sdim 1475234353Sdim // We may not handle every CC for now. 1476234353Sdim if (ARMPred == ARMCC::AL) return false; 1477234353Sdim 1478234353Sdim // Emit the compare. 1479360784Sdim if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 1480234353Sdim return false; 1481234353Sdim 1482218893Sdim // Now set a register based on the comparison. Explicitly set the predicates 1483218893Sdim // here. 1484234353Sdim unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; 1485280031Sdim const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass 1486280031Sdim : &ARM::GPRRegClass; 1487218893Sdim unsigned DestReg = createResultReg(RC); 1488234353Sdim Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0); 1489280031Sdim unsigned ZeroReg = fastMaterializeConstant(Zero); 1490234353Sdim // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR. 
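  // The conditional move below selects between the materialized zero and the
  // immediate 1, so DestReg ends up holding 1 exactly when ARMPred holds and
  // 0 otherwise.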
1491276479Sdim BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg) 1492218893Sdim .addReg(ZeroReg).addImm(1) 1493234353Sdim .addImm(ARMPred).addReg(ARM::CPSR); 1494218893Sdim 1495280031Sdim updateValueMap(I, DestReg); 1496218893Sdim return true; 1497218893Sdim} 1498218893Sdim 1499218893Sdimbool ARMFastISel::SelectFPExt(const Instruction *I) { 1500218893Sdim // Make sure we have VFP and that we're extending float to double. 1501353358Sdim if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false; 1502218893Sdim 1503218893Sdim Value *V = I->getOperand(0); 1504218893Sdim if (!I->getType()->isDoubleTy() || 1505218893Sdim !V->getType()->isFloatTy()) return false; 1506218893Sdim 1507218893Sdim unsigned Op = getRegForValue(V); 1508218893Sdim if (Op == 0) return false; 1509218893Sdim 1510239462Sdim unsigned Result = createResultReg(&ARM::DPRRegClass); 1511276479Sdim AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1512218893Sdim TII.get(ARM::VCVTDS), Result) 1513218893Sdim .addReg(Op)); 1514280031Sdim updateValueMap(I, Result); 1515218893Sdim return true; 1516218893Sdim} 1517218893Sdim 1518218893Sdimbool ARMFastISel::SelectFPTrunc(const Instruction *I) { 1519218893Sdim // Make sure we have VFP and that we're truncating double to float. 1520353358Sdim if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false; 1521218893Sdim 1522218893Sdim Value *V = I->getOperand(0); 1523218893Sdim if (!(I->getType()->isFloatTy() && 1524218893Sdim V->getType()->isDoubleTy())) return false; 1525218893Sdim 1526218893Sdim unsigned Op = getRegForValue(V); 1527218893Sdim if (Op == 0) return false; 1528218893Sdim 1529239462Sdim unsigned Result = createResultReg(&ARM::SPRRegClass); 1530276479Sdim AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1531218893Sdim TII.get(ARM::VCVTSD), Result) 1532218893Sdim .addReg(Op)); 1533280031Sdim updateValueMap(I, Result); 1534218893Sdim return true; 1535218893Sdim} 1536218893Sdim 1537234353Sdimbool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) { 1538218893Sdim // Make sure we have VFP. 1539353358Sdim if (!Subtarget->hasVFP2Base()) return false; 1540218893Sdim 1541218893Sdim MVT DstVT; 1542226633Sdim Type *Ty = I->getType(); 1543218893Sdim if (!isTypeLegal(Ty, DstVT)) 1544218893Sdim return false; 1545218893Sdim 1546234353Sdim Value *Src = I->getOperand(0); 1547288943Sdim EVT SrcEVT = TLI.getValueType(DL, Src->getType(), true); 1548249423Sdim if (!SrcEVT.isSimple()) 1549249423Sdim return false; 1550249423Sdim MVT SrcVT = SrcEVT.getSimpleVT(); 1551234353Sdim if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 1552223017Sdim return false; 1553223017Sdim 1554234353Sdim unsigned SrcReg = getRegForValue(Src); 1555234353Sdim if (SrcReg == 0) return false; 1556218893Sdim 1557234353Sdim // Handle sign-extension. 1558234353Sdim if (SrcVT == MVT::i16 || SrcVT == MVT::i8) { 1559249423Sdim SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32, 1560234353Sdim /*isZExt*/!isSigned); 1561234353Sdim if (SrcReg == 0) return false; 1562234353Sdim } 1563234353Sdim 1564218893Sdim // The conversion routine works on fp-reg to fp-reg and the operand above 1565218893Sdim // was an integer, move it to the fp registers if possible. 1566234353Sdim unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg); 1567218893Sdim if (FP == 0) return false; 1568218893Sdim 1569218893Sdim unsigned Opc; 1570234353Sdim if (Ty->isFloatTy()) Opc = isSigned ? 
ARM::VSITOS : ARM::VUITOS; 1571353358Sdim else if (Ty->isDoubleTy() && Subtarget->hasFP64()) 1572321369Sdim Opc = isSigned ? ARM::VSITOD : ARM::VUITOD; 1573226633Sdim else return false; 1574218893Sdim 1575218893Sdim unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT)); 1576276479Sdim AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1577276479Sdim TII.get(Opc), ResultReg).addReg(FP)); 1578280031Sdim updateValueMap(I, ResultReg); 1579218893Sdim return true; 1580218893Sdim} 1581218893Sdim 1582234353Sdimbool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) { 1583218893Sdim // Make sure we have VFP. 1584353358Sdim if (!Subtarget->hasVFP2Base()) return false; 1585218893Sdim 1586218893Sdim MVT DstVT; 1587226633Sdim Type *RetTy = I->getType(); 1588218893Sdim if (!isTypeLegal(RetTy, DstVT)) 1589218893Sdim return false; 1590218893Sdim 1591218893Sdim unsigned Op = getRegForValue(I->getOperand(0)); 1592218893Sdim if (Op == 0) return false; 1593218893Sdim 1594218893Sdim unsigned Opc; 1595226633Sdim Type *OpTy = I->getOperand(0)->getType(); 1596234353Sdim if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS; 1597353358Sdim else if (OpTy->isDoubleTy() && Subtarget->hasFP64()) 1598321369Sdim Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD; 1599226633Sdim else return false; 1600218893Sdim 1601234353Sdim // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg. 1602218893Sdim unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32)); 1603276479Sdim AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1604276479Sdim TII.get(Opc), ResultReg).addReg(Op)); 1605218893Sdim 1606218893Sdim // This result needs to be in an integer register, but the conversion only 1607218893Sdim // takes place in fp-regs. 1608218893Sdim unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg); 1609218893Sdim if (IntReg == 0) return false; 1610218893Sdim 1611280031Sdim updateValueMap(I, IntReg); 1612218893Sdim return true; 1613218893Sdim} 1614218893Sdim 1615218893Sdimbool ARMFastISel::SelectSelect(const Instruction *I) { 1616218893Sdim MVT VT; 1617218893Sdim if (!isTypeLegal(I->getType(), VT)) 1618218893Sdim return false; 1619218893Sdim 1620218893Sdim // Things need to be register sized for register moves. 1621218893Sdim if (VT != MVT::i32) return false; 1622218893Sdim 1623218893Sdim unsigned CondReg = getRegForValue(I->getOperand(0)); 1624218893Sdim if (CondReg == 0) return false; 1625218893Sdim unsigned Op1Reg = getRegForValue(I->getOperand(1)); 1626218893Sdim if (Op1Reg == 0) return false; 1627218893Sdim 1628234353Sdim // Check to see if we can use an immediate in the conditional move. 1629234353Sdim int Imm = 0; 1630234353Sdim bool UseImm = false; 1631234353Sdim bool isNegativeImm = false; 1632234353Sdim if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) { 1633321369Sdim assert(VT == MVT::i32 && "Expecting an i32."); 1634234353Sdim Imm = (int)ConstInt->getValue().getZExtValue(); 1635234353Sdim if (Imm < 0) { 1636234353Sdim isNegativeImm = true; 1637234353Sdim Imm = ~Imm; 1638234353Sdim } 1639234353Sdim UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 1640234353Sdim (ARM_AM::getSOImmVal(Imm) != -1); 1641234353Sdim } 1642234353Sdim 1643234353Sdim unsigned Op2Reg = 0; 1644234353Sdim if (!UseImm) { 1645234353Sdim Op2Reg = getRegForValue(I->getOperand(2)); 1646234353Sdim if (Op2Reg == 0) return false; 1647234353Sdim } 1648234353Sdim 1649288943Sdim unsigned TstOpc = isThumb2 ? 
ARM::t2TSTri : ARM::TSTri; 1650288943Sdim CondReg = constrainOperandRegClass(TII.get(TstOpc), CondReg, 0); 1651276479Sdim AddOptionalDefs( 1652288943Sdim BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc)) 1653276479Sdim .addReg(CondReg) 1654288943Sdim .addImm(1)); 1655234353Sdim 1656234353Sdim unsigned MovCCOpc; 1657249423Sdim const TargetRegisterClass *RC; 1658234353Sdim if (!UseImm) { 1659249423Sdim RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass; 1660234353Sdim MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr; 1661234353Sdim } else { 1662249423Sdim RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass; 1663249423Sdim if (!isNegativeImm) 1664234353Sdim MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; 1665249423Sdim else 1666234353Sdim MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi; 1667234353Sdim } 1668218893Sdim unsigned ResultReg = createResultReg(RC); 1669261991Sdim if (!UseImm) { 1670261991Sdim Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1); 1671261991Sdim Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2); 1672276479Sdim BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), 1673276479Sdim ResultReg) 1674276479Sdim .addReg(Op2Reg) 1675276479Sdim .addReg(Op1Reg) 1676276479Sdim .addImm(ARMCC::NE) 1677276479Sdim .addReg(ARM::CPSR); 1678261991Sdim } else { 1679261991Sdim Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1); 1680276479Sdim BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), 1681276479Sdim ResultReg) 1682276479Sdim .addReg(Op1Reg) 1683276479Sdim .addImm(Imm) 1684276479Sdim .addImm(ARMCC::EQ) 1685276479Sdim .addReg(ARM::CPSR); 1686261991Sdim } 1687280031Sdim updateValueMap(I, ResultReg); 1688218893Sdim return true; 1689218893Sdim} 1690218893Sdim 1691234353Sdimbool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) { 1692218893Sdim MVT VT; 1693226633Sdim Type *Ty = I->getType(); 1694218893Sdim if (!isTypeLegal(Ty, VT)) 1695218893Sdim return false; 1696218893Sdim 1697218893Sdim // If we have integer div support we should have selected this automagically. 1698218893Sdim // In case we have a real miss go ahead and return false and we'll pick 1699218893Sdim // it up later. 1700321369Sdim if (Subtarget->hasDivideInThumbMode()) 1701321369Sdim return false; 1702218893Sdim 1703218893Sdim // Otherwise emit a libcall. 1704218893Sdim RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 1705218893Sdim if (VT == MVT::i8) 1706234353Sdim LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8; 1707218893Sdim else if (VT == MVT::i16) 1708234353Sdim LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16; 1709218893Sdim else if (VT == MVT::i32) 1710234353Sdim LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32; 1711218893Sdim else if (VT == MVT::i64) 1712234353Sdim LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64; 1713218893Sdim else if (VT == MVT::i128) 1714234353Sdim LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128; 1715218893Sdim assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!"); 1716218893Sdim 1717218893Sdim return ARMEmitLibcall(I, LC); 1718218893Sdim} 1719218893Sdim 1720234353Sdimbool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) { 1721218893Sdim MVT VT; 1722226633Sdim Type *Ty = I->getType(); 1723218893Sdim if (!isTypeLegal(Ty, VT)) 1724218893Sdim return false; 1725218893Sdim 1726309124Sdim // Many ABIs do not provide a libcall for standalone remainder, so we need to 1727309124Sdim // use divrem (see the RTABI 4.3.1). 
Since FastISel can't handle non-double 1728309124Sdim // multi-reg returns, we'll have to bail out. 1729309124Sdim if (!TLI.hasStandaloneRem(VT)) { 1730309124Sdim return false; 1731309124Sdim } 1732309124Sdim 1733218893Sdim RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 1734218893Sdim if (VT == MVT::i8) 1735234353Sdim LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8; 1736218893Sdim else if (VT == MVT::i16) 1737234353Sdim LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16; 1738218893Sdim else if (VT == MVT::i32) 1739234353Sdim LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32; 1740218893Sdim else if (VT == MVT::i64) 1741234353Sdim LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64; 1742218893Sdim else if (VT == MVT::i128) 1743234353Sdim LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128; 1744218893Sdim assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!"); 1745218893Sdim 1746218893Sdim return ARMEmitLibcall(I, LC); 1747218893Sdim} 1748218893Sdim 1749234353Sdimbool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) { 1750288943Sdim EVT DestVT = TLI.getValueType(DL, I->getType(), true); 1751234353Sdim 1752234353Sdim // We can get here in the case when we have a binary operation on a non-legal 1753234353Sdim // type and the target independent selector doesn't know how to handle it. 1754234353Sdim if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) 1755234353Sdim return false; 1756239462Sdim 1757234353Sdim unsigned Opc; 1758234353Sdim switch (ISDOpcode) { 1759234353Sdim default: return false; 1760234353Sdim case ISD::ADD: 1761234353Sdim Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr; 1762234353Sdim break; 1763234353Sdim case ISD::OR: 1764234353Sdim Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr; 1765234353Sdim break; 1766234353Sdim case ISD::SUB: 1767234353Sdim Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr; 1768234353Sdim break; 1769234353Sdim } 1770234353Sdim 1771234353Sdim unsigned SrcReg1 = getRegForValue(I->getOperand(0)); 1772234353Sdim if (SrcReg1 == 0) return false; 1773234353Sdim 1774234353Sdim // TODO: Often the 2nd operand is an immediate, which can be encoded directly 1775234353Sdim // in the instruction, rather then materializing the value in a register. 1776234353Sdim unsigned SrcReg2 = getRegForValue(I->getOperand(1)); 1777234353Sdim if (SrcReg2 == 0) return false; 1778234353Sdim 1779261991Sdim unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass); 1780261991Sdim SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1); 1781261991Sdim SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2); 1782276479Sdim AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1783234353Sdim TII.get(Opc), ResultReg) 1784234353Sdim .addReg(SrcReg1).addReg(SrcReg2)); 1785280031Sdim updateValueMap(I, ResultReg); 1786234353Sdim return true; 1787234353Sdim} 1788234353Sdim 1789234353Sdimbool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) { 1790288943Sdim EVT FPVT = TLI.getValueType(DL, I->getType(), true); 1791249423Sdim if (!FPVT.isSimple()) return false; 1792249423Sdim MVT VT = FPVT.getSimpleVT(); 1793218893Sdim 1794288943Sdim // FIXME: Support vector types where possible. 1795288943Sdim if (VT.isVector()) 1796288943Sdim return false; 1797288943Sdim 1798218893Sdim // We can get here in the case when we want to use NEON for our fp 1799218893Sdim // operations, but can't figure out how to. Just use the vfp instructions 1800218893Sdim // if we have them. 1801218893Sdim // FIXME: It'd be nice to use NEON instructions. 
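  // Only FADD, FSUB and FMUL are handled below; any other opcode, or a target
  // without the required VFP support, makes this return false and leaves the
  // operation for SelectionDAG to handle.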
1802226633Sdim Type *Ty = I->getType(); 1803353358Sdim if (Ty->isFloatTy() && !Subtarget->hasVFP2Base()) 1804218893Sdim return false; 1805353358Sdim if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64())) 1806321369Sdim return false; 1807218893Sdim 1808218893Sdim unsigned Opc; 1809218893Sdim bool is64bit = VT == MVT::f64 || VT == MVT::i64; 1810218893Sdim switch (ISDOpcode) { 1811218893Sdim default: return false; 1812218893Sdim case ISD::FADD: 1813218893Sdim Opc = is64bit ? ARM::VADDD : ARM::VADDS; 1814218893Sdim break; 1815218893Sdim case ISD::FSUB: 1816218893Sdim Opc = is64bit ? ARM::VSUBD : ARM::VSUBS; 1817218893Sdim break; 1818218893Sdim case ISD::FMUL: 1819218893Sdim Opc = is64bit ? ARM::VMULD : ARM::VMULS; 1820218893Sdim break; 1821218893Sdim } 1822234353Sdim unsigned Op1 = getRegForValue(I->getOperand(0)); 1823234353Sdim if (Op1 == 0) return false; 1824234353Sdim 1825234353Sdim unsigned Op2 = getRegForValue(I->getOperand(1)); 1826234353Sdim if (Op2 == 0) return false; 1827234353Sdim 1828249423Sdim unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy)); 1829276479Sdim AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1830218893Sdim TII.get(Opc), ResultReg) 1831218893Sdim .addReg(Op1).addReg(Op2)); 1832280031Sdim updateValueMap(I, ResultReg); 1833218893Sdim return true; 1834218893Sdim} 1835218893Sdim 1836218893Sdim// Call Handling Code 1837218893Sdim 1838239462Sdim// This is largely taken directly from CCAssignFnForNode 1839218893Sdim// TODO: We may not support all of this. 1840239462SdimCCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, 1841239462Sdim bool Return, 1842239462Sdim bool isVarArg) { 1843218893Sdim switch (CC) { 1844218893Sdim default: 1845327952Sdim report_fatal_error("Unsupported calling convention"); 1846218893Sdim case CallingConv::Fast: 1847353358Sdim if (Subtarget->hasVFP2Base() && !isVarArg) { 1848239462Sdim if (!Subtarget->isAAPCS_ABI()) 1849239462Sdim return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); 1850239462Sdim // For AAPCS ABI targets, just use VFP variant of the calling convention. 1851239462Sdim return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1852239462Sdim } 1853314564Sdim LLVM_FALLTHROUGH; 1854218893Sdim case CallingConv::C: 1855309124Sdim case CallingConv::CXX_FAST_TLS: 1856218893Sdim // Use target triple & subtarget features to do actual dispatch. 1857218893Sdim if (Subtarget->isAAPCS_ABI()) { 1858353358Sdim if (Subtarget->hasVFP2Base() && 1859239462Sdim TM.Options.FloatABIType == FloatABI::Hard && !isVarArg) 1860218893Sdim return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1861218893Sdim else 1862218893Sdim return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1863296417Sdim } else { 1864296417Sdim return (Return ? RetCC_ARM_APCS: CC_ARM_APCS); 1865296417Sdim } 1866218893Sdim case CallingConv::ARM_AAPCS_VFP: 1867309124Sdim case CallingConv::Swift: 1868239462Sdim if (!isVarArg) 1869239462Sdim return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1870239462Sdim // Fall through to soft float variant, variadic functions don't 1871239462Sdim // use hard floating point ABI. 1872314564Sdim LLVM_FALLTHROUGH; 1873218893Sdim case CallingConv::ARM_AAPCS: 1874218893Sdim return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1875218893Sdim case CallingConv::ARM_APCS: 1876218893Sdim return (Return ? 
RetCC_ARM_APCS: CC_ARM_APCS); 1877239462Sdim case CallingConv::GHC: 1878239462Sdim if (Return) 1879327952Sdim report_fatal_error("Can't return in GHC call convention"); 1880239462Sdim else 1881239462Sdim return CC_ARM_APCS_GHC; 1882360784Sdim case CallingConv::CFGuard_Check: 1883360784Sdim return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check); 1884218893Sdim } 1885218893Sdim} 1886218893Sdim 1887218893Sdimbool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args, 1888360784Sdim SmallVectorImpl<Register> &ArgRegs, 1889218893Sdim SmallVectorImpl<MVT> &ArgVTs, 1890218893Sdim SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags, 1891360784Sdim SmallVectorImpl<Register> &RegArgs, 1892218893Sdim CallingConv::ID CC, 1893239462Sdim unsigned &NumBytes, 1894239462Sdim bool isVarArg) { 1895218893Sdim SmallVector<CCValAssign, 16> ArgLocs; 1896280031Sdim CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context); 1897239462Sdim CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, 1898239462Sdim CCAssignFnForCall(CC, false, isVarArg)); 1899218893Sdim 1900234353Sdim // Check that we can handle all of the arguments. If we can't, then bail out 1901234353Sdim // now before we add code to the MBB. 1902234353Sdim for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1903234353Sdim CCValAssign &VA = ArgLocs[i]; 1904234353Sdim MVT ArgVT = ArgVTs[VA.getValNo()]; 1905234353Sdim 1906234353Sdim // We don't handle NEON/vector parameters yet. 1907234353Sdim if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64) 1908234353Sdim return false; 1909234353Sdim 1910234353Sdim // Now copy/store arg to correct locations. 1911234353Sdim if (VA.isRegLoc() && !VA.needsCustom()) { 1912234353Sdim continue; 1913234353Sdim } else if (VA.needsCustom()) { 1914234353Sdim // TODO: We need custom lowering for vector (v2f64) args. 1915234353Sdim if (VA.getLocVT() != MVT::f64 || 1916234353Sdim // TODO: Only handle register args for now. 1917234353Sdim !VA.isRegLoc() || !ArgLocs[++i].isRegLoc()) 1918234353Sdim return false; 1919234353Sdim } else { 1920261991Sdim switch (ArgVT.SimpleTy) { 1921234353Sdim default: 1922234353Sdim return false; 1923234353Sdim case MVT::i1: 1924234353Sdim case MVT::i8: 1925234353Sdim case MVT::i16: 1926234353Sdim case MVT::i32: 1927234353Sdim break; 1928234353Sdim case MVT::f32: 1929353358Sdim if (!Subtarget->hasVFP2Base()) 1930234353Sdim return false; 1931234353Sdim break; 1932234353Sdim case MVT::f64: 1933353358Sdim if (!Subtarget->hasVFP2Base()) 1934234353Sdim return false; 1935234353Sdim break; 1936234353Sdim } 1937234353Sdim } 1938234353Sdim } 1939234353Sdim 1940234353Sdim // At the point, we are able to handle the call's arguments in fast isel. 1941234353Sdim 1942218893Sdim // Get a count of how many bytes are to be pushed on the stack. 1943218893Sdim NumBytes = CCInfo.getNextStackOffset(); 1944218893Sdim 1945218893Sdim // Issue CALLSEQ_START 1946224145Sdim unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); 1947276479Sdim AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1948218893Sdim TII.get(AdjStackDown)) 1949321369Sdim .addImm(NumBytes).addImm(0)); 1950218893Sdim 1951218893Sdim // Process the args. 
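  // For each argument: apply the promotion its CCValAssign asks for (sext,
  // zext/aext, or bitcast), then either copy it into its assigned register,
  // split a custom f64 assignment into two core registers with VMOVRRD, or
  // store it to its stack slot at the assigned offset from SP.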
1952218893Sdim for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1953218893Sdim CCValAssign &VA = ArgLocs[i]; 1954280031Sdim const Value *ArgVal = Args[VA.getValNo()]; 1955360784Sdim Register Arg = ArgRegs[VA.getValNo()]; 1956218893Sdim MVT ArgVT = ArgVTs[VA.getValNo()]; 1957218893Sdim 1958234353Sdim assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) && 1959234353Sdim "We don't handle NEON/vector parameters yet."); 1960218893Sdim 1961218893Sdim // Handle arg promotion, etc. 1962218893Sdim switch (VA.getLocInfo()) { 1963218893Sdim case CCValAssign::Full: break; 1964218893Sdim case CCValAssign::SExt: { 1965234353Sdim MVT DestVT = VA.getLocVT(); 1966234353Sdim Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false); 1967321369Sdim assert(Arg != 0 && "Failed to emit a sext"); 1968234353Sdim ArgVT = DestVT; 1969218893Sdim break; 1970218893Sdim } 1971234353Sdim case CCValAssign::AExt: 1972321369Sdim // Intentional fall-through. Handle AExt and ZExt. 1973218893Sdim case CCValAssign::ZExt: { 1974234353Sdim MVT DestVT = VA.getLocVT(); 1975234353Sdim Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true); 1976321369Sdim assert(Arg != 0 && "Failed to emit a zext"); 1977234353Sdim ArgVT = DestVT; 1978218893Sdim break; 1979218893Sdim } 1980218893Sdim case CCValAssign::BCvt: { 1981280031Sdim unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg, 1982218893Sdim /*TODO: Kill=*/false); 1983218893Sdim assert(BC != 0 && "Failed to emit a bitcast!"); 1984218893Sdim Arg = BC; 1985218893Sdim ArgVT = VA.getLocVT(); 1986218893Sdim break; 1987218893Sdim } 1988218893Sdim default: llvm_unreachable("Unknown arg promotion!"); 1989218893Sdim } 1990218893Sdim 1991218893Sdim // Now copy/store arg to correct locations. 1992218893Sdim if (VA.isRegLoc() && !VA.needsCustom()) { 1993276479Sdim BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1994276479Sdim TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg); 1995218893Sdim RegArgs.push_back(VA.getLocReg()); 1996218893Sdim } else if (VA.needsCustom()) { 1997218893Sdim // TODO: We need custom lowering for vector (v2f64) args. 1998234353Sdim assert(VA.getLocVT() == MVT::f64 && 1999234353Sdim "Custom lowering for v2f64 args not available"); 2000218893Sdim 2001321369Sdim // FIXME: ArgLocs[++i] may extend beyond ArgLocs.size() 2002218893Sdim CCValAssign &NextVA = ArgLocs[++i]; 2003218893Sdim 2004234353Sdim assert(VA.isRegLoc() && NextVA.isRegLoc() && 2005234353Sdim "We only handle register args!"); 2006218893Sdim 2007276479Sdim AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2008218893Sdim TII.get(ARM::VMOVRRD), VA.getLocReg()) 2009218893Sdim .addReg(NextVA.getLocReg(), RegState::Define) 2010218893Sdim .addReg(Arg)); 2011218893Sdim RegArgs.push_back(VA.getLocReg()); 2012218893Sdim RegArgs.push_back(NextVA.getLocReg()); 2013218893Sdim } else { 2014218893Sdim assert(VA.isMemLoc()); 2015218893Sdim // Need to store on the stack. 2016280031Sdim 2017280031Sdim // Don't emit stores for undef values. 
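  // (An undef argument may take any value, so leaving its stack slot
  // untouched is always acceptable.)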
2018280031Sdim if (isa<UndefValue>(ArgVal)) 2019280031Sdim continue; 2020280031Sdim 2021218893Sdim Address Addr; 2022218893Sdim Addr.BaseType = Address::RegBase; 2023218893Sdim Addr.Base.Reg = ARM::SP; 2024218893Sdim Addr.Offset = VA.getLocMemOffset(); 2025218893Sdim 2026234353Sdim bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet; 2027234353Sdim assert(EmitRet && "Could not emit a store for argument!"); 2028218893Sdim } 2029218893Sdim } 2030234353Sdim 2031218893Sdim return true; 2032218893Sdim} 2033218893Sdim 2034360784Sdimbool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs, 2035218893Sdim const Instruction *I, CallingConv::ID CC, 2036239462Sdim unsigned &NumBytes, bool isVarArg) { 2037218893Sdim // Issue CALLSEQ_END 2038224145Sdim unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); 2039276479Sdim AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2040218893Sdim TII.get(AdjStackUp)) 2041218893Sdim .addImm(NumBytes).addImm(0)); 2042218893Sdim 2043218893Sdim // Now the return value. 2044218893Sdim if (RetVT != MVT::isVoid) { 2045218893Sdim SmallVector<CCValAssign, 16> RVLocs; 2046280031Sdim CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context); 2047239462Sdim CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); 2048218893Sdim 2049218893Sdim // Copy all of the result registers out of their specified physreg. 2050218893Sdim if (RVLocs.size() == 2 && RetVT == MVT::f64) { 2051218893Sdim // For this move we copy into two registers and then move into the 2052218893Sdim // double fp reg we want. 2053249423Sdim MVT DestVT = RVLocs[0].getValVT(); 2054234353Sdim const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT); 2055360784Sdim Register ResultReg = createResultReg(DstRC); 2056276479Sdim AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2057218893Sdim TII.get(ARM::VMOVDRR), ResultReg) 2058218893Sdim .addReg(RVLocs[0].getLocReg()) 2059218893Sdim .addReg(RVLocs[1].getLocReg())); 2060218893Sdim 2061218893Sdim UsedRegs.push_back(RVLocs[0].getLocReg()); 2062218893Sdim UsedRegs.push_back(RVLocs[1].getLocReg()); 2063218893Sdim 2064218893Sdim // Finally update the result. 2065280031Sdim updateValueMap(I, ResultReg); 2066218893Sdim } else { 2067218893Sdim assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!"); 2068249423Sdim MVT CopyVT = RVLocs[0].getValVT(); 2069218893Sdim 2070234353Sdim // Special handling for extended integers. 2071234353Sdim if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16) 2072234353Sdim CopyVT = MVT::i32; 2073234353Sdim 2074234353Sdim const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT); 2075234353Sdim 2076360784Sdim Register ResultReg = createResultReg(DstRC); 2077276479Sdim BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2078276479Sdim TII.get(TargetOpcode::COPY), 2079218893Sdim ResultReg).addReg(RVLocs[0].getLocReg()); 2080218893Sdim UsedRegs.push_back(RVLocs[0].getLocReg()); 2081218893Sdim 2082218893Sdim // Finally update the result. 
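  // Record ResultReg as the value of the call so later uses of the call's
  // result read this vreg.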
2083280031Sdim updateValueMap(I, ResultReg); 2084218893Sdim } 2085218893Sdim } 2086218893Sdim 2087218893Sdim return true; 2088218893Sdim} 2089218893Sdim 2090218893Sdimbool ARMFastISel::SelectRet(const Instruction *I) { 2091218893Sdim const ReturnInst *Ret = cast<ReturnInst>(I); 2092218893Sdim const Function &F = *I->getParent()->getParent(); 2093218893Sdim 2094218893Sdim if (!FuncInfo.CanLowerReturn) 2095218893Sdim return false; 2096218893Sdim 2097309124Sdim if (TLI.supportSwiftError() && 2098309124Sdim F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) 2099309124Sdim return false; 2100309124Sdim 2101296417Sdim if (TLI.supportSplitCSR(FuncInfo.MF)) 2102296417Sdim return false; 2103296417Sdim 2104249423Sdim // Build a list of return value registers. 2105249423Sdim SmallVector<unsigned, 4> RetRegs; 2106249423Sdim 2107218893Sdim CallingConv::ID CC = F.getCallingConv(); 2108218893Sdim if (Ret->getNumOperands() > 0) { 2109218893Sdim SmallVector<ISD::OutputArg, 4> Outs; 2110341825Sdim GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL); 2111218893Sdim 2112218893Sdim // Analyze operands of the call, assigning locations to each operand. 2113218893Sdim SmallVector<CCValAssign, 16> ValLocs; 2114280031Sdim CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext()); 2115239462Sdim CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */, 2116239462Sdim F.isVarArg())); 2117218893Sdim 2118218893Sdim const Value *RV = Ret->getOperand(0); 2119218893Sdim unsigned Reg = getRegForValue(RV); 2120218893Sdim if (Reg == 0) 2121218893Sdim return false; 2122218893Sdim 2123218893Sdim // Only handle a single return value for now. 2124218893Sdim if (ValLocs.size() != 1) 2125218893Sdim return false; 2126218893Sdim 2127218893Sdim CCValAssign &VA = ValLocs[0]; 2128218893Sdim 2129218893Sdim // Don't bother handling odd stuff for now. 2130218893Sdim if (VA.getLocInfo() != CCValAssign::Full) 2131218893Sdim return false; 2132218893Sdim // Only handle register returns for now. 2133218893Sdim if (!VA.isRegLoc()) 2134218893Sdim return false; 2135218893Sdim 2136234353Sdim unsigned SrcReg = Reg + VA.getValNo(); 2137288943Sdim EVT RVEVT = TLI.getValueType(DL, RV->getType()); 2138249423Sdim if (!RVEVT.isSimple()) return false; 2139249423Sdim MVT RVVT = RVEVT.getSimpleVT(); 2140249423Sdim MVT DestVT = VA.getValVT(); 2141234353Sdim // Special handling for extended integers. 2142234353Sdim if (RVVT != DestVT) { 2143234353Sdim if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16) 2144234353Sdim return false; 2145234353Sdim 2146234353Sdim assert(DestVT == MVT::i32 && "ARM should always ext to i32"); 2147234353Sdim 2148234353Sdim // Perform extension if flagged as either zext or sext. Otherwise, do 2149234353Sdim // nothing. 2150234353Sdim if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) { 2151234353Sdim SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt()); 2152234353Sdim if (SrcReg == 0) return false; 2153234353Sdim } 2154234353Sdim } 2155234353Sdim 2156218893Sdim // Make the copy. 2157360784Sdim Register DstReg = VA.getLocReg(); 2158218893Sdim const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg); 2159218893Sdim // Avoid a cross-class copy. This is very unlikely. 2160218893Sdim if (!SrcRC->contains(DstReg)) 2161218893Sdim return false; 2162276479Sdim BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2163276479Sdim TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg); 2164218893Sdim 2165249423Sdim // Add register to return instruction. 
2166249423Sdim RetRegs.push_back(VA.getLocReg()); 2167218893Sdim } 2168218893Sdim 2169276479Sdim MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2170326496Sdim TII.get(Subtarget->getReturnOpcode())); 2171249423Sdim AddOptionalDefs(MIB); 2172321369Sdim for (unsigned R : RetRegs) 2173321369Sdim MIB.addReg(R, RegState::Implicit); 2174218893Sdim return true; 2175218893Sdim} 2176218893Sdim 2177239462Sdimunsigned ARMFastISel::ARMSelectCallOp(bool UseReg) { 2178239462Sdim if (UseReg) 2179239462Sdim return isThumb2 ? ARM::tBLXr : ARM::BLX; 2180239462Sdim else 2181239462Sdim return isThumb2 ? ARM::tBL : ARM::BL; 2182219077Sdim} 2183219077Sdim 2184239462Sdimunsigned ARMFastISel::getLibcallReg(const Twine &Name) { 2185261991Sdim // Manually compute the global's type to avoid building it when unnecessary. 2186261991Sdim Type *GVTy = Type::getInt32PtrTy(*Context, /*AS=*/0); 2187288943Sdim EVT LCREVT = TLI.getValueType(DL, GVTy); 2188261991Sdim if (!LCREVT.isSimple()) return 0; 2189261991Sdim 2190276479Sdim GlobalValue *GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false, 2191276479Sdim GlobalValue::ExternalLinkage, nullptr, 2192276479Sdim Name); 2193261991Sdim assert(GV->getType() == GVTy && "We miscomputed the type for the global!"); 2194249423Sdim return ARMMaterializeGV(GV, LCREVT.getSimpleVT()); 2195239462Sdim} 2196239462Sdim 2197218893Sdim// A quick function that will emit a call for a named libcall in F with the 2198218893Sdim// vector of passed arguments for the Instruction in I. We can assume that we 2199218893Sdim// can emit a call for any libcall we can produce. This is an abridged version 2200218893Sdim// of the full call infrastructure since we won't need to worry about things 2201218893Sdim// like computed function pointers or strange arguments at call sites. 2202218893Sdim// TODO: Try to unify this and the normal call bits for ARM, then try to unify 2203218893Sdim// with X86. 2204218893Sdimbool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { 2205218893Sdim CallingConv::ID CC = TLI.getLibcallCallingConv(Call); 2206218893Sdim 2207218893Sdim // Handle *simple* calls for now. 2208226633Sdim Type *RetTy = I->getType(); 2209218893Sdim MVT RetVT; 2210218893Sdim if (RetTy->isVoidTy()) 2211218893Sdim RetVT = MVT::isVoid; 2212218893Sdim else if (!isTypeLegal(RetTy, RetVT)) 2213218893Sdim return false; 2214218893Sdim 2215239462Sdim // Can't handle non-double multi-reg retvals. 2216239462Sdim if (RetVT != MVT::isVoid && RetVT != MVT::i32) { 2217239462Sdim SmallVector<CCValAssign, 16> RVLocs; 2218280031Sdim CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context); 2219239462Sdim CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false)); 2220239462Sdim if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2221239462Sdim return false; 2222239462Sdim } 2223218893Sdim 2224218893Sdim // Set up the argument vectors. 
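  // Args, ArgRegs, ArgVTs and ArgFlags are kept index-aligned and are
  // consumed together by ProcessCallArgs below.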
2225218893Sdim SmallVector<Value*, 8> Args; 2226360784Sdim SmallVector<Register, 8> ArgRegs; 2227218893Sdim SmallVector<MVT, 8> ArgVTs; 2228218893Sdim SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2229218893Sdim Args.reserve(I->getNumOperands()); 2230218893Sdim ArgRegs.reserve(I->getNumOperands()); 2231218893Sdim ArgVTs.reserve(I->getNumOperands()); 2232218893Sdim ArgFlags.reserve(I->getNumOperands()); 2233321369Sdim for (Value *Op : I->operands()) { 2234218893Sdim unsigned Arg = getRegForValue(Op); 2235218893Sdim if (Arg == 0) return false; 2236218893Sdim 2237226633Sdim Type *ArgTy = Op->getType(); 2238218893Sdim MVT ArgVT; 2239218893Sdim if (!isTypeLegal(ArgTy, ArgVT)) return false; 2240218893Sdim 2241218893Sdim ISD::ArgFlagsTy Flags; 2242360784Sdim Flags.setOrigAlign(Align(DL.getABITypeAlignment(ArgTy))); 2243218893Sdim 2244218893Sdim Args.push_back(Op); 2245218893Sdim ArgRegs.push_back(Arg); 2246218893Sdim ArgVTs.push_back(ArgVT); 2247218893Sdim ArgFlags.push_back(Flags); 2248218893Sdim } 2249218893Sdim 2250218893Sdim // Handle the arguments now that we've gotten them. 2251360784Sdim SmallVector<Register, 4> RegArgs; 2252218893Sdim unsigned NumBytes; 2253239462Sdim if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, 2254239462Sdim RegArgs, CC, NumBytes, false)) 2255218893Sdim return false; 2256218893Sdim 2257360784Sdim Register CalleeReg; 2258288943Sdim if (Subtarget->genLongCalls()) { 2259239462Sdim CalleeReg = getLibcallReg(TLI.getLibcallName(Call)); 2260239462Sdim if (CalleeReg == 0) return false; 2261239462Sdim } 2262239462Sdim 2263234353Sdim // Issue the call. 2264288943Sdim unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls()); 2265239462Sdim MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2266276479Sdim DbgLoc, TII.get(CallOpc)); 2267243830Sdim // BL / BLX don't take a predicate, but tBL / tBLX do. 2268243830Sdim if (isThumb2) 2269321369Sdim MIB.add(predOps(ARMCC::AL)); 2270288943Sdim if (Subtarget->genLongCalls()) 2271243830Sdim MIB.addReg(CalleeReg); 2272243830Sdim else 2273243830Sdim MIB.addExternalSymbol(TLI.getLibcallName(Call)); 2274239462Sdim 2275218893Sdim // Add implicit physical register uses to the call. 2276360784Sdim for (Register R : RegArgs) 2277321369Sdim MIB.addReg(R, RegState::Implicit); 2278218893Sdim 2279234353Sdim // Add a register mask with the call-preserved registers. 2280234353Sdim // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2281288943Sdim MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC)); 2282234353Sdim 2283218893Sdim // Finish off the call including any return values. 2284360784Sdim SmallVector<Register, 4> UsedRegs; 2285239462Sdim if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false; 2286218893Sdim 2287218893Sdim // Set all unused physreg defs as dead. 2288218893Sdim static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2289218893Sdim 2290218893Sdim return true; 2291218893Sdim} 2292218893Sdim 2293234353Sdimbool ARMFastISel::SelectCall(const Instruction *I, 2294276479Sdim const char *IntrMemName = nullptr) { 2295218893Sdim const CallInst *CI = cast<CallInst>(I); 2296218893Sdim const Value *Callee = CI->getCalledValue(); 2297218893Sdim 2298234353Sdim // Can't handle inline asm. 2299234353Sdim if (isa<InlineAsm>(Callee)) return false; 2300218893Sdim 2301249423Sdim // Allow SelectionDAG isel to handle tail calls. 2302249423Sdim if (CI->isTailCall()) return false; 2303249423Sdim 2304218893Sdim // Check the calling convention. 
2305218893Sdim ImmutableCallSite CS(CI); 2306218893Sdim CallingConv::ID CC = CS.getCallingConv(); 2307218893Sdim 2308218893Sdim // TODO: Avoid some calling conventions? 2309218893Sdim 2310309124Sdim FunctionType *FTy = CS.getFunctionType(); 2311239462Sdim bool isVarArg = FTy->isVarArg(); 2312218893Sdim 2313218893Sdim // Handle *simple* calls for now. 2314226633Sdim Type *RetTy = I->getType(); 2315218893Sdim MVT RetVT; 2316218893Sdim if (RetTy->isVoidTy()) 2317218893Sdim RetVT = MVT::isVoid; 2318234353Sdim else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 && 2319234353Sdim RetVT != MVT::i8 && RetVT != MVT::i1) 2320218893Sdim return false; 2321218893Sdim 2322239462Sdim // Can't handle non-double multi-reg retvals. 2323239462Sdim if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 && 2324239462Sdim RetVT != MVT::i16 && RetVT != MVT::i32) { 2325239462Sdim SmallVector<CCValAssign, 16> RVLocs; 2326280031Sdim CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context); 2327239462Sdim CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); 2328239462Sdim if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2329239462Sdim return false; 2330239462Sdim } 2331221345Sdim 2332218893Sdim // Set up the argument vectors. 2333218893Sdim SmallVector<Value*, 8> Args; 2334360784Sdim SmallVector<Register, 8> ArgRegs; 2335218893Sdim SmallVector<MVT, 8> ArgVTs; 2336218893Sdim SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2337234353Sdim unsigned arg_size = CS.arg_size(); 2338234353Sdim Args.reserve(arg_size); 2339234353Sdim ArgRegs.reserve(arg_size); 2340234353Sdim ArgVTs.reserve(arg_size); 2341234353Sdim ArgFlags.reserve(arg_size); 2342218893Sdim for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); 2343218893Sdim i != e; ++i) { 2344234353Sdim // If we're lowering a memory intrinsic instead of a regular call, skip the 2345341825Sdim // last argument, which shouldn't be passed to the underlying function. 2346341825Sdim if (IntrMemName && e - i <= 1) 2347234353Sdim break; 2348218893Sdim 2349218893Sdim ISD::ArgFlagsTy Flags; 2350321369Sdim unsigned ArgIdx = i - CS.arg_begin(); 2351321369Sdim if (CS.paramHasAttr(ArgIdx, Attribute::SExt)) 2352218893Sdim Flags.setSExt(); 2353321369Sdim if (CS.paramHasAttr(ArgIdx, Attribute::ZExt)) 2354218893Sdim Flags.setZExt(); 2355218893Sdim 2356234353Sdim // FIXME: Only handle *easy* calls for now. 2357321369Sdim if (CS.paramHasAttr(ArgIdx, Attribute::InReg) || 2358321369Sdim CS.paramHasAttr(ArgIdx, Attribute::StructRet) || 2359321369Sdim CS.paramHasAttr(ArgIdx, Attribute::SwiftSelf) || 2360321369Sdim CS.paramHasAttr(ArgIdx, Attribute::SwiftError) || 2361321369Sdim CS.paramHasAttr(ArgIdx, Attribute::Nest) || 2362321369Sdim CS.paramHasAttr(ArgIdx, Attribute::ByVal)) 2363218893Sdim return false; 2364218893Sdim 2365226633Sdim Type *ArgTy = (*i)->getType(); 2366218893Sdim MVT ArgVT; 2367234353Sdim if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 && 2368234353Sdim ArgVT != MVT::i1) 2369218893Sdim return false; 2370234353Sdim 2371360784Sdim Register Arg = getRegForValue(*i); 2372360784Sdim if (!Arg.isValid()) 2373234353Sdim return false; 2374234353Sdim 2375360784Sdim Flags.setOrigAlign(Align(DL.getABITypeAlignment(ArgTy))); 2376218893Sdim 2377218893Sdim Args.push_back(*i); 2378218893Sdim ArgRegs.push_back(Arg); 2379218893Sdim ArgVTs.push_back(ArgVT); 2380218893Sdim ArgFlags.push_back(Flags); 2381218893Sdim } 2382218893Sdim 2383218893Sdim // Handle the arguments now that we've gotten them. 
2384360784Sdim SmallVector<Register, 4> RegArgs; 2385218893Sdim unsigned NumBytes; 2386239462Sdim if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, 2387239462Sdim RegArgs, CC, NumBytes, isVarArg)) 2388218893Sdim return false; 2389218893Sdim 2390239462Sdim bool UseReg = false; 2391239462Sdim const GlobalValue *GV = dyn_cast<GlobalValue>(Callee); 2392288943Sdim if (!GV || Subtarget->genLongCalls()) UseReg = true; 2393239462Sdim 2394360784Sdim Register CalleeReg; 2395239462Sdim if (UseReg) { 2396239462Sdim if (IntrMemName) 2397239462Sdim CalleeReg = getLibcallReg(IntrMemName); 2398239462Sdim else 2399239462Sdim CalleeReg = getRegForValue(Callee); 2400239462Sdim 2401239462Sdim if (CalleeReg == 0) return false; 2402239462Sdim } 2403239462Sdim 2404234353Sdim // Issue the call. 2405239462Sdim unsigned CallOpc = ARMSelectCallOp(UseReg); 2406239462Sdim MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2407276479Sdim DbgLoc, TII.get(CallOpc)); 2408239462Sdim 2409243830Sdim // ARM calls don't take a predicate, but tBL / tBLX do. 2410243830Sdim if(isThumb2) 2411321369Sdim MIB.add(predOps(ARMCC::AL)); 2412243830Sdim if (UseReg) 2413243830Sdim MIB.addReg(CalleeReg); 2414243830Sdim else if (!IntrMemName) 2415309124Sdim MIB.addGlobalAddress(GV, 0, 0); 2416243830Sdim else 2417309124Sdim MIB.addExternalSymbol(IntrMemName, 0); 2418239462Sdim 2419218893Sdim // Add implicit physical register uses to the call. 2420360784Sdim for (Register R : RegArgs) 2421321369Sdim MIB.addReg(R, RegState::Implicit); 2422218893Sdim 2423234353Sdim // Add a register mask with the call-preserved registers. 2424234353Sdim // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2425288943Sdim MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC)); 2426234353Sdim 2427218893Sdim // Finish off the call including any return values. 2428360784Sdim SmallVector<Register, 4> UsedRegs; 2429239462Sdim if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg)) 2430239462Sdim return false; 2431218893Sdim 2432218893Sdim // Set all unused physreg defs as dead. 2433218893Sdim static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2434218893Sdim 2435218893Sdim return true; 2436234353Sdim} 2437218893Sdim 2438234353Sdimbool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) { 2439234353Sdim return Len <= 16; 2440218893Sdim} 2441218893Sdim 2442234353Sdimbool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, 2443249423Sdim uint64_t Len, unsigned Alignment) { 2444234353Sdim // Make sure we don't bloat code by inlining very large memcpy's. 2445234353Sdim if (!ARMIsMemCpySmall(Len)) 2446234353Sdim return false; 2447223017Sdim 2448234353Sdim while (Len) { 2449234353Sdim MVT VT; 2450249423Sdim if (!Alignment || Alignment >= 4) { 2451249423Sdim if (Len >= 4) 2452249423Sdim VT = MVT::i32; 2453249423Sdim else if (Len >= 2) 2454249423Sdim VT = MVT::i16; 2455249423Sdim else { 2456321369Sdim assert(Len == 1 && "Expected a length of 1!"); 2457249423Sdim VT = MVT::i8; 2458249423Sdim } 2459249423Sdim } else { 2460249423Sdim // Bound based on alignment. 
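      // (With 2-byte alignment, halfword accesses are used where the
      // remaining length allows; any other small alignment falls back to
      // byte-sized accesses.)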
2461249423Sdim if (Len >= 2 && Alignment == 2) 2462249423Sdim VT = MVT::i16; 2463249423Sdim else { 2464249423Sdim VT = MVT::i8; 2465249423Sdim } 2466234353Sdim } 2467223017Sdim 2468234353Sdim bool RV; 2469360784Sdim Register ResultReg; 2470234353Sdim RV = ARMEmitLoad(VT, ResultReg, Src); 2471321369Sdim assert(RV && "Should be able to handle this load."); 2472234353Sdim RV = ARMEmitStore(VT, ResultReg, Dest); 2473321369Sdim assert(RV && "Should be able to handle this store."); 2474234353Sdim (void)RV; 2475234353Sdim 2476234353Sdim unsigned Size = VT.getSizeInBits()/8; 2477234353Sdim Len -= Size; 2478234353Sdim Dest.Offset += Size; 2479234353Sdim Src.Offset += Size; 2480234353Sdim } 2481234353Sdim 2482234353Sdim return true; 2483234353Sdim} 2484234353Sdim 2485234353Sdimbool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) { 2486234353Sdim // FIXME: Handle more intrinsics. 2487234353Sdim switch (I.getIntrinsicID()) { 2488234353Sdim default: return false; 2489239462Sdim case Intrinsic::frameaddress: { 2490314564Sdim MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo(); 2491314564Sdim MFI.setFrameAddressIsTaken(true); 2492239462Sdim 2493280031Sdim unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12; 2494280031Sdim const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass 2495280031Sdim : &ARM::GPRRegClass; 2496239462Sdim 2497239462Sdim const ARMBaseRegisterInfo *RegInfo = 2498288943Sdim static_cast<const ARMBaseRegisterInfo *>(Subtarget->getRegisterInfo()); 2499360784Sdim Register FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF)); 2500239462Sdim unsigned SrcReg = FramePtr; 2501239462Sdim 2502239462Sdim // Recursively load frame address 2503239462Sdim // ldr r0 [fp] 2504239462Sdim // ldr r0 [r0] 2505239462Sdim // ldr r0 [r0] 2506239462Sdim // ... 2507239462Sdim unsigned DestReg; 2508239462Sdim unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue(); 2509239462Sdim while (Depth--) { 2510239462Sdim DestReg = createResultReg(RC); 2511276479Sdim AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2512239462Sdim TII.get(LdrOpc), DestReg) 2513239462Sdim .addReg(SrcReg).addImm(0)); 2514239462Sdim SrcReg = DestReg; 2515239462Sdim } 2516280031Sdim updateValueMap(&I, SrcReg); 2517239462Sdim return true; 2518239462Sdim } 2519234353Sdim case Intrinsic::memcpy: 2520234353Sdim case Intrinsic::memmove: { 2521234353Sdim const MemTransferInst &MTI = cast<MemTransferInst>(I); 2522234353Sdim // Don't handle volatile. 2523234353Sdim if (MTI.isVolatile()) 2524223017Sdim return false; 2525234353Sdim 2526234353Sdim // Disable inlining for memmove before calls to ComputeAddress. Otherwise, 2527234353Sdim // we would emit dead code because we don't currently handle memmoves. 2528234353Sdim bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy); 2529234353Sdim if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) { 2530234353Sdim // Small memcpy's are common enough that we want to do them without a call 2531234353Sdim // if possible. 
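      // For example, a 7-byte copy with 4-byte alignment would be expanded by
      // ARMTryEmitSmallMemCpy above into an i32, an i16 and an i8 load/store
      // pair instead of a libcall.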
2532234353Sdim uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue(); 2533234353Sdim if (ARMIsMemCpySmall(Len)) { 2534234353Sdim Address Dest, Src; 2535234353Sdim if (!ARMComputeAddress(MTI.getRawDest(), Dest) || 2536234353Sdim !ARMComputeAddress(MTI.getRawSource(), Src)) 2537234353Sdim return false; 2538341825Sdim unsigned Alignment = MinAlign(MTI.getDestAlignment(), 2539341825Sdim MTI.getSourceAlignment()); 2540249423Sdim if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment)) 2541234353Sdim return true; 2542234353Sdim } 2543234353Sdim } 2544239462Sdim 2545234353Sdim if (!MTI.getLength()->getType()->isIntegerTy(32)) 2546223017Sdim return false; 2547239462Sdim 2548234353Sdim if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255) 2549234353Sdim return false; 2550223017Sdim 2551234353Sdim const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove"; 2552234353Sdim return SelectCall(&I, IntrMemName); 2553234353Sdim } 2554234353Sdim case Intrinsic::memset: { 2555234353Sdim const MemSetInst &MSI = cast<MemSetInst>(I); 2556234353Sdim // Don't handle volatile. 2557234353Sdim if (MSI.isVolatile()) 2558234353Sdim return false; 2559239462Sdim 2560234353Sdim if (!MSI.getLength()->getType()->isIntegerTy(32)) 2561234353Sdim return false; 2562239462Sdim 2563234353Sdim if (MSI.getDestAddressSpace() > 255) 2564234353Sdim return false; 2565239462Sdim 2566234353Sdim return SelectCall(&I, "memset"); 2567234353Sdim } 2568239462Sdim case Intrinsic::trap: { 2569360784Sdim unsigned Opcode; 2570360784Sdim if (Subtarget->isThumb()) 2571360784Sdim Opcode = ARM::tTRAP; 2572360784Sdim else 2573360784Sdim Opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP; 2574360784Sdim BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode)); 2575239462Sdim return true; 2576234353Sdim } 2577239462Sdim } 2578234353Sdim} 2579223017Sdim 2580234353Sdimbool ARMFastISel::SelectTrunc(const Instruction *I) { 2581239462Sdim // The high bits for a type smaller than the register size are assumed to be 2582234353Sdim // undefined. 2583234353Sdim Value *Op = I->getOperand(0); 2584234353Sdim 2585234353Sdim EVT SrcVT, DestVT; 2586288943Sdim SrcVT = TLI.getValueType(DL, Op->getType(), true); 2587288943Sdim DestVT = TLI.getValueType(DL, I->getType(), true); 2588234353Sdim 2589234353Sdim if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 2590223017Sdim return false; 2591234353Sdim if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) 2592234353Sdim return false; 2593223017Sdim 2594234353Sdim unsigned SrcReg = getRegForValue(Op); 2595234353Sdim if (!SrcReg) return false; 2596234353Sdim 2597234353Sdim // Because the high bits are undefined, a truncate doesn't generate 2598234353Sdim // any code. 2599280031Sdim updateValueMap(I, SrcReg); 2600234353Sdim return true; 2601234353Sdim} 2602234353Sdim 2603249423Sdimunsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, 2604234353Sdim bool isZExt) { 2605234353Sdim if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8) 2606234353Sdim return 0; 2607261991Sdim if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1) 2608261991Sdim return 0; 2609234353Sdim 2610261991Sdim // Table of which combinations can be emitted as a single instruction, 2611261991Sdim // and which will require two. 
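  // Indexed as isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt], where
  // Bitness is 0, 1 or 2 for 1-, 8- and 16-bit source types respectively.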
2612261991Sdim static const uint8_t isSingleInstrTbl[3][2][2][2] = { 2613261991Sdim // ARM Thumb 2614261991Sdim // !hasV6Ops hasV6Ops !hasV6Ops hasV6Ops 2615261991Sdim // ext: s z s z s z s z 2616261991Sdim /* 1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } }, 2617261991Sdim /* 8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }, 2618261991Sdim /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } } 2619261991Sdim }; 2620261991Sdim 2621261991Sdim // Target registers for: 2622261991Sdim // - For ARM can never be PC. 2623261991Sdim // - For 16-bit Thumb are restricted to lower 8 registers. 2624261991Sdim // - For 32-bit Thumb are restricted to non-SP and non-PC. 2625261991Sdim static const TargetRegisterClass *RCTbl[2][2] = { 2626261991Sdim // Instructions: Two Single 2627261991Sdim /* ARM */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass }, 2628261991Sdim /* Thumb */ { &ARM::tGPRRegClass, &ARM::rGPRRegClass } 2629261991Sdim }; 2630261991Sdim 2631261991Sdim // Table governing the instruction(s) to be emitted. 2632261991Sdim static const struct InstructionTable { 2633261991Sdim uint32_t Opc : 16; 2634261991Sdim uint32_t hasS : 1; // Some instructions have an S bit, always set it to 0. 2635261991Sdim uint32_t Shift : 7; // For shift operand addressing mode, used by MOVsi. 2636261991Sdim uint32_t Imm : 8; // All instructions have either a shift or a mask. 2637261991Sdim } IT[2][2][3][2] = { 2638261991Sdim { // Two instructions (first is left shift, second is in this table). 2639261991Sdim { // ARM Opc S Shift Imm 2640261991Sdim /* 1 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 31 }, 2641261991Sdim /* 1 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 31 } }, 2642261991Sdim /* 8 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 24 }, 2643261991Sdim /* 8 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 24 } }, 2644261991Sdim /* 16 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 16 }, 2645261991Sdim /* 16 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 16 } } 2646261991Sdim }, 2647261991Sdim { // Thumb Opc S Shift Imm 2648261991Sdim /* 1 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 31 }, 2649261991Sdim /* 1 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 31 } }, 2650261991Sdim /* 8 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 24 }, 2651261991Sdim /* 8 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 24 } }, 2652261991Sdim /* 16 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 16 }, 2653261991Sdim /* 16 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 16 } } 2654261991Sdim } 2655261991Sdim }, 2656261991Sdim { // Single instruction. 
      { // ARM                Opc           S  Shift             Imm
        /*  1 bit sext */ { { ARM::KILL   , 0, ARM_AM::no_shift,   0 },
        /*  1 bit zext */   { ARM::ANDri  , 1, ARM_AM::no_shift,   1 } },
        /*  8 bit sext */ { { ARM::SXTB   , 0, ARM_AM::no_shift,   0 },
        /*  8 bit zext */   { ARM::ANDri  , 1, ARM_AM::no_shift, 255 } },
        /* 16 bit sext */ { { ARM::SXTH   , 0, ARM_AM::no_shift,   0 },
        /* 16 bit zext */   { ARM::UXTH   , 0, ARM_AM::no_shift,   0 } }
      },
      { // Thumb              Opc           S  Shift             Imm
        /*  1 bit sext */ { { ARM::KILL   , 0, ARM_AM::no_shift,   0 },
        /*  1 bit zext */   { ARM::t2ANDri, 1, ARM_AM::no_shift,   1 } },
        /*  8 bit sext */ { { ARM::t2SXTB , 0, ARM_AM::no_shift,   0 },
        /*  8 bit zext */   { ARM::t2ANDri, 1, ARM_AM::no_shift, 255 } },
        /* 16 bit sext */ { { ARM::t2SXTH , 0, ARM_AM::no_shift,   0 },
        /* 16 bit zext */   { ARM::t2UXTH , 0, ARM_AM::no_shift,   0 } }
      }
    }
  };

  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DestVT.getSizeInBits();
  (void) DestBits;
  assert((SrcBits < DestBits) && "can only extend to larger types");
  assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
         "other sizes unimplemented");
  assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
         "other sizes unimplemented");

  bool hasV6Ops = Subtarget->hasV6Ops();
  unsigned Bitness = SrcBits / 8;  // {1,8,16}=>{0,1,2}
  assert((Bitness < 3) && "sanity-check table bounds");

  bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
  const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];
  const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt];
  unsigned Opc = ITP->Opc;
  assert(ARM::KILL != Opc && "Invalid table entry");
  unsigned hasS = ITP->hasS;
  ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc) ITP->Shift;
  assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) &&
         "only MOVsi has shift operand addressing mode");
  unsigned Imm = ITP->Imm;

  // 16-bit Thumb instructions always set CPSR (unless they're in an IT block).
  bool setsCPSR = &ARM::tGPRRegClass == RC;
  unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
  unsigned ResultReg;
  // MOVsi encodes shift and immediate in shift operand addressing mode.
  // The following condition has the same value when emitting two
  // instruction sequences: both are shifts.
  bool ImmIsSO = (Shift != ARM_AM::no_shift);

  // Either one or two instructions are emitted.
  // They're always of the form:
  //   dst = in OP imm
  // CPSR is set only by 16-bit Thumb instructions.
  // Predicate, if any, is AL.
  // S bit, if available, is always 0.
  // When two are emitted the first's result will feed as the second's input,
  // that value is then dead.
  unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
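  // Note on operand layout in the loop below: the destination register is
  // operand 0 and, for 16-bit Thumb shifts, an explicit CPSR def is added
  // right after it, so the source register lands at index 1 + setsCPSR;
  // that is the index passed to constrainOperandRegClass.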
  for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {
    ResultReg = createResultReg(RC);
    bool isLsl = (0 == Instr) && !isSingleInstr;
    unsigned Opcode = isLsl ? LSLOpc : Opc;
    ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift;
    unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm;
    bool isKill = 1 == Instr;
    MachineInstrBuilder MIB = BuildMI(
        *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode), ResultReg);
    if (setsCPSR)
      MIB.addReg(ARM::CPSR, RegState::Define);
    SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR);
    MIB.addReg(SrcReg, isKill * RegState::Kill)
        .addImm(ImmEnc)
        .add(predOps(ARMCC::AL));
    if (hasS)
      MIB.add(condCodeOp());
    // Second instruction consumes the first's result.
    SrcReg = ResultReg;
  }

  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  EVT SrcEVT, DestEVT;
  SrcEVT = TLI.getValueType(DL, SrcTy, true);
  DestEVT = TLI.getValueType(DL, DestTy, true);
  if (!SrcEVT.isSimple()) return false;
  if (!DestEVT.isSimple()) return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();
  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  updateValueMap(I, ResultReg);
  return true;
}

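// Select an i32 shift in ARM mode. As a rough illustration: a constant-amount
// shift such as 'shl i32 %a, 3' becomes a MOVsi whose shifted operand encodes
// lsl #3, while a variable shift amount is selected to MOVsr with the amount
// taken from a register.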
bool ARMFastISel::SelectShift(const Instruction *I,
                              ARM_AM::ShiftOpc ShiftTy) {
  // Thumb2 mode is handled by the target-independent selector or by
  // SelectionDAG ISel.
  if (isThumb2)
    return false;

  // Only handle i32 now.
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);
  if (DestVT != MVT::i32)
    return false;

  unsigned Opc = ARM::MOVsr;
  unsigned ShiftImm;
  Value *Src2Value = I->getOperand(1);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
    ShiftImm = CI->getZExtValue();

    // Fall back to selection DAG isel if the shift amount
    // is zero or greater than the width of the value type.
    if (ShiftImm == 0 || ShiftImm >= 32)
      return false;

    Opc = ARM::MOVsi;
  }

  Value *Src1Value = I->getOperand(0);
  unsigned Reg1 = getRegForValue(Src1Value);
  if (Reg1 == 0) return false;

  unsigned Reg2 = 0;
  if (Opc == ARM::MOVsr) {
    Reg2 = getRegForValue(Src2Value);
    if (Reg2 == 0) return false;
  }

  unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
  if (ResultReg == 0) return false;

  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(Opc), ResultReg)
                            .addReg(Reg1);

  if (Opc == ARM::MOVsi)
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
  else if (Opc == ARM::MOVsr) {
    MIB.addReg(Reg2);
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
  }

  AddOptionalDefs(MIB);
  updateValueMap(I, ResultReg);
  return true;
}

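// Top-level FastISel dispatch for the ARM backend: try to select a single IR
// instruction here; returning false lets FastISel fall back to the slower
// SelectionDAG path for anything not handled below.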
// TODO: SoftFP support.
bool ARMFastISel::fastSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*isSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*isSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*isSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*isSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::FAdd:
      return SelectBinaryFPOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryFPOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryFPOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectDiv(I, /*isSigned*/ true);
    case Instruction::UDiv:
      return SelectDiv(I, /*isSigned*/ false);
    case Instruction::SRem:
      return SelectRem(I, /*isSigned*/ true);
    case Instruction::URem:
      return SelectRem(I, /*isSigned*/ false);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    case Instruction::Shl:
      return SelectShift(I, ARM_AM::lsl);
    case Instruction::LShr:
      return SelectShift(I, ARM_AM::lsr);
    case Instruction::AShr:
      return SelectShift(I, ARM_AM::asr);
    default: break;
  }
  return false;
}

// This table describes sign- and zero-extend instructions which can be
// folded into a preceding load. All of these extends have an immediate
// (sometimes a mask and sometimes a shift) that's applied after
// extension.
static const struct FoldableLoadExtendsStruct {
  uint16_t Opc[2];  // ARM, Thumb.
  uint8_t ExpectedImm;
  uint8_t isZExt     : 1;
  uint8_t ExpectedVT : 7;
} FoldableLoadExtends[] = {
  { { ARM::SXTH,  ARM::t2SXTH  },   0, 0, MVT::i16 },
  { { ARM::UXTH,  ARM::t2UXTH  },   0, 1, MVT::i16 },
  { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8  },
  { { ARM::SXTB,  ARM::t2SXTB  },   0, 0, MVT::i8  },
  { { ARM::UXTB,  ARM::t2UXTB  },   0, 1, MVT::i8  }
};

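// For instance, the ANDri/t2ANDri entry above matches the 'and Rd, Rn, #255'
// that ARMEmitIntExt produces for an 8-bit zero-extension, which can then be
// folded back into a zero-extending i8 load by the routine below.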
/// The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction.  If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                      const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  // ldrb r1, [r0]       ldrb r1, [r0]
  // uxtb r2, r1     =>
  // mov  r3, r2         mov  r3, r1
  if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm())
    return false;
  const uint64_t Imm = MI->getOperand(2).getImm();

  bool Found = false;
  bool isZExt;
  for (const FoldableLoadExtendsStruct &FLE : FoldableLoadExtends) {
    if (FLE.Opc[isThumb2] == MI->getOpcode() &&
        (uint64_t)FLE.ExpectedImm == Imm &&
        MVT((MVT::SimpleValueType)FLE.ExpectedVT) == VT) {
      Found = true;
      isZExt = FLE.isZExt;
    }
  }
  if (!Found) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  Register ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MachineBasicBlock::iterator I(MI);
  removeDeadCode(I, std::next(I));
  return true;
}

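// Materialize the address of a global for PIC ELF. Roughly, the emitted
// sequence is a constant-pool load of the global's address (or of its GOT
// slot when the definition may be preempted), followed by a pc-relative fixup
// (PICADD/tPICADD, or PICLDR when reading through the GOT in ARM mode), plus
// an extra load for the Thumb GOT case.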
unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
                                     unsigned Align, MVT VT) {
  bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);

  LLVMContext *Context = &MF->getFunction().getContext();
  unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
  ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(
      GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj,
      UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier,
      /*AddCurrentAddress=*/UseGOT_PREL);

  unsigned ConstAlign =
      MF->getDataLayout().getPrefTypeAlignment(Type::getInt32PtrTy(*Context));
  unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign);
  MachineMemOperand *CPMMO =
      MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
                               MachineMemOperand::MOLoad, 4, 4);

  Register TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
  unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp;
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), TempReg)
          .addConstantPoolIndex(Idx)
          .addMemOperand(CPMMO);
  if (Opc == ARM::LDRcp)
    MIB.addImm(0);
  MIB.add(predOps(ARMCC::AL));

  // Fix the address by adding pc.
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR
                                                          : ARM::PICADD;
  DestReg = constrainOperandRegClass(TII.get(Opc), DestReg, 0);
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
            .addReg(TempReg)
            .addImm(ARMPCLabelIndex);

  if (!Subtarget->isThumb())
    MIB.add(predOps(ARMCC::AL));

  if (UseGOT_PREL && Subtarget->isThumb()) {
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                  TII.get(ARM::t2LDRi12), NewDestReg)
              .addReg(DestReg)
              .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }
  return DestReg;
}

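// Fast-path lowering of formal arguments. As a simple illustrative case, for
// 'define i32 @f(i32 %a, i32 %b)' under AAPCS the incoming values arrive in
// r0 and r1 and are copied into virtual registers below; anything more
// involved (more than four arguments, aggregates, byval, etc.) bails out to
// the default argument lowering.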
bool ARMFastISel::fastLowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    return false;

  const Function *F = FuncInfo.Fn;
  if (F->isVarArg())
    return false;

  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  default:
    return false;
  case CallingConv::Fast:
  case CallingConv::C:
  case CallingConv::ARM_AAPCS_VFP:
  case CallingConv::ARM_AAPCS:
  case CallingConv::ARM_APCS:
  case CallingConv::Swift:
    break;
  }

  // Only handle simple cases, i.e. up to 4 i8/i16/i32 scalar arguments
  // which are passed in r0 - r3.
  for (const Argument &Arg : F->args()) {
    if (Arg.getArgNo() >= 4)
      return false;

    if (Arg.hasAttribute(Attribute::InReg) ||
        Arg.hasAttribute(Attribute::StructRet) ||
        Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::ByVal))
      return false;

    Type *ArgTy = Arg.getType();
    if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
      return false;

    EVT ArgVT = TLI.getValueType(DL, ArgTy);
    if (!ArgVT.isSimple()) return false;
    switch (ArgVT.getSimpleVT().SimpleTy) {
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      break;
    default:
      return false;
    }
  }

  static const MCPhysReg GPRArgRegs[] = {
    ARM::R0, ARM::R1, ARM::R2, ARM::R3
  };

  const TargetRegisterClass *RC = &ARM::rGPRRegClass;
  for (const Argument &Arg : F->args()) {
    unsigned ArgNo = Arg.getArgNo();
    unsigned SrcReg = GPRArgRegs[ArgNo];
    unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
    // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
    // Without this, EmitLiveInCopies may eliminate the livein if its only
    // use is a bitcast (which isn't turned into an instruction).
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY),
            ResultReg).addReg(DstReg, getKillRegState(true));
    updateValueMap(&Arg, ResultReg);
  }

  return true;
}

namespace llvm {

  FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
                                const TargetLibraryInfo *libInfo) {
    if (funcInfo.MF->getSubtarget<ARMSubtarget>().useFastISel())
      return new ARMFastISel(funcInfo, libInfo);

    return nullptr;
  }

} // end namespace llvm