1193323Sed//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===// 2193323Sed// 3193323Sed// The LLVM Compiler Infrastructure 4193323Sed// 5193323Sed// This file is distributed under the University of Illinois Open Source 6193323Sed// License. See LICENSE.TXT for details. 7193323Sed// 8193323Sed//===----------------------------------------------------------------------===// 9193323Sed// 10193323Sed// This file implements the interfaces that Sparc uses to lower LLVM code into a 11193323Sed// selection DAG. 12193323Sed// 13193323Sed//===----------------------------------------------------------------------===// 14193323Sed 15193323Sed#include "SparcISelLowering.h" 16263764Sdim#include "MCTargetDesc/SparcMCExpr.h" 17252723Sdim#include "SparcMachineFunctionInfo.h" 18263509Sdim#include "SparcRegisterInfo.h" 19193323Sed#include "SparcTargetMachine.h" 20263764Sdim#include "SparcTargetObjectFile.h" 21193323Sed#include "llvm/CodeGen/CallingConvLower.h" 22193323Sed#include "llvm/CodeGen/MachineFrameInfo.h" 23193323Sed#include "llvm/CodeGen/MachineFunction.h" 24193323Sed#include "llvm/CodeGen/MachineInstrBuilder.h" 25193323Sed#include "llvm/CodeGen/MachineRegisterInfo.h" 26193323Sed#include "llvm/CodeGen/SelectionDAG.h" 27203954Srdivacky#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" 28252723Sdim#include "llvm/IR/DerivedTypes.h" 29252723Sdim#include "llvm/IR/Function.h" 30252723Sdim#include "llvm/IR/Module.h" 31198090Srdivacky#include "llvm/Support/ErrorHandling.h" 32193323Sedusing namespace llvm; 33193323Sed 34193323Sed 35193323Sed//===----------------------------------------------------------------------===// 36193323Sed// Calling Convention Implementation 37193323Sed//===----------------------------------------------------------------------===// 38193323Sed 39218893Sdimstatic bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT, 40218893Sdim MVT &LocVT, CCValAssign::LocInfo &LocInfo, 41218893Sdim ISD::ArgFlagsTy &ArgFlags, CCState &State) 
42218893Sdim{ 43218893Sdim assert (ArgFlags.isSRet()); 44218893Sdim 45263509Sdim // Assign SRet argument. 46218893Sdim State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, 47218893Sdim 0, 48218893Sdim LocVT, LocInfo)); 49218893Sdim return true; 50218893Sdim} 51218893Sdim 52218893Sdimstatic bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT, 53218893Sdim MVT &LocVT, CCValAssign::LocInfo &LocInfo, 54218893Sdim ISD::ArgFlagsTy &ArgFlags, CCState &State) 55218893Sdim{ 56235633Sdim static const uint16_t RegList[] = { 57218893Sdim SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5 58218893Sdim }; 59263509Sdim // Try to get first reg. 60218893Sdim if (unsigned Reg = State.AllocateReg(RegList, 6)) { 61218893Sdim State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 62218893Sdim } else { 63263509Sdim // Assign whole thing in stack. 64218893Sdim State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, 65218893Sdim State.AllocateStack(8,4), 66218893Sdim LocVT, LocInfo)); 67218893Sdim return true; 68218893Sdim } 69218893Sdim 70263509Sdim // Try to get second reg. 71218893Sdim if (unsigned Reg = State.AllocateReg(RegList, 6)) 72218893Sdim State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 73218893Sdim else 74218893Sdim State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, 75218893Sdim State.AllocateStack(4,4), 76218893Sdim LocVT, LocInfo)); 77218893Sdim return true; 78218893Sdim} 79218893Sdim 80252723Sdim// Allocate a full-sized argument for the 64-bit ABI. 81252723Sdimstatic bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, 82252723Sdim MVT &LocVT, CCValAssign::LocInfo &LocInfo, 83252723Sdim ISD::ArgFlagsTy &ArgFlags, CCState &State) { 84263764Sdim assert((LocVT == MVT::f32 || LocVT == MVT::f128 85263764Sdim || LocVT.getSizeInBits() == 64) && 86252723Sdim "Can't handle non-64 bits locations"); 87252723Sdim 88252723Sdim // Stack space is allocated for all arguments starting from [%fp+BIAS+128]. 
89263764Sdim unsigned size = (LocVT == MVT::f128) ? 16 : 8; 90263764Sdim unsigned alignment = (LocVT == MVT::f128) ? 16 : 8; 91263764Sdim unsigned Offset = State.AllocateStack(size, alignment); 92252723Sdim unsigned Reg = 0; 93252723Sdim 94252723Sdim if (LocVT == MVT::i64 && Offset < 6*8) 95252723Sdim // Promote integers to %i0-%i5. 96252723Sdim Reg = SP::I0 + Offset/8; 97252723Sdim else if (LocVT == MVT::f64 && Offset < 16*8) 98252723Sdim // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15). 99252723Sdim Reg = SP::D0 + Offset/8; 100252723Sdim else if (LocVT == MVT::f32 && Offset < 16*8) 101252723Sdim // Promote floats to %f1, %f3, ... 102252723Sdim Reg = SP::F1 + Offset/4; 103263764Sdim else if (LocVT == MVT::f128 && Offset < 16*8) 104263764Sdim // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7). 105263764Sdim Reg = SP::Q0 + Offset/16; 106252723Sdim 107252723Sdim // Promote to register when possible, otherwise use the stack slot. 108252723Sdim if (Reg) { 109252723Sdim State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 110252723Sdim return true; 111252723Sdim } 112252723Sdim 113252723Sdim // This argument goes on the stack in an 8-byte slot. 114252723Sdim // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to 115252723Sdim // the right-aligned float. The first 4 bytes of the stack slot are undefined. 116252723Sdim if (LocVT == MVT::f32) 117252723Sdim Offset += 4; 118252723Sdim 119252723Sdim State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo)); 120252723Sdim return true; 121252723Sdim} 122252723Sdim 123252723Sdim// Allocate a half-sized argument for the 64-bit ABI. 124252723Sdim// 125252723Sdim// This is used when passing { float, int } structs by value in registers. 
126252723Sdimstatic bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, 127252723Sdim MVT &LocVT, CCValAssign::LocInfo &LocInfo, 128252723Sdim ISD::ArgFlagsTy &ArgFlags, CCState &State) { 129252723Sdim assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations"); 130252723Sdim unsigned Offset = State.AllocateStack(4, 4); 131252723Sdim 132252723Sdim if (LocVT == MVT::f32 && Offset < 16*8) { 133252723Sdim // Promote floats to %f0-%f31. 134252723Sdim State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4, 135252723Sdim LocVT, LocInfo)); 136252723Sdim return true; 137252723Sdim } 138252723Sdim 139252723Sdim if (LocVT == MVT::i32 && Offset < 6*8) { 140252723Sdim // Promote integers to %i0-%i5, using half the register. 141252723Sdim unsigned Reg = SP::I0 + Offset/8; 142252723Sdim LocVT = MVT::i64; 143252723Sdim LocInfo = CCValAssign::AExt; 144252723Sdim 145252723Sdim // Set the Custom bit if this i32 goes in the high bits of a register. 146252723Sdim if (Offset % 8 == 0) 147252723Sdim State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, 148252723Sdim LocVT, LocInfo)); 149252723Sdim else 150252723Sdim State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 151252723Sdim return true; 152252723Sdim } 153252723Sdim 154252723Sdim State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo)); 155252723Sdim return true; 156252723Sdim} 157252723Sdim 158193323Sed#include "SparcGenCallingConv.inc" 159193323Sed 160252723Sdim// The calling conventions in SparcCallingConv.td are described in terms of the 161252723Sdim// callee's register window. This function translates registers to the 162252723Sdim// corresponding caller window %o register. 
163252723Sdimstatic unsigned toCallerWindow(unsigned Reg) { 164252723Sdim assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7 && "Unexpected enum"); 165252723Sdim if (Reg >= SP::I0 && Reg <= SP::I7) 166252723Sdim return Reg - SP::I0 + SP::O0; 167252723Sdim return Reg; 168252723Sdim} 169252723Sdim 170198090SrdivackySDValue 171198090SrdivackySparcTargetLowering::LowerReturn(SDValue Chain, 172252723Sdim CallingConv::ID CallConv, bool IsVarArg, 173198090Srdivacky const SmallVectorImpl<ISD::OutputArg> &Outs, 174210299Sed const SmallVectorImpl<SDValue> &OutVals, 175263509Sdim SDLoc DL, SelectionDAG &DAG) const { 176252723Sdim if (Subtarget->is64Bit()) 177252723Sdim return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG); 178252723Sdim return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG); 179252723Sdim} 180198090Srdivacky 181252723SdimSDValue 182252723SdimSparcTargetLowering::LowerReturn_32(SDValue Chain, 183252723Sdim CallingConv::ID CallConv, bool IsVarArg, 184252723Sdim const SmallVectorImpl<ISD::OutputArg> &Outs, 185252723Sdim const SmallVectorImpl<SDValue> &OutVals, 186263509Sdim SDLoc DL, SelectionDAG &DAG) const { 187218893Sdim MachineFunction &MF = DAG.getMachineFunction(); 188218893Sdim 189193323Sed // CCValAssign - represent the assignment of the return value to locations. 190193323Sed SmallVector<CCValAssign, 16> RVLocs; 191193323Sed 192193323Sed // CCState - Info about the registers and stack slot. 193252723Sdim CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), 194245431Sdim DAG.getTarget(), RVLocs, *DAG.getContext()); 195193323Sed 196252723Sdim // Analyze return values. 197198090Srdivacky CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32); 198193323Sed 199193323Sed SDValue Flag; 200252723Sdim SmallVector<SDValue, 4> RetOps(1, Chain); 201252723Sdim // Make room for the return address offset. 202252723Sdim RetOps.push_back(SDValue()); 203193323Sed 204193323Sed // Copy the result values into the output registers. 
205193323Sed for (unsigned i = 0; i != RVLocs.size(); ++i) { 206193323Sed CCValAssign &VA = RVLocs[i]; 207193323Sed assert(VA.isRegLoc() && "Can only return in registers!"); 208193323Sed 209252723Sdim Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), 210210299Sed OutVals[i], Flag); 211193323Sed 212193323Sed // Guarantee that all emitted copies are stuck together with flags. 213193323Sed Flag = Chain.getValue(1); 214252723Sdim RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 215193323Sed } 216219077Sdim 217263509Sdim unsigned RetAddrOffset = 8; // Call Inst + Delay Slot 218218893Sdim // If the function returns a struct, copy the SRetReturnReg to I0 219218893Sdim if (MF.getFunction()->hasStructRetAttr()) { 220218893Sdim SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>(); 221218893Sdim unsigned Reg = SFI->getSRetReturnReg(); 222218893Sdim if (!Reg) 223218893Sdim llvm_unreachable("sret virtual register not created in the entry block"); 224252723Sdim SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy()); 225252723Sdim Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag); 226218893Sdim Flag = Chain.getValue(1); 227252723Sdim RetOps.push_back(DAG.getRegister(SP::I0, getPointerTy())); 228219077Sdim RetAddrOffset = 12; // CallInst + Delay Slot + Unimp 229218893Sdim } 230193323Sed 231252723Sdim RetOps[0] = Chain; // Update chain. 232252723Sdim RetOps[1] = DAG.getConstant(RetAddrOffset, MVT::i32); 233219077Sdim 234252723Sdim // Add the flag if we have it. 235193323Sed if (Flag.getNode()) 236252723Sdim RetOps.push_back(Flag); 237252723Sdim 238252723Sdim return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, 239252723Sdim &RetOps[0], RetOps.size()); 240193323Sed} 241193323Sed 242252723Sdim// Lower return values for the 64-bit ABI. 243252723Sdim// Return values are passed the exactly the same way as function arguments. 
244252723SdimSDValue 245252723SdimSparcTargetLowering::LowerReturn_64(SDValue Chain, 246252723Sdim CallingConv::ID CallConv, bool IsVarArg, 247252723Sdim const SmallVectorImpl<ISD::OutputArg> &Outs, 248252723Sdim const SmallVectorImpl<SDValue> &OutVals, 249263509Sdim SDLoc DL, SelectionDAG &DAG) const { 250252723Sdim // CCValAssign - represent the assignment of the return value to locations. 251252723Sdim SmallVector<CCValAssign, 16> RVLocs; 252252723Sdim 253252723Sdim // CCState - Info about the registers and stack slot. 254252723Sdim CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), 255252723Sdim DAG.getTarget(), RVLocs, *DAG.getContext()); 256252723Sdim 257252723Sdim // Analyze return values. 258263764Sdim CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64); 259252723Sdim 260252723Sdim SDValue Flag; 261252723Sdim SmallVector<SDValue, 4> RetOps(1, Chain); 262252723Sdim 263252723Sdim // The second operand on the return instruction is the return address offset. 264252723Sdim // The return address is always %i7+8 with the 64-bit ABI. 265252723Sdim RetOps.push_back(DAG.getConstant(8, MVT::i32)); 266252723Sdim 267252723Sdim // Copy the result values into the output registers. 268252723Sdim for (unsigned i = 0; i != RVLocs.size(); ++i) { 269252723Sdim CCValAssign &VA = RVLocs[i]; 270252723Sdim assert(VA.isRegLoc() && "Can only return in registers!"); 271252723Sdim SDValue OutVal = OutVals[i]; 272252723Sdim 273252723Sdim // Integer return values must be sign or zero extended by the callee. 
274252723Sdim switch (VA.getLocInfo()) { 275252723Sdim case CCValAssign::SExt: 276252723Sdim OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal); 277252723Sdim break; 278252723Sdim case CCValAssign::ZExt: 279252723Sdim OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal); 280252723Sdim break; 281252723Sdim case CCValAssign::AExt: 282252723Sdim OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal); 283252723Sdim default: 284252723Sdim break; 285252723Sdim } 286252723Sdim 287252723Sdim // The custom bit on an i32 return value indicates that it should be passed 288252723Sdim // in the high bits of the register. 289252723Sdim if (VA.getValVT() == MVT::i32 && VA.needsCustom()) { 290252723Sdim OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal, 291252723Sdim DAG.getConstant(32, MVT::i32)); 292252723Sdim 293252723Sdim // The next value may go in the low bits of the same register. 294252723Sdim // Handle both at once. 295252723Sdim if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) { 296252723Sdim SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]); 297252723Sdim OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV); 298252723Sdim // Skip the next value, it's already done. 299252723Sdim ++i; 300252723Sdim } 301252723Sdim } 302252723Sdim 303252723Sdim Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag); 304252723Sdim 305252723Sdim // Guarantee that all emitted copies are stuck together with flags. 306252723Sdim Flag = Chain.getValue(1); 307252723Sdim RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 308252723Sdim } 309252723Sdim 310252723Sdim RetOps[0] = Chain; // Update chain. 311252723Sdim 312252723Sdim // Add the flag if we have it. 
313252723Sdim if (Flag.getNode()) 314252723Sdim RetOps.push_back(Flag); 315252723Sdim 316252723Sdim return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, 317252723Sdim &RetOps[0], RetOps.size()); 318252723Sdim} 319252723Sdim 320252723SdimSDValue SparcTargetLowering:: 321252723SdimLowerFormalArguments(SDValue Chain, 322252723Sdim CallingConv::ID CallConv, 323252723Sdim bool IsVarArg, 324252723Sdim const SmallVectorImpl<ISD::InputArg> &Ins, 325263509Sdim SDLoc DL, 326252723Sdim SelectionDAG &DAG, 327252723Sdim SmallVectorImpl<SDValue> &InVals) const { 328252723Sdim if (Subtarget->is64Bit()) 329252723Sdim return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins, 330252723Sdim DL, DAG, InVals); 331252723Sdim return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins, 332252723Sdim DL, DAG, InVals); 333252723Sdim} 334252723Sdim 335252723Sdim/// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are 336198090Srdivacky/// passed in either one or two GPRs, including FP values. TODO: we should 337198090Srdivacky/// pass FP values in FP registers for fastcc functions. 338252723SdimSDValue SparcTargetLowering:: 339252723SdimLowerFormalArguments_32(SDValue Chain, 340252723Sdim CallingConv::ID CallConv, 341252723Sdim bool isVarArg, 342252723Sdim const SmallVectorImpl<ISD::InputArg> &Ins, 343263509Sdim SDLoc dl, 344252723Sdim SelectionDAG &DAG, 345252723Sdim SmallVectorImpl<SDValue> &InVals) const { 346193323Sed MachineFunction &MF = DAG.getMachineFunction(); 347193323Sed MachineRegisterInfo &RegInfo = MF.getRegInfo(); 348207618Srdivacky SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>(); 349193323Sed 350198090Srdivacky // Assign locations to all of the incoming arguments. 
351198090Srdivacky SmallVector<CCValAssign, 16> ArgLocs; 352223017Sdim CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 353245431Sdim getTargetMachine(), ArgLocs, *DAG.getContext()); 354198090Srdivacky CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32); 355198090Srdivacky 356218893Sdim const unsigned StackOffset = 92; 357193323Sed 358198090Srdivacky for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 359198090Srdivacky CCValAssign &VA = ArgLocs[i]; 360193323Sed 361218893Sdim if (i == 0 && Ins[i].Flags.isSRet()) { 362263509Sdim // Get SRet from [%fp+64]. 363218893Sdim int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, 64, true); 364218893Sdim SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); 365218893Sdim SDValue Arg = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, 366218893Sdim MachinePointerInfo(), 367235633Sdim false, false, false, 0); 368218893Sdim InVals.push_back(Arg); 369218893Sdim continue; 370218893Sdim } 371193323Sed 372218893Sdim if (VA.isRegLoc()) { 373218893Sdim if (VA.needsCustom()) { 374218893Sdim assert(VA.getLocVT() == MVT::f64); 375218893Sdim unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); 376218893Sdim MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi); 377218893Sdim SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32); 378193323Sed 379218893Sdim assert(i+1 < e); 380218893Sdim CCValAssign &NextVA = ArgLocs[++i]; 381193323Sed 382193323Sed SDValue LoVal; 383218893Sdim if (NextVA.isMemLoc()) { 384218893Sdim int FrameIdx = MF.getFrameInfo()-> 385218893Sdim CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true); 386193323Sed SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); 387218893Sdim LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, 388218893Sdim MachinePointerInfo(), 389235633Sdim false, false, false, 0); 390218893Sdim } else { 391218893Sdim unsigned loReg = MF.addLiveIn(NextVA.getLocReg(), 392219077Sdim &SP::IntRegsRegClass); 393218893Sdim LoVal = DAG.getCopyFromReg(Chain, dl, loReg, 
MVT::i32); 394193323Sed } 395193323Sed SDValue WholeValue = 396193323Sed DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal); 397218893Sdim WholeValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, WholeValue); 398218893Sdim InVals.push_back(WholeValue); 399218893Sdim continue; 400218893Sdim } 401218893Sdim unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); 402218893Sdim MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg); 403218893Sdim SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 404218893Sdim if (VA.getLocVT() == MVT::f32) 405218893Sdim Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg); 406218893Sdim else if (VA.getLocVT() != MVT::i32) { 407218893Sdim Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg, 408218893Sdim DAG.getValueType(VA.getLocVT())); 409218893Sdim Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg); 410218893Sdim } 411218893Sdim InVals.push_back(Arg); 412218893Sdim continue; 413218893Sdim } 414193323Sed 415218893Sdim assert(VA.isMemLoc()); 416193323Sed 417218893Sdim unsigned Offset = VA.getLocMemOffset()+StackOffset; 418218893Sdim 419218893Sdim if (VA.needsCustom()) { 420218893Sdim assert(VA.getValVT() == MVT::f64); 421263509Sdim // If it is double-word aligned, just load. 
422218893Sdim if (Offset % 8 == 0) { 423218893Sdim int FI = MF.getFrameInfo()->CreateFixedObject(8, 424218893Sdim Offset, 425218893Sdim true); 426218893Sdim SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy()); 427218893Sdim SDValue Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, 428218893Sdim MachinePointerInfo(), 429235633Sdim false,false, false, 0); 430218893Sdim InVals.push_back(Load); 431218893Sdim continue; 432193323Sed } 433218893Sdim 434218893Sdim int FI = MF.getFrameInfo()->CreateFixedObject(4, 435218893Sdim Offset, 436218893Sdim true); 437218893Sdim SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy()); 438218893Sdim SDValue HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, 439218893Sdim MachinePointerInfo(), 440235633Sdim false, false, false, 0); 441218893Sdim int FI2 = MF.getFrameInfo()->CreateFixedObject(4, 442218893Sdim Offset+4, 443218893Sdim true); 444218893Sdim SDValue FIPtr2 = DAG.getFrameIndex(FI2, getPointerTy()); 445218893Sdim 446218893Sdim SDValue LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, 447218893Sdim MachinePointerInfo(), 448235633Sdim false, false, false, 0); 449218893Sdim 450218893Sdim SDValue WholeValue = 451218893Sdim DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal); 452218893Sdim WholeValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, WholeValue); 453218893Sdim InVals.push_back(WholeValue); 454218893Sdim continue; 455193323Sed } 456218893Sdim 457218893Sdim int FI = MF.getFrameInfo()->CreateFixedObject(4, 458218893Sdim Offset, 459218893Sdim true); 460218893Sdim SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy()); 461218893Sdim SDValue Load ; 462218893Sdim if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) { 463218893Sdim Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, 464218893Sdim MachinePointerInfo(), 465235633Sdim false, false, false, 0); 466218893Sdim } else { 467218893Sdim ISD::LoadExtType LoadOp = ISD::SEXTLOAD; 468218893Sdim // Sparc is big endian, so add an offset based on the ObjectVT. 
469218893Sdim unsigned Offset = 4-std::max(1U, VA.getValVT().getSizeInBits()/8); 470218893Sdim FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr, 471218893Sdim DAG.getConstant(Offset, MVT::i32)); 472218893Sdim Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Chain, FIPtr, 473218893Sdim MachinePointerInfo(), 474218893Sdim VA.getValVT(), false, false,0); 475218893Sdim Load = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Load); 476218893Sdim } 477218893Sdim InVals.push_back(Load); 478193323Sed } 479193323Sed 480218893Sdim if (MF.getFunction()->hasStructRetAttr()) { 481263509Sdim // Copy the SRet Argument to SRetReturnReg. 482218893Sdim SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>(); 483218893Sdim unsigned Reg = SFI->getSRetReturnReg(); 484218893Sdim if (!Reg) { 485218893Sdim Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass); 486218893Sdim SFI->setSRetReturnReg(Reg); 487218893Sdim } 488218893Sdim SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); 489218893Sdim Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); 490218893Sdim } 491218893Sdim 492193323Sed // Store remaining ArgRegs to the stack if this is a varargs function. 493198090Srdivacky if (isVarArg) { 494235633Sdim static const uint16_t ArgRegs[] = { 495218893Sdim SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5 496218893Sdim }; 497218893Sdim unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs, 6); 498235633Sdim const uint16_t *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6; 499218893Sdim unsigned ArgOffset = CCInfo.getNextStackOffset(); 500218893Sdim if (NumAllocated == 6) 501218893Sdim ArgOffset += StackOffset; 502218893Sdim else { 503218893Sdim assert(!ArgOffset); 504218893Sdim ArgOffset = 68+4*NumAllocated; 505218893Sdim } 506218893Sdim 507193323Sed // Remember the vararg offset for the va_start implementation. 
508207618Srdivacky FuncInfo->setVarArgsFrameOffset(ArgOffset); 509193323Sed 510198090Srdivacky std::vector<SDValue> OutChains; 511198090Srdivacky 512193323Sed for (; CurArgReg != ArgRegEnd; ++CurArgReg) { 513193323Sed unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); 514193323Sed MF.getRegInfo().addLiveIn(*CurArgReg, VReg); 515193323Sed SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32); 516193323Sed 517199481Srdivacky int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset, 518210299Sed true); 519193323Sed SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); 520193323Sed 521218893Sdim OutChains.push_back(DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, 522218893Sdim MachinePointerInfo(), 523203954Srdivacky false, false, 0)); 524193323Sed ArgOffset += 4; 525193323Sed } 526198090Srdivacky 527198090Srdivacky if (!OutChains.empty()) { 528198090Srdivacky OutChains.push_back(Chain); 529198090Srdivacky Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 530198090Srdivacky &OutChains[0], OutChains.size()); 531198090Srdivacky } 532193323Sed } 533193323Sed 534198090Srdivacky return Chain; 535193323Sed} 536193323Sed 537252723Sdim// Lower formal arguments for the 64 bit ABI. 538252723SdimSDValue SparcTargetLowering:: 539252723SdimLowerFormalArguments_64(SDValue Chain, 540252723Sdim CallingConv::ID CallConv, 541252723Sdim bool IsVarArg, 542252723Sdim const SmallVectorImpl<ISD::InputArg> &Ins, 543263509Sdim SDLoc DL, 544252723Sdim SelectionDAG &DAG, 545252723Sdim SmallVectorImpl<SDValue> &InVals) const { 546252723Sdim MachineFunction &MF = DAG.getMachineFunction(); 547252723Sdim 548252723Sdim // Analyze arguments according to CC_Sparc64. 
549252723Sdim SmallVector<CCValAssign, 16> ArgLocs; 550252723Sdim CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), 551252723Sdim getTargetMachine(), ArgLocs, *DAG.getContext()); 552252723Sdim CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64); 553252723Sdim 554252723Sdim // The argument array begins at %fp+BIAS+128, after the register save area. 555252723Sdim const unsigned ArgArea = 128; 556252723Sdim 557252723Sdim for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 558252723Sdim CCValAssign &VA = ArgLocs[i]; 559252723Sdim if (VA.isRegLoc()) { 560252723Sdim // This argument is passed in a register. 561252723Sdim // All integer register arguments are promoted by the caller to i64. 562252723Sdim 563252723Sdim // Create a virtual register for the promoted live-in value. 564252723Sdim unsigned VReg = MF.addLiveIn(VA.getLocReg(), 565252723Sdim getRegClassFor(VA.getLocVT())); 566252723Sdim SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT()); 567252723Sdim 568252723Sdim // Get the high bits for i32 struct elements. 569252723Sdim if (VA.getValVT() == MVT::i32 && VA.needsCustom()) 570252723Sdim Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg, 571252723Sdim DAG.getConstant(32, MVT::i32)); 572252723Sdim 573252723Sdim // The caller promoted the argument, so insert an Assert?ext SDNode so we 574252723Sdim // won't promote the value again in this function. 575252723Sdim switch (VA.getLocInfo()) { 576252723Sdim case CCValAssign::SExt: 577252723Sdim Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg, 578252723Sdim DAG.getValueType(VA.getValVT())); 579252723Sdim break; 580252723Sdim case CCValAssign::ZExt: 581252723Sdim Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg, 582252723Sdim DAG.getValueType(VA.getValVT())); 583252723Sdim break; 584252723Sdim default: 585252723Sdim break; 586252723Sdim } 587252723Sdim 588252723Sdim // Truncate the register down to the argument type. 
589252723Sdim if (VA.isExtInLoc()) 590252723Sdim Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg); 591252723Sdim 592252723Sdim InVals.push_back(Arg); 593252723Sdim continue; 594252723Sdim } 595252723Sdim 596252723Sdim // The registers are exhausted. This argument was passed on the stack. 597252723Sdim assert(VA.isMemLoc()); 598252723Sdim // The CC_Sparc64_Full/Half functions compute stack offsets relative to the 599252723Sdim // beginning of the arguments area at %fp+BIAS+128. 600252723Sdim unsigned Offset = VA.getLocMemOffset() + ArgArea; 601252723Sdim unsigned ValSize = VA.getValVT().getSizeInBits() / 8; 602252723Sdim // Adjust offset for extended arguments, SPARC is big-endian. 603252723Sdim // The caller will have written the full slot with extended bytes, but we 604252723Sdim // prefer our own extending loads. 605252723Sdim if (VA.isExtInLoc()) 606252723Sdim Offset += 8 - ValSize; 607252723Sdim int FI = MF.getFrameInfo()->CreateFixedObject(ValSize, Offset, true); 608252723Sdim InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, 609252723Sdim DAG.getFrameIndex(FI, getPointerTy()), 610252723Sdim MachinePointerInfo::getFixedStack(FI), 611252723Sdim false, false, false, 0)); 612252723Sdim } 613252723Sdim 614252723Sdim if (!IsVarArg) 615252723Sdim return Chain; 616252723Sdim 617252723Sdim // This function takes variable arguments, some of which may have been passed 618252723Sdim // in registers %i0-%i5. Variable floating point arguments are never passed 619252723Sdim // in floating point registers. They go on %i0-%i5 or on the stack like 620252723Sdim // integer arguments. 621252723Sdim // 622252723Sdim // The va_start intrinsic needs to know the offset to the first variable 623252723Sdim // argument. 624252723Sdim unsigned ArgOffset = CCInfo.getNextStackOffset(); 625252723Sdim SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>(); 626252723Sdim // Skip the 128 bytes of register save area. 
627252723Sdim FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea + 628252723Sdim Subtarget->getStackPointerBias()); 629252723Sdim 630252723Sdim // Save the variable arguments that were passed in registers. 631252723Sdim // The caller is required to reserve stack space for 6 arguments regardless 632252723Sdim // of how many arguments were actually passed. 633252723Sdim SmallVector<SDValue, 8> OutChains; 634252723Sdim for (; ArgOffset < 6*8; ArgOffset += 8) { 635252723Sdim unsigned VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass); 636252723Sdim SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64); 637252723Sdim int FI = MF.getFrameInfo()->CreateFixedObject(8, ArgOffset + ArgArea, true); 638252723Sdim OutChains.push_back(DAG.getStore(Chain, DL, VArg, 639252723Sdim DAG.getFrameIndex(FI, getPointerTy()), 640252723Sdim MachinePointerInfo::getFixedStack(FI), 641252723Sdim false, false, 0)); 642252723Sdim } 643252723Sdim 644252723Sdim if (!OutChains.empty()) 645252723Sdim Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 646252723Sdim &OutChains[0], OutChains.size()); 647252723Sdim 648252723Sdim return Chain; 649252723Sdim} 650252723Sdim 651198090SrdivackySDValue 652245431SdimSparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 653207618Srdivacky SmallVectorImpl<SDValue> &InVals) const { 654252723Sdim if (Subtarget->is64Bit()) 655252723Sdim return LowerCall_64(CLI, InVals); 656252723Sdim return LowerCall_32(CLI, InVals); 657252723Sdim} 658252723Sdim 659263509Sdimstatic bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee, 660263509Sdim ImmutableCallSite *CS) { 661263509Sdim if (CS) 662263509Sdim return CS->hasFnAttr(Attribute::ReturnsTwice); 663263509Sdim 664263509Sdim const Function *CalleeFn = 0; 665263509Sdim if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 666263509Sdim CalleeFn = dyn_cast<Function>(G->getGlobal()); 667263509Sdim } else if (ExternalSymbolSDNode *E = 668263509Sdim 
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    // External symbol: look it up by name in the current module.
    const Function *Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn->getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
  }

  if (!CalleeFn)
    return false;
  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
}

// Lower a call for the 32-bit ABI.
// Arguments are assigned by CC_Sparc32; f64 values may be split across an
// integer register pair and/or the stack, and struct-return calls pass the
// sret pointer at %sp+64 plus an SRet size operand on the call node.
SDValue
SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // Sparc target does not yet support tail call optimization.
  isTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 DAG.getTarget(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getNextStackOffset();

  // Keep stack frames 8-byte aligned.
  ArgsSize = (ArgsSize+7) & ~7;

  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();

  // Create local copies for byval args.
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    unsigned Align = Flags.getByValAlign();

    int FI = MFI->CreateStackObject(Size, Align, false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
    SDValue SizeNode = DAG.getConstant(Size, MVT::i32);

    Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
                          false,        // isVolatile,
                          (Size <= 32), // AlwaysInline if size <= 32
                          MachinePointerInfo(), MachinePointerInfo());
    ByValArgs.push_back(FIPtr);
  }

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true),
                               dl);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Offset from %sp to the outgoing argument area in the 32-bit ABI frame
  // (past the register window save area and the sret slot).
  const unsigned StackOffset = 92;
  bool hasStructRetAttr = false;
  // Walk the register/memloc assignments, inserting copies/loads.
  // Note: realArgIdx tracks the original argument index; i can advance an
  // extra step when an f64 consumes two consecutive locations.
  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];

    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Use local copy if it is a byval arg.
    if (Flags.isByVal())
      Arg = ByValArgs[byvalArgIdx++];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (Flags.isSRet()) {
      assert(VA.needsCustom());
      // store SRet argument in %sp+64
      SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
      SDValue PtrOff = DAG.getIntPtrConstant(64);
      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
      MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                         MachinePointerInfo(),
                                         false, false, 0));
      hasStructRetAttr = true;
      continue;
    }

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::f64);

      if (VA.isMemLoc()) {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // if it is double-word aligned, just store.
        if (Offset % 8 == 0) {
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                             MachinePointerInfo(),
                                             false, false, 0));
          continue;
        }
      }

      // Split the f64 into two i32 halves via a stack temporary so the halves
      // can be routed to separate registers and/or unaligned stack slots.
      SDValue StackPtr = DAG.CreateStackTemporary(MVT::f64, MVT::i32);
      SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
                                   Arg, StackPtr, MachinePointerInfo(),
                                   false, false, 0);
      // Sparc is big-endian, so the high part comes first.
      SDValue Hi = DAG.getLoad(MVT::i32, dl, Store, StackPtr,
                               MachinePointerInfo(), false, false, false, 0);
      // Increment the pointer to the other half.
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                             DAG.getIntPtrConstant(4));
      // Load the low part.
      SDValue Lo = DAG.getLoad(MVT::i32, dl, Store, StackPtr,
                               MachinePointerInfo(), false, false, false, 0);

      if (VA.isRegLoc()) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Hi));
        assert(i+1 != e);
        CCValAssign &NextVA = ArgLocs[++i];
        if (NextVA.isRegLoc()) {
          RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Lo));
        } else {
          // Store the low part in stack.
          unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(DAG.getStore(Chain, dl, Lo, PtrOff,
                                             MachinePointerInfo(),
                                             false, false, 0));
        }
      } else {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // Store the high part.
        SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
        SDValue PtrOff = DAG.getIntPtrConstant(Offset);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getStore(Chain, dl, Hi, PtrOff,
                                           MachinePointerInfo(),
                                           false, false, 0));
        // Store the low part.
        PtrOff = DAG.getIntPtrConstant(Offset+4);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getStore(Chain, dl, Lo, PtrOff,
                                           MachinePointerInfo(),
                                           false, false, 0));
      }
      continue;
    }

    // Arguments that can be passed on register must be kept at
    // RegsToPass vector
    if (VA.isRegLoc()) {
      if (VA.getLocVT() != MVT::f32) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
        continue;
      }
      // f32 in an integer register: pass the raw bits.
      Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset()+StackOffset);
    PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
    MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                       MachinePointerInfo(),
                                       false, false, 0));
  }


  // Emit all stores, make sure they occur before any copies into physregs.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    // %i registers in the callee are the caller's %o registers.
    unsigned Reg = toCallerWindow(RegsToPass[i].first);
    Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  unsigned SRetArgSize = (hasStructRetAttr)? getSRetArgSize(DAG, Callee):0;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  unsigned TF = ((getTargetMachine().getRelocationModel() == Reloc::PIC_)
                 ? SparcMCExpr::VK_Sparc_WPLT30 : 0);
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);

  // Returns a chain & a flag for retval copy to use
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (hasStructRetAttr)
    Ops.push_back(DAG.getTargetConstant(SRetArgSize, MVT::i32));
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  // returns_twice callees preserve a different (smaller) set.
  const SparcRegisterInfo *TRI =
    ((const SparcTargetMachine&)getTargetMachine()).getRegisterInfo();
  const uint32_t *Mask = ((hasReturnsTwice)
                          ? TRI->getRTCallPreservedMask(CallConv)
                          : TRI->getCallPreservedMask(CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
                             DAG.getIntPtrConstant(0, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 DAG.getTarget(), RVLocs, *DAG.getContext());

  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    Chain = DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
                               RVLocs[i].getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

// This function returns true if CalleeName is an ABI function that returns
// a long double (fp128).
static bool isFP128ABICall(const char *CalleeName)
{
  static const char *const ABICalls[] =
    { "_Q_add", "_Q_sub", "_Q_mul", "_Q_div",
      "_Q_sqrt", "_Q_neg",
      "_Q_itoq", "_Q_stoq", "_Q_dtoq", "_Q_utoq",
      "_Q_lltoq", "_Q_ulltoq",
      0
    };
  for (const char * const *I = ABICalls; *I != 0; ++I)
    if (strcmp(CalleeName, *I) == 0)
      return true;
  return false;
}

/// getSRetArgSize - Return the size in bytes of the struct returned via the
/// callee's sret argument, or 0 if the callee cannot be resolved.
unsigned
SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
{
  const Function *CalleeFn = 0;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
               dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function *Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn->getParent();
    const char
               *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
    // fp128 soft-float ABI helpers return via sret even though there is no
    // Function declaration to inspect.
    if (!CalleeFn && isFP128ABICall(CalleeName))
      return 16; // Return sizeof(fp128)
  }

  if (!CalleeFn)
    return 0;

  assert(CalleeFn->hasStructRetAttr() &&
         "Callee does not have the StructRet attribute.");

  // The sret pointer is always the first argument; report its pointee size.
  PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
  Type *ElementTy = Ty->getElementType();
  return getDataLayout()->getTypeAllocSize(ElementTy);
}


// Fixup floating point arguments in the ... part of a varargs call.
//
// The SPARC v9 ABI requires that floating point arguments are treated the same
// as integers when calling a varargs function. This does not apply to the
// fixed arguments that are part of the function's prototype.
//
// This function post-processes a CCValAssign array created by
// AnalyzeCallOperands().
static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
                                   ArrayRef<ISD::OutputArg> Outs) {
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    MVT ValTy = VA.getLocVT();
    // FIXME: What about f32 arguments? C promotes them to f64 when calling
    // varargs functions.
    if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
      continue;
    // The fixed arguments to a varargs function still go in FP registers.
    if (Outs[VA.getValNo()].IsFixed)
      continue;

    // This floating point argument should be reassigned.
    CCValAssign NewVA;

    // Determine the offset into the argument array.
    // FP registers map 1:1 onto the argument array: %d0..%d15 / %q0..%q7.
    unsigned firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
    unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
    unsigned Offset = argSize * (VA.getLocReg() - firstReg);
    assert(Offset < 16*8 && "Offset out of range, bad register enum?");

    if (Offset < 6*8) {
      // This argument should go in %i0-%i5.
      unsigned IReg = SP::I0 + Offset/8;
      if (ValTy == MVT::f64)
        // Full register, just bitconvert into i64.
        NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                    IReg, MVT::i64, CCValAssign::BCvt);
      else {
        assert(ValTy == MVT::f128 && "Unexpected type!");
        // Full register, just bitconvert into i128 -- We will lower this into
        // two i64s in LowerCall_64.
        NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
                                          IReg, MVT::i128, CCValAssign::BCvt);
      }
    } else {
      // This needs to go to memory, we're out of integer registers.
      NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                                  Offset, VA.getLocVT(), VA.getLocInfo());
    }
    ArgLocs[i] = NewVA;
  }
}

// Lower a call for the 64-bit ABI.
SDValue
SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;

  // Sparc target does not yet support tail call optimization.
  CLI.IsTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(),
                 DAG.getTarget(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // or not.
  unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());

  // Keep stack frames 16-byte aligned.
  ArgsSize = RoundUpToAlignment(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true),
                               DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
      // SPARC does not support i128 natively. Lower it into two i64, see below.
      if (!VA.needsCustom() || VA.getValVT() != MVT::f128
          || VA.getLocVT() != MVT::i128)
        Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f128
          && VA.getLocVT() == MVT::i128) {
        // Store and reload into the integer register reg and reg+1.
        unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
        unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
        SDValue StackPtr = DAG.getRegister(SP::O6, getPointerTy());
        SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset);
        HiPtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr,
                               HiPtrOff);
        SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8);
        LoPtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr,
                               LoPtrOff);

        // Store to %sp+BIAS+128+Offset
        SDValue Store = DAG.getStore(Chain, DL, Arg, HiPtrOff,
                                     MachinePointerInfo(),
                                     false, false, 0);
        // Load into Reg and Reg+1
        SDValue Hi64 = DAG.getLoad(MVT::i64, DL, Store, HiPtrOff,
                                   MachinePointerInfo(),
                                   false, false, false, 0);
        SDValue Lo64 = DAG.getLoad(MVT::i64, DL, Store, LoPtrOff,
                                   MachinePointerInfo(),
                                   false, false, false, 0);
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
                                            Hi64));
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
                                            Lo64));
        continue;
      }

      // The custom bit on an i32 return value indicates that it should be
      // passed in the high bits of the register.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
                          DAG.getConstant(32, MVT::i32));

        // The next value may go in the low bits of the same register.
        // Handle both at once.
        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
          SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
                                   CLI.OutVals[i+1]);
          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
          // Skip the next value, it's already done.
          ++i;
        }
      }
      RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, getPointerTy());
    // The argument area starts at %fp+BIAS+128 in the callee frame,
    // %sp+BIAS+128 in ours.
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
                                           Subtarget->getStackPointerBias() +
                                           128);
    PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, PtrOff);
    MemOpChains.push_back(DAG.getStore(Chain, DL, Arg, PtrOff,
                                       MachinePointerInfo(),
                                       false, false, 0));
  }

  // Emit all stores, make sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, DL,
                             RegsToPass[i].first, RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
  unsigned TF = ((getTargetMachine().getRelocationModel() == Reloc::PIC_)
                 ? SparcMCExpr::VK_Sparc_WPLT30 : 0);
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy(), 0,
                                        TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy(), TF);

  // Build the operands for the call instruction itself.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI =
    ((const SparcTargetMachine&)getTargetMachine()).getRegisterInfo();
  const uint32_t *Mask = ((hasReturnsTwice)
                          ? TRI->getRTCallPreservedMask(CLI.CallConv)
                          : TRI->getCallPreservedMask(CLI.CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
                             DAG.getIntPtrConstant(0, true), InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments_64.

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(),
                 DAG.getTarget(), RVLocs, *DAG.getContext());

  // Set inreg flag manually for codegen generated library calls that
  // return float.
  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && CLI.CS == 0)
    CLI.Ins[0].Flags.setInReg();

  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    unsigned Reg = toCallerWindow(VA.getLocReg());

    // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // CopyFromReg previous node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // Get the high bits for i32 struct elements.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom())
      RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
                       DAG.getConstant(32, MVT::i32));

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::ZExt:
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ISD::SETEQ:  return SPCC::ICC_E;
  case ISD::SETNE:  return SPCC::ICC_NE;
  case ISD::SETLT:  return SPCC::ICC_L;
  case ISD::SETGT:  return SPCC::ICC_G;
  case ISD::SETLE:  return SPCC::ICC_LE;
  case ISD::SETGE:  return SPCC::ICC_GE;
  case ISD::SETULT: return SPCC::ICC_CS;
  case ISD::SETULE: return SPCC::ICC_LEU;
  case ISD::SETUGT: return SPCC::ICC_GU;
  case ISD::SETUGE: return SPCC::ICC_CC;
  }
}

/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
/// FCC condition.
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  // Ordered and "don't care" forms map to the same FCC condition.
  case ISD::SETEQ:
  case ISD::SETOEQ: return SPCC::FCC_E;
  case ISD::SETNE:
  case ISD::SETUNE: return SPCC::FCC_NE;
  case ISD::SETLT:
  case ISD::SETOLT: return SPCC::FCC_L;
  case ISD::SETGT:
  case ISD::SETOGT: return SPCC::FCC_G;
  case ISD::SETLE:
  case ISD::SETOLE: return SPCC::FCC_LE;
  case ISD::SETGE:
  case ISD::SETOGE: return SPCC::FCC_GE;
  case ISD::SETULT: return SPCC::FCC_UL;
  case ISD::SETULE: return SPCC::FCC_ULE;
  case ISD::SETUGT: return SPCC::FCC_UG;
  case ISD::SETUGE: return SPCC::FCC_UGE;
  case ISD::SETUO:  return SPCC::FCC_U;
  case ISD::SETO:   return SPCC::FCC_O;
  case ISD::SETONE: return SPCC::FCC_LG;
  case ISD::SETUEQ: return SPCC::FCC_UE;
  }
}

SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
  : TargetLowering(TM, new SparcELFTargetObjectFile()) {
  Subtarget = &TM.getSubtarget<SparcSubtarget>();

  // Set up the register classes.
1374245431Sdim addRegisterClass(MVT::i32, &SP::IntRegsRegClass); 1375245431Sdim addRegisterClass(MVT::f32, &SP::FPRegsRegClass); 1376245431Sdim addRegisterClass(MVT::f64, &SP::DFPRegsRegClass); 1377263509Sdim addRegisterClass(MVT::f128, &SP::QFPRegsRegClass); 1378252723Sdim if (Subtarget->is64Bit()) 1379252723Sdim addRegisterClass(MVT::i64, &SP::I64RegsRegClass); 1380193323Sed 1381193323Sed // Turn FP extload into load/fextend 1382193323Sed setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand); 1383263509Sdim setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand); 1384263509Sdim 1385193323Sed // Sparc doesn't have i1 sign extending load 1386193323Sed setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); 1387263509Sdim 1388193323Sed // Turn FP truncstore into trunc + store. 1389193323Sed setTruncStoreAction(MVT::f64, MVT::f32, Expand); 1390263509Sdim setTruncStoreAction(MVT::f128, MVT::f32, Expand); 1391263509Sdim setTruncStoreAction(MVT::f128, MVT::f64, Expand); 1392193323Sed 1393193323Sed // Custom legalize GlobalAddress nodes into LO/HI parts. 1394252723Sdim setOperationAction(ISD::GlobalAddress, getPointerTy(), Custom); 1395252723Sdim setOperationAction(ISD::GlobalTLSAddress, getPointerTy(), Custom); 1396252723Sdim setOperationAction(ISD::ConstantPool, getPointerTy(), Custom); 1397263509Sdim setOperationAction(ISD::BlockAddress, getPointerTy(), Custom); 1398193323Sed 1399193323Sed // Sparc doesn't have sext_inreg, replace them with shl/sra 1400193323Sed setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); 1401193323Sed setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand); 1402193323Sed setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand); 1403193323Sed 1404193323Sed // Sparc has no REM or DIVREM operations. 
1405193323Sed setOperationAction(ISD::UREM, MVT::i32, Expand); 1406193323Sed setOperationAction(ISD::SREM, MVT::i32, Expand); 1407193323Sed setOperationAction(ISD::SDIVREM, MVT::i32, Expand); 1408193323Sed setOperationAction(ISD::UDIVREM, MVT::i32, Expand); 1409193323Sed 1410263509Sdim // ... nor does SparcV9. 1411263509Sdim if (Subtarget->is64Bit()) { 1412263509Sdim setOperationAction(ISD::UREM, MVT::i64, Expand); 1413263509Sdim setOperationAction(ISD::SREM, MVT::i64, Expand); 1414263509Sdim setOperationAction(ISD::SDIVREM, MVT::i64, Expand); 1415263509Sdim setOperationAction(ISD::UDIVREM, MVT::i64, Expand); 1416263509Sdim } 1417263509Sdim 1418193323Sed // Custom expand fp<->sint 1419193323Sed setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 1420193323Sed setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 1421263509Sdim setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); 1422263509Sdim setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); 1423193323Sed 1424263509Sdim // Custom Expand fp<->uint 1425263509Sdim setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 1426263509Sdim setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); 1427263509Sdim setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); 1428263509Sdim setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); 1429193323Sed 1430218893Sdim setOperationAction(ISD::BITCAST, MVT::f32, Expand); 1431218893Sdim setOperationAction(ISD::BITCAST, MVT::i32, Expand); 1432193323Sed 1433193323Sed // Sparc has no select or setcc: expand to SELECT_CC. 
1434193323Sed setOperationAction(ISD::SELECT, MVT::i32, Expand); 1435193323Sed setOperationAction(ISD::SELECT, MVT::f32, Expand); 1436193323Sed setOperationAction(ISD::SELECT, MVT::f64, Expand); 1437263509Sdim setOperationAction(ISD::SELECT, MVT::f128, Expand); 1438263509Sdim 1439193323Sed setOperationAction(ISD::SETCC, MVT::i32, Expand); 1440193323Sed setOperationAction(ISD::SETCC, MVT::f32, Expand); 1441193323Sed setOperationAction(ISD::SETCC, MVT::f64, Expand); 1442263509Sdim setOperationAction(ISD::SETCC, MVT::f128, Expand); 1443193323Sed 1444193323Sed // Sparc doesn't have BRCOND either, it has BR_CC. 1445193323Sed setOperationAction(ISD::BRCOND, MVT::Other, Expand); 1446193323Sed setOperationAction(ISD::BRIND, MVT::Other, Expand); 1447193323Sed setOperationAction(ISD::BR_JT, MVT::Other, Expand); 1448193323Sed setOperationAction(ISD::BR_CC, MVT::i32, Custom); 1449193323Sed setOperationAction(ISD::BR_CC, MVT::f32, Custom); 1450193323Sed setOperationAction(ISD::BR_CC, MVT::f64, Custom); 1451263509Sdim setOperationAction(ISD::BR_CC, MVT::f128, Custom); 1452193323Sed 1453193323Sed setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); 1454193323Sed setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 1455193323Sed setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 1456263509Sdim setOperationAction(ISD::SELECT_CC, MVT::f128, Custom); 1457193323Sed 1458252723Sdim if (Subtarget->is64Bit()) { 1459263509Sdim setOperationAction(ISD::ADDC, MVT::i64, Custom); 1460263509Sdim setOperationAction(ISD::ADDE, MVT::i64, Custom); 1461263509Sdim setOperationAction(ISD::SUBC, MVT::i64, Custom); 1462263509Sdim setOperationAction(ISD::SUBE, MVT::i64, Custom); 1463263509Sdim setOperationAction(ISD::BITCAST, MVT::f64, Expand); 1464263509Sdim setOperationAction(ISD::BITCAST, MVT::i64, Expand); 1465263509Sdim setOperationAction(ISD::SELECT, MVT::i64, Expand); 1466263509Sdim setOperationAction(ISD::SETCC, MVT::i64, Expand); 1467252723Sdim setOperationAction(ISD::BR_CC, MVT::i64, 
Custom); 1468252723Sdim setOperationAction(ISD::SELECT_CC, MVT::i64, Custom); 1469263509Sdim 1470263764Sdim setOperationAction(ISD::CTPOP, MVT::i64, 1471263764Sdim Subtarget->usePopc() ? Legal : Expand); 1472263509Sdim setOperationAction(ISD::CTTZ , MVT::i64, Expand); 1473263509Sdim setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); 1474263509Sdim setOperationAction(ISD::CTLZ , MVT::i64, Expand); 1475263509Sdim setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); 1476263509Sdim setOperationAction(ISD::BSWAP, MVT::i64, Expand); 1477263509Sdim setOperationAction(ISD::ROTL , MVT::i64, Expand); 1478263509Sdim setOperationAction(ISD::ROTR , MVT::i64, Expand); 1479263509Sdim setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom); 1480252723Sdim } 1481252723Sdim 1482263764Sdim // ATOMICs. 1483263764Sdim // FIXME: We insert fences for each atomics and generate sub-optimal code 1484263764Sdim // for PSO/TSO. Also, implement other atomicrmw operations. 1485193323Sed 1486263764Sdim setInsertFencesForAtomic(true); 1487263764Sdim 1488263764Sdim setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal); 1489263764Sdim setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, 1490263764Sdim (Subtarget->isV9() ? 
Legal: Expand)); 1491263764Sdim 1492263764Sdim 1493263764Sdim setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal); 1494263764Sdim 1495263764Sdim // Custom Lower Atomic LOAD/STORE 1496263764Sdim setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); 1497263764Sdim setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); 1498263764Sdim 1499263764Sdim if (Subtarget->is64Bit()) { 1500263764Sdim setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal); 1501263764Sdim setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal); 1502263764Sdim setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom); 1503263764Sdim setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom); 1504263764Sdim } 1505263764Sdim 1506263509Sdim if (!Subtarget->isV9()) { 1507263509Sdim // SparcV8 does not have FNEGD and FABSD. 1508263509Sdim setOperationAction(ISD::FNEG, MVT::f64, Custom); 1509263509Sdim setOperationAction(ISD::FABS, MVT::f64, Custom); 1510263509Sdim } 1511263509Sdim 1512263509Sdim setOperationAction(ISD::FSIN , MVT::f128, Expand); 1513263509Sdim setOperationAction(ISD::FCOS , MVT::f128, Expand); 1514263509Sdim setOperationAction(ISD::FSINCOS, MVT::f128, Expand); 1515263509Sdim setOperationAction(ISD::FREM , MVT::f128, Expand); 1516263509Sdim setOperationAction(ISD::FMA , MVT::f128, Expand); 1517193323Sed setOperationAction(ISD::FSIN , MVT::f64, Expand); 1518193323Sed setOperationAction(ISD::FCOS , MVT::f64, Expand); 1519252723Sdim setOperationAction(ISD::FSINCOS, MVT::f64, Expand); 1520193323Sed setOperationAction(ISD::FREM , MVT::f64, Expand); 1521224145Sdim setOperationAction(ISD::FMA , MVT::f64, Expand); 1522193323Sed setOperationAction(ISD::FSIN , MVT::f32, Expand); 1523193323Sed setOperationAction(ISD::FCOS , MVT::f32, Expand); 1524252723Sdim setOperationAction(ISD::FSINCOS, MVT::f32, Expand); 1525193323Sed setOperationAction(ISD::FREM , MVT::f32, Expand); 1526224145Sdim setOperationAction(ISD::FMA , MVT::f32, Expand); 1527193323Sed setOperationAction(ISD::CTTZ , MVT::i32, 
Expand); 1528235633Sdim setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); 1529193323Sed setOperationAction(ISD::CTLZ , MVT::i32, Expand); 1530235633Sdim setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); 1531193323Sed setOperationAction(ISD::ROTL , MVT::i32, Expand); 1532193323Sed setOperationAction(ISD::ROTR , MVT::i32, Expand); 1533193323Sed setOperationAction(ISD::BSWAP, MVT::i32, Expand); 1534263509Sdim setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); 1535193323Sed setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 1536193323Sed setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 1537263509Sdim setOperationAction(ISD::FPOW , MVT::f128, Expand); 1538193323Sed setOperationAction(ISD::FPOW , MVT::f64, Expand); 1539193323Sed setOperationAction(ISD::FPOW , MVT::f32, Expand); 1540193323Sed 1541193323Sed setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); 1542193323Sed setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); 1543193323Sed setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); 1544193323Sed 1545193323Sed // FIXME: Sparc provides these multiplies, but we don't have them yet. 
1546193323Sed setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); 1547193323Sed setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); 1548193323Sed 1549263509Sdim if (Subtarget->is64Bit()) { 1550263509Sdim setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); 1551263509Sdim setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); 1552263509Sdim setOperationAction(ISD::MULHU, MVT::i64, Expand); 1553263509Sdim setOperationAction(ISD::MULHS, MVT::i64, Expand); 1554263764Sdim 1555263764Sdim setOperationAction(ISD::UMULO, MVT::i64, Custom); 1556263764Sdim setOperationAction(ISD::SMULO, MVT::i64, Custom); 1557263764Sdim 1558263764Sdim setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand); 1559263764Sdim setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand); 1560263764Sdim setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand); 1561263509Sdim } 1562193323Sed 1563193323Sed // VASTART needs to be custom lowered to use the VarArgsFrameIndex. 1564193323Sed setOperationAction(ISD::VASTART , MVT::Other, Custom); 1565193323Sed // VAARG needs to be lowered to not do unaligned accesses for doubles. 1566193323Sed setOperationAction(ISD::VAARG , MVT::Other, Custom); 1567193323Sed 1568263764Sdim setOperationAction(ISD::TRAP , MVT::Other, Legal); 1569263764Sdim 1570193323Sed // Use the default implementation. 1571193323Sed setOperationAction(ISD::VACOPY , MVT::Other, Expand); 1572193323Sed setOperationAction(ISD::VAEND , MVT::Other, Expand); 1573193323Sed setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); 1574193323Sed setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand); 1575193323Sed setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); 1576193323Sed 1577263509Sdim setExceptionPointerRegister(SP::I0); 1578263509Sdim setExceptionSelectorRegister(SP::I1); 1579193323Sed 1580193323Sed setStackPointerRegisterToSaveRestore(SP::O6); 1581193323Sed 1582263764Sdim setOperationAction(ISD::CTPOP, MVT::i32, 1583263764Sdim Subtarget->usePopc() ? 
Legal : Expand); 1584193323Sed 1585263509Sdim if (Subtarget->isV9() && Subtarget->hasHardQuad()) { 1586263509Sdim setOperationAction(ISD::LOAD, MVT::f128, Legal); 1587263509Sdim setOperationAction(ISD::STORE, MVT::f128, Legal); 1588263509Sdim } else { 1589263509Sdim setOperationAction(ISD::LOAD, MVT::f128, Custom); 1590263509Sdim setOperationAction(ISD::STORE, MVT::f128, Custom); 1591263509Sdim } 1592263509Sdim 1593263509Sdim if (Subtarget->hasHardQuad()) { 1594263509Sdim setOperationAction(ISD::FADD, MVT::f128, Legal); 1595263509Sdim setOperationAction(ISD::FSUB, MVT::f128, Legal); 1596263509Sdim setOperationAction(ISD::FMUL, MVT::f128, Legal); 1597263509Sdim setOperationAction(ISD::FDIV, MVT::f128, Legal); 1598263509Sdim setOperationAction(ISD::FSQRT, MVT::f128, Legal); 1599263509Sdim setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal); 1600263509Sdim setOperationAction(ISD::FP_ROUND, MVT::f64, Legal); 1601263509Sdim if (Subtarget->isV9()) { 1602263509Sdim setOperationAction(ISD::FNEG, MVT::f128, Legal); 1603263509Sdim setOperationAction(ISD::FABS, MVT::f128, Legal); 1604263509Sdim } else { 1605263509Sdim setOperationAction(ISD::FNEG, MVT::f128, Custom); 1606263509Sdim setOperationAction(ISD::FABS, MVT::f128, Custom); 1607263509Sdim } 1608263509Sdim 1609263509Sdim if (!Subtarget->is64Bit()) { 1610263509Sdim setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll"); 1611263509Sdim setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull"); 1612263509Sdim setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq"); 1613263509Sdim setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq"); 1614263509Sdim } 1615263509Sdim 1616263509Sdim } else { 1617263509Sdim // Custom legalize f128 operations. 
1618263509Sdim 1619263509Sdim setOperationAction(ISD::FADD, MVT::f128, Custom); 1620263509Sdim setOperationAction(ISD::FSUB, MVT::f128, Custom); 1621263509Sdim setOperationAction(ISD::FMUL, MVT::f128, Custom); 1622263509Sdim setOperationAction(ISD::FDIV, MVT::f128, Custom); 1623263509Sdim setOperationAction(ISD::FSQRT, MVT::f128, Custom); 1624263509Sdim setOperationAction(ISD::FNEG, MVT::f128, Custom); 1625263509Sdim setOperationAction(ISD::FABS, MVT::f128, Custom); 1626263509Sdim 1627263509Sdim setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom); 1628263509Sdim setOperationAction(ISD::FP_ROUND, MVT::f64, Custom); 1629263509Sdim setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); 1630263509Sdim 1631263509Sdim // Setup Runtime library names. 1632263509Sdim if (Subtarget->is64Bit()) { 1633263509Sdim setLibcallName(RTLIB::ADD_F128, "_Qp_add"); 1634263509Sdim setLibcallName(RTLIB::SUB_F128, "_Qp_sub"); 1635263509Sdim setLibcallName(RTLIB::MUL_F128, "_Qp_mul"); 1636263509Sdim setLibcallName(RTLIB::DIV_F128, "_Qp_div"); 1637263509Sdim setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt"); 1638263509Sdim setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi"); 1639263509Sdim setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui"); 1640263509Sdim setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq"); 1641263509Sdim setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq"); 1642263509Sdim setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox"); 1643263509Sdim setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux"); 1644263509Sdim setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq"); 1645263509Sdim setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq"); 1646263509Sdim setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq"); 1647263509Sdim setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq"); 1648263509Sdim setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos"); 1649263509Sdim setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod"); 1650263509Sdim } else { 1651263509Sdim setLibcallName(RTLIB::ADD_F128, "_Q_add"); 
1652263509Sdim setLibcallName(RTLIB::SUB_F128, "_Q_sub"); 1653263509Sdim setLibcallName(RTLIB::MUL_F128, "_Q_mul"); 1654263509Sdim setLibcallName(RTLIB::DIV_F128, "_Q_div"); 1655263509Sdim setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt"); 1656263509Sdim setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi"); 1657263509Sdim setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou"); 1658263509Sdim setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq"); 1659263509Sdim setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq"); 1660263509Sdim setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll"); 1661263509Sdim setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull"); 1662263509Sdim setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq"); 1663263509Sdim setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq"); 1664263509Sdim setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq"); 1665263509Sdim setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq"); 1666263509Sdim setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos"); 1667263509Sdim setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod"); 1668263509Sdim } 1669263509Sdim } 1670263509Sdim 1671223017Sdim setMinFunctionAlignment(2); 1672223017Sdim 1673193323Sed computeRegisterProperties(); 1674193323Sed} 1675193323Sed 1676193323Sedconst char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const { 1677193323Sed switch (Opcode) { 1678193323Sed default: return 0; 1679193323Sed case SPISD::CMPICC: return "SPISD::CMPICC"; 1680193323Sed case SPISD::CMPFCC: return "SPISD::CMPFCC"; 1681193323Sed case SPISD::BRICC: return "SPISD::BRICC"; 1682252723Sdim case SPISD::BRXCC: return "SPISD::BRXCC"; 1683193323Sed case SPISD::BRFCC: return "SPISD::BRFCC"; 1684193323Sed case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC"; 1685252723Sdim case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC"; 1686193323Sed case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC"; 1687193323Sed case SPISD::Hi: return "SPISD::Hi"; 1688193323Sed case SPISD::Lo: return "SPISD::Lo"; 1689193323Sed case SPISD::FTOI: 
return "SPISD::FTOI"; 1690193323Sed case SPISD::ITOF: return "SPISD::ITOF"; 1691263509Sdim case SPISD::FTOX: return "SPISD::FTOX"; 1692263509Sdim case SPISD::XTOF: return "SPISD::XTOF"; 1693193323Sed case SPISD::CALL: return "SPISD::CALL"; 1694193323Sed case SPISD::RET_FLAG: return "SPISD::RET_FLAG"; 1695218893Sdim case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG"; 1696218893Sdim case SPISD::FLUSHW: return "SPISD::FLUSHW"; 1697263509Sdim case SPISD::TLS_ADD: return "SPISD::TLS_ADD"; 1698263509Sdim case SPISD::TLS_LD: return "SPISD::TLS_LD"; 1699263509Sdim case SPISD::TLS_CALL: return "SPISD::TLS_CALL"; 1700193323Sed } 1701193323Sed} 1702193323Sed 1703263509SdimEVT SparcTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const { 1704263509Sdim if (!VT.isVector()) 1705263509Sdim return MVT::i32; 1706263509Sdim return VT.changeVectorElementTypeToInteger(); 1707263509Sdim} 1708263509Sdim 1709193323Sed/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to 1710193323Sed/// be zero. Op is expected to be a target specific node. Used by DAG 1711193323Sed/// combiner. 
1712263509Sdimvoid SparcTargetLowering::computeMaskedBitsForTargetNode 1713263509Sdim (const SDValue Op, 1714263509Sdim APInt &KnownZero, 1715263509Sdim APInt &KnownOne, 1716263509Sdim const SelectionDAG &DAG, 1717263509Sdim unsigned Depth) const { 1718193323Sed APInt KnownZero2, KnownOne2; 1719235633Sdim KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0); 1720193323Sed 1721193323Sed switch (Op.getOpcode()) { 1722193323Sed default: break; 1723193323Sed case SPISD::SELECT_ICC: 1724252723Sdim case SPISD::SELECT_XCC: 1725193323Sed case SPISD::SELECT_FCC: 1726235633Sdim DAG.ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 1727235633Sdim DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 1728193323Sed assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1729193323Sed assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1730193323Sed 1731193323Sed // Only known if known in both the LHS and RHS. 1732193323Sed KnownOne &= KnownOne2; 1733193323Sed KnownZero &= KnownZero2; 1734193323Sed break; 1735193323Sed } 1736193323Sed} 1737193323Sed 1738193323Sed// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so 1739193323Sed// set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition. 
1740193323Sedstatic void LookThroughSetCC(SDValue &LHS, SDValue &RHS, 1741193323Sed ISD::CondCode CC, unsigned &SPCC) { 1742193323Sed if (isa<ConstantSDNode>(RHS) && 1743210299Sed cast<ConstantSDNode>(RHS)->isNullValue() && 1744193323Sed CC == ISD::SETNE && 1745252723Sdim (((LHS.getOpcode() == SPISD::SELECT_ICC || 1746252723Sdim LHS.getOpcode() == SPISD::SELECT_XCC) && 1747193323Sed LHS.getOperand(3).getOpcode() == SPISD::CMPICC) || 1748193323Sed (LHS.getOpcode() == SPISD::SELECT_FCC && 1749193323Sed LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) && 1750193323Sed isa<ConstantSDNode>(LHS.getOperand(0)) && 1751193323Sed isa<ConstantSDNode>(LHS.getOperand(1)) && 1752210299Sed cast<ConstantSDNode>(LHS.getOperand(0))->isOne() && 1753210299Sed cast<ConstantSDNode>(LHS.getOperand(1))->isNullValue()) { 1754193323Sed SDValue CMPCC = LHS.getOperand(3); 1755193323Sed SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue(); 1756193323Sed LHS = CMPCC.getOperand(0); 1757193323Sed RHS = CMPCC.getOperand(1); 1758193323Sed } 1759193323Sed} 1760193323Sed 1761252723Sdim// Convert to a target node and set target flags. 
1762252723SdimSDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF, 1763252723Sdim SelectionDAG &DAG) const { 1764252723Sdim if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) 1765252723Sdim return DAG.getTargetGlobalAddress(GA->getGlobal(), 1766263509Sdim SDLoc(GA), 1767252723Sdim GA->getValueType(0), 1768252723Sdim GA->getOffset(), TF); 1769198090Srdivacky 1770252723Sdim if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) 1771252723Sdim return DAG.getTargetConstantPool(CP->getConstVal(), 1772252723Sdim CP->getValueType(0), 1773252723Sdim CP->getAlignment(), 1774252723Sdim CP->getOffset(), TF); 1775218893Sdim 1776263509Sdim if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) 1777263509Sdim return DAG.getTargetBlockAddress(BA->getBlockAddress(), 1778263509Sdim Op.getValueType(), 1779263509Sdim 0, 1780263509Sdim TF); 1781263509Sdim 1782252723Sdim if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) 1783252723Sdim return DAG.getTargetExternalSymbol(ES->getSymbol(), 1784252723Sdim ES->getValueType(0), TF); 1785252723Sdim 1786252723Sdim llvm_unreachable("Unhandled address SDNode"); 1787193323Sed} 1788193323Sed 1789252723Sdim// Split Op into high and low parts according to HiTF and LoTF. 1790252723Sdim// Return an ADD node combining the parts. 1791252723SdimSDValue SparcTargetLowering::makeHiLoPair(SDValue Op, 1792252723Sdim unsigned HiTF, unsigned LoTF, 1793252723Sdim SelectionDAG &DAG) const { 1794263509Sdim SDLoc DL(Op); 1795252723Sdim EVT VT = Op.getValueType(); 1796252723Sdim SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG)); 1797252723Sdim SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG)); 1798252723Sdim return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo); 1799252723Sdim} 1800252723Sdim 1801252723Sdim// Build SDNodes for producing an address from a GlobalAddress, ConstantPool, 1802252723Sdim// or ExternalSymbol SDNode. 
1803252723SdimSDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const { 1804263509Sdim SDLoc DL(Op); 1805252723Sdim EVT VT = getPointerTy(); 1806252723Sdim 1807252723Sdim // Handle PIC mode first. 1808252723Sdim if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 1809252723Sdim // This is the pic32 code model, the GOT is known to be smaller than 4GB. 1810263764Sdim SDValue HiLo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22, 1811263764Sdim SparcMCExpr::VK_Sparc_GOT10, DAG); 1812252723Sdim SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT); 1813252723Sdim SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo); 1814263509Sdim // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this 1815263509Sdim // function has calls. 1816263509Sdim MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 1817263509Sdim MFI->setHasCalls(true); 1818252723Sdim return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr, 1819252723Sdim MachinePointerInfo::getGOT(), false, false, false, 0); 1820252723Sdim } 1821252723Sdim 1822252723Sdim // This is one of the absolute code models. 1823252723Sdim switch(getTargetMachine().getCodeModel()) { 1824252723Sdim default: 1825252723Sdim llvm_unreachable("Unsupported absolute code model"); 1826252723Sdim case CodeModel::Small: 1827252723Sdim // abs32. 1828263764Sdim return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI, 1829263764Sdim SparcMCExpr::VK_Sparc_LO, DAG); 1830252723Sdim case CodeModel::Medium: { 1831252723Sdim // abs44. 
1832263764Sdim SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44, 1833263764Sdim SparcMCExpr::VK_Sparc_M44, DAG); 1834252723Sdim H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, MVT::i32)); 1835263764Sdim SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG); 1836252723Sdim L44 = DAG.getNode(SPISD::Lo, DL, VT, L44); 1837252723Sdim return DAG.getNode(ISD::ADD, DL, VT, H44, L44); 1838252723Sdim } 1839252723Sdim case CodeModel::Large: { 1840252723Sdim // abs64. 1841263764Sdim SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH, 1842263764Sdim SparcMCExpr::VK_Sparc_HM, DAG); 1843252723Sdim Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, MVT::i32)); 1844263764Sdim SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI, 1845263764Sdim SparcMCExpr::VK_Sparc_LO, DAG); 1846252723Sdim return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo); 1847252723Sdim } 1848252723Sdim } 1849252723Sdim} 1850252723Sdim 1851252723SdimSDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op, 1852252723Sdim SelectionDAG &DAG) const { 1853252723Sdim return makeAddress(Op, DAG); 1854252723Sdim} 1855252723Sdim 1856198090SrdivackySDValue SparcTargetLowering::LowerConstantPool(SDValue Op, 1857207618Srdivacky SelectionDAG &DAG) const { 1858252723Sdim return makeAddress(Op, DAG); 1859193323Sed} 1860193323Sed 1861263509SdimSDValue SparcTargetLowering::LowerBlockAddress(SDValue Op, 1862263509Sdim SelectionDAG &DAG) const { 1863263509Sdim return makeAddress(Op, DAG); 1864193323Sed} 1865193323Sed 1866263509SdimSDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op, 1867263509Sdim SelectionDAG &DAG) const { 1868263509Sdim 1869263509Sdim GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1870263509Sdim SDLoc DL(GA); 1871263509Sdim const GlobalValue *GV = GA->getGlobal(); 1872263509Sdim EVT PtrVT = getPointerTy(); 1873263509Sdim 1874263509Sdim TLSModel::Model model = getTargetMachine().getTLSModel(GV); 1875263509Sdim 1876263509Sdim if (model == 
TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) { 1877263764Sdim unsigned HiTF = ((model == TLSModel::GeneralDynamic) 1878263764Sdim ? SparcMCExpr::VK_Sparc_TLS_GD_HI22 1879263764Sdim : SparcMCExpr::VK_Sparc_TLS_LDM_HI22); 1880263764Sdim unsigned LoTF = ((model == TLSModel::GeneralDynamic) 1881263764Sdim ? SparcMCExpr::VK_Sparc_TLS_GD_LO10 1882263764Sdim : SparcMCExpr::VK_Sparc_TLS_LDM_LO10); 1883263764Sdim unsigned addTF = ((model == TLSModel::GeneralDynamic) 1884263764Sdim ? SparcMCExpr::VK_Sparc_TLS_GD_ADD 1885263764Sdim : SparcMCExpr::VK_Sparc_TLS_LDM_ADD); 1886263764Sdim unsigned callTF = ((model == TLSModel::GeneralDynamic) 1887263764Sdim ? SparcMCExpr::VK_Sparc_TLS_GD_CALL 1888263764Sdim : SparcMCExpr::VK_Sparc_TLS_LDM_CALL); 1889263509Sdim 1890263509Sdim SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG); 1891263509Sdim SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT); 1892263509Sdim SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo, 1893263509Sdim withTargetFlags(Op, addTF, DAG)); 1894263509Sdim 1895263509Sdim SDValue Chain = DAG.getEntryNode(); 1896263509Sdim SDValue InFlag; 1897263509Sdim 1898263509Sdim Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(1, true), DL); 1899263509Sdim Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag); 1900263509Sdim InFlag = Chain.getValue(1); 1901263509Sdim SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT); 1902263509Sdim SDValue Symbol = withTargetFlags(Op, callTF, DAG); 1903263509Sdim 1904263509Sdim SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1905263509Sdim SmallVector<SDValue, 4> Ops; 1906263509Sdim Ops.push_back(Chain); 1907263509Sdim Ops.push_back(Callee); 1908263509Sdim Ops.push_back(Symbol); 1909263509Sdim Ops.push_back(DAG.getRegister(SP::O0, PtrVT)); 1910263509Sdim const uint32_t *Mask = getTargetMachine() 1911263509Sdim .getRegisterInfo()->getCallPreservedMask(CallingConv::C); 1912263509Sdim assert(Mask && "Missing 
call preserved mask for calling convention"); 1913263509Sdim Ops.push_back(DAG.getRegisterMask(Mask)); 1914263509Sdim Ops.push_back(InFlag); 1915263509Sdim Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, &Ops[0], Ops.size()); 1916263509Sdim InFlag = Chain.getValue(1); 1917263509Sdim Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, true), 1918263509Sdim DAG.getIntPtrConstant(0, true), InFlag, DL); 1919263509Sdim InFlag = Chain.getValue(1); 1920263509Sdim SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag); 1921263509Sdim 1922263509Sdim if (model != TLSModel::LocalDynamic) 1923263509Sdim return Ret; 1924263509Sdim 1925263509Sdim SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT, 1926263764Sdim withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG)); 1927263509Sdim SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT, 1928263764Sdim withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG)); 1929263509Sdim HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo); 1930263509Sdim return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo, 1931263764Sdim withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG)); 1932263509Sdim } 1933263509Sdim 1934263509Sdim if (model == TLSModel::InitialExec) { 1935263764Sdim unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX 1936263764Sdim : SparcMCExpr::VK_Sparc_TLS_IE_LD); 1937263509Sdim 1938263509Sdim SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT); 1939263509Sdim 1940263509Sdim // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this 1941263509Sdim // function has calls. 
    // Initial-exec TLS (continues the lowering started above): load the
    // thread-local offset through the IE hi/lo relocation pair, then add it
    // to the thread pointer %g7 via the TLS_ADD pseudo.
    MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    MFI->setHasCalls(true);

    SDValue TGA = makeHiLoPair(Op,
                               SparcMCExpr::VK_Sparc_TLS_IE_HI22,
                               SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
    SDValue Offset = DAG.getNode(SPISD::TLS_LD,
                                 DL, PtrVT, Ptr,
                                 withTargetFlags(Op, ldTF, DAG));
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
                       DAG.getRegister(SP::G7, PtrVT), Offset,
                       withTargetFlags(Op,
                                       SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG));
  }

  // Local-exec model: the offset is known at link time. Note the LE HIX22 /
  // LOX10 halves are combined with XOR (not OR/ADD) per their relocation
  // semantics, then added to the thread pointer %g7.
  assert(model == TLSModel::LocalExec);
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
                  withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG));
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
                  withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG));
  SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);

  return DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getRegister(SP::G7, PtrVT), Offset);
}

/// LowerF128_LibCallArg - Append one argument for an f128 soft-float library
/// call to Args. f128 values are not passed directly: the value is spilled to
/// a 16-byte, 8-aligned stack slot and the slot's address is passed instead.
/// Any other type is appended unchanged. Returns the (possibly updated) chain.
SDValue
SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args,
                                          SDValue Arg, SDLoc DL,
                                          SelectionDAG &DAG) const {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());

  ArgListEntry Entry;
  Entry.Node = Arg;
  Entry.Ty   = ArgTy;

  if (ArgTy->isFP128Ty()) {
    // Create a stack object and pass the pointer to the library function.
    int FI = MFI->CreateStackObject(16, 8, false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
    Chain = DAG.getStore(Chain,
                         DL,
                         Entry.Node,
                         FIPtr,
                         MachinePointerInfo(),
                         false,
                         false,
                         8);

    Entry.Node = FIPtr;
    Entry.Ty   = PointerType::getUnqual(ArgTy);
  }
  Args.push_back(Entry);
  return Chain;
}

/// LowerF128Op - Lower an f128 operation to a call to the named soft-float
/// library routine, taking the first numArgs operands of Op as call arguments.
/// If the result type is f128 it is returned indirectly: a 16-byte stack slot
/// is allocated, its address is prepended as an extra pointer argument (marked
/// sret on 32-bit targets), and the result is loaded back from the slot.
SDValue
SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
                                 const char *LibFuncName,
                                 unsigned numArgs) const {

  ArgListTy Args;

  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();

  SDValue Callee = DAG.getExternalSymbol(LibFuncName, getPointerTy());
  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
  Type *RetTyABI = RetTy;
  SDValue Chain = DAG.getEntryNode();
  SDValue RetPtr;

  if (RetTy->isFP128Ty()) {
    // Create a Stack Object to receive the return value of type f128.
    ArgListEntry Entry;
    int RetFI = MFI->CreateStackObject(16, 8, false);
    RetPtr = DAG.getFrameIndex(RetFI, getPointerTy());
    Entry.Node = RetPtr;
    Entry.Ty   = PointerType::getUnqual(RetTy);
    if (!Subtarget->is64Bit())
      Entry.isSRet = true;
    Entry.isReturned = false;
    Args.push_back(Entry);
    // The call itself returns void; the real result comes back via RetPtr.
    RetTyABI = Type::getVoidTy(*DAG.getContext());
  }

  assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
  for (unsigned i = 0, e = numArgs; i != e; ++i) {
    Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
  }
  TargetLowering::
    CallLoweringInfo CLI(Chain,
                         RetTyABI,
                         false, false, false, false,
                         0, CallingConv::C,
                         false, false, true,
                         Callee, Args, DAG, SDLoc(Op));
  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // chain is in second result.
  if (RetTyABI == RetTy)
    return CallInfo.first;

  assert (RetTy->isFP128Ty() && "Unexpected return type!");

  Chain = CallInfo.second;

  // Load RetPtr to get the return value.
  return DAG.getLoad(Op.getValueType(),
                     SDLoc(Op),
                     Chain,
                     RetPtr,
                     MachinePointerInfo(),
                     false, false, false, 8);
}

/// LowerF128Compare - Lower an f128 comparison to a soft-float compare
/// library call and rewrite SPCC (in/out) into an integer condition code that
/// tests the call's i32 result. Simple orderings map to dedicated _Q[p]_f*
/// predicate routines; the unordered/ordered conditions fall through to the
/// general _Q[p]_cmp routine and are decoded from its result below.
SDValue
SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
                                      unsigned &SPCC,
                                      SDLoc DL,
                                      SelectionDAG &DAG) const {

  const char *LibCall = 0;
  bool is64Bit = Subtarget->is64Bit();
  switch(SPCC) {
  default: llvm_unreachable("Unhandled conditional code!");
  case SPCC::FCC_E  : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
  case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
  case SPCC::FCC_L  : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
  case SPCC::FCC_G  : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
  case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
  case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
  case SPCC::FCC_UL :
  case SPCC::FCC_ULE:
  case SPCC::FCC_UG :
  case SPCC::FCC_UGE:
  case SPCC::FCC_U  :
  case SPCC::FCC_O  :
  case SPCC::FCC_LG :
  case SPCC::FCC_UE : LibCall = is64Bit?
                      "_Qp_cmp" : "_Q_cmp"; break;
  }

  SDValue Callee = DAG.getExternalSymbol(LibCall, getPointerTy());
  Type *RetTy = Type::getInt32Ty(*DAG.getContext());
  ArgListTy Args;
  SDValue Chain = DAG.getEntryNode();
  Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
  Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);

  TargetLowering::
    CallLoweringInfo CLI(Chain,
                         RetTy,
                         false, false, false, false,
                         0, CallingConv::C,
                         false, false, true,
                         Callee, Args, DAG, DL);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // result is in first, and chain is in second result.
  SDValue Result =  CallInfo.first;

  // Translate the libcall's i32 result into a CMPICC against a constant and
  // set SPCC to the integer condition the caller should branch/select on.
  // The _Q[p]_cmp cases below assume the conventional result encoding
  // (presumably 0=equal, 1=less, 2=greater, 3=unordered — the mask/constant
  // choices rely on it; verify against the SPARC compiler ABI if touching).
  switch(SPCC) {
  default: {
    // Predicate routines (_Q_feq etc.) return nonzero when the predicate
    // holds, so test Result != 0.
    SDValue RHS = DAG.getTargetConstant(0, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UL : {
    // unordered-or-less: (Result & 1) != 0 covers both 1 and 3.
    SDValue Mask   = DAG.getTargetConstant(1, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS    = DAG.getTargetConstant(0, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_ULE: {
    // unordered-or-less-or-equal: anything except "greater" (2).
    SDValue RHS = DAG.getTargetConstant(2, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UG :  {
    // unordered-or-greater: Result > 1, i.e. 2 or 3.
    SDValue RHS = DAG.getTargetConstant(1, Result.getValueType());
    SPCC = SPCC::ICC_G;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UGE: {
    // unordered-or-greater-or-equal: anything except "less" (1).
    SDValue RHS = DAG.getTargetConstant(1, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }

  case SPCC::FCC_U  :  {
    // unordered: Result == 3.
    SDValue RHS = DAG.getTargetConstant(3, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_O  :  {
    // ordered: Result != 3.
    SDValue RHS = DAG.getTargetConstant(3, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_LG :  {
    // less-or-greater: (Result & 3) != 0 excludes equal (0); unordered (3)
    // also fails the original FCC_LG, matching the masked test only for the
    // ordered outcomes the caller expects here.
    SDValue Mask   = DAG.getTargetConstant(3, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS    = DAG.getTargetConstant(0, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UE : {
    // unordered-or-equal: (Result & 3) == 0 selects equal (0).
    SDValue Mask   = DAG.getTargetConstant(3, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS    = DAG.getTargetConstant(0, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  }
}

/// Lower FP_EXTEND to f128 via the appropriate soft-float extension libcall.
static SDValue
LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
                   const SparcTargetLowering &TLI) {

  if (Op.getOperand(0).getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);

  if (Op.getOperand(0).getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);

  llvm_unreachable("fpextend with non-float operand!");
  return SDValue(0, 0);
}

/// Lower FP_ROUND from f128 via the appropriate soft-float truncation libcall;
/// rounds not involving f128 are already legal and are returned unchanged.
static SDValue
LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
                  const SparcTargetLowering &TLI) {
  // FP_ROUND on f64 and f32 are legal.
  if (Op.getOperand(0).getValueType() != MVT::f128)
    return Op;

  if (Op.getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
  if (Op.getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);

  llvm_unreachable("fpround to non-float!");
  return SDValue(0, 0);
}

/// Lower FP_TO_SINT: f128 sources go through a libcall (unless hard-quad is
/// available and the result type is legal); otherwise convert in an FP
/// register with FTOI/FTOX and bitcast the result to the integer type.
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  assert(VT == MVT::i32 || VT == MVT::i64);

  // Expand f128 operations to fp128 abi calls.
  if (Op.getOperand(0).getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
    const char *libName = TLI.getLibcallName(VT == MVT::i32
                                             ? RTLIB::FPTOSINT_F128_I32
                                             : RTLIB::FPTOSINT_F128_I64);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the resulting type is illegal.
  if (!TLI.isTypeLegal(VT))
    return SDValue(0, 0);

  // Otherwise, Convert the fp value to integer in an FP register.
  if (VT == MVT::i32)
    Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
  else
    Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));

  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}

/// Lower SINT_TO_FP: the mirror of LowerFP_TO_SINT — f128 destinations go
/// through a libcall; otherwise bitcast the integer into an FP register and
/// convert with ITOF/XTOF.
static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || (OpVT == MVT::i64));

  EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;

  // Expand f128 operations to fp128 ABI calls.
  if (Op.getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
    const char *libName = TLI.getLibcallName(OpVT == MVT::i32
                                             ? RTLIB::SINTTOFP_I32_F128
                                             : RTLIB::SINTTOFP_I64_F128);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the operand type is illegal.
  if (!TLI.isTypeLegal(OpVT))
    return SDValue(0, 0);

  // Otherwise, Convert the int value to FP in an FP register.
  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
  unsigned opcode = (OpVT == MVT::i32)?
                    SPISD::ITOF : SPISD::XTOF;
  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
}

/// Lower FP_TO_UINT from f128 via a libcall; everything else is left for the
/// generic expander (returns a null SDValue).
static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the resulting type is legal.
  if (Op.getOperand(0).getValueType() != MVT::f128 ||
      (hasHardQuad && TLI.isTypeLegal(VT)))
    return SDValue(0, 0);

  assert(VT == MVT::i32 || VT == MVT::i64);

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(VT == MVT::i32
                                            ? RTLIB::FPTOUINT_F128_I32
                                            : RTLIB::FPTOUINT_F128_I64),
                         1);
}

/// Lower UINT_TO_FP into f128 via a libcall; everything else is left for the
/// generic expander (returns a null SDValue).
static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || OpVT == MVT::i64);

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the operand type is legal.
  if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
    return SDValue(0, 0);

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(OpVT == MVT::i32
                                            ? RTLIB::UINTTOFP_I32_F128
                                            : RTLIB::UINTTOFP_I64_F128),
                         1);
}

/// Lower BR_CC to a compare node producing glue plus the matching Sparc
/// conditional-branch pseudo (BRICC/BRXCC for integers, BRFCC for FP; soft
/// f128 compares are turned into an integer compare via LowerF128Compare).
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
                          const SparcTargetLowering &TLI,
                          bool hasHardQuad) {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a br_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);

  // Get the condition flag.
  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
    // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
    Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      // Soft f128 compare produces an integer flag, so branch on icc.
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = SPISD::BRICC;
    } else {
      CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      Opc = SPISD::BRFCC;
    }
  }
  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
                     DAG.getConstant(SPCC, MVT::i32), CompareFlag);
}

/// Lower SELECT_CC analogously to LowerBR_CC, producing a SELECT_ICC /
/// SELECT_XCC / SELECT_FCC pseudo fed by the glue of the compare.
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
                              const SparcTargetLowering &TLI,
                              bool hasHardQuad) {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a select_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);

  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    Opc = LHS.getValueType() == MVT::i32 ?
          SPISD::SELECT_ICC : SPISD::SELECT_XCC;
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      // Soft f128 compare produces an integer flag, so select on icc.
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = SPISD::SELECT_ICC;
    } else {
      CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
      Opc = SPISD::SELECT_FCC;
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
    }
  }
  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
                     DAG.getConstant(SPCC, MVT::i32), CompareFlag);
}

/// Lower VASTART: store the frame-relative address of the first variadic
/// argument (%i6 + VarArgsFrameOffset) into the va_list slot.
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
                            const SparcTargetLowering &TLI) {
  MachineFunction &MF = DAG.getMachineFunction();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  // Need frame address to find the address of VarArgsFrameIndex.
  MF.getFrameInfo()->setFrameAddressIsTaken(true);

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc DL(Op);
  SDValue Offset =
    DAG.getNode(ISD::ADD, DL, TLI.getPointerTy(),
                DAG.getRegister(SP::I6, TLI.getPointerTy()),
                DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset()));
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
                      MachinePointerInfo(SV), false, false, 0);
}

/// Lower VAARG: load the current va_list pointer, bump it past this argument,
/// store it back, then load the argument itself from the old pointer.
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  SDValue VAList = DAG.getLoad(PtrVT, DL, InChain, VAListPtr,
                               MachinePointerInfo(SV), false, false, false, 0);
  // Increment the pointer, VAList, to the next vaarg.
  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits()/8));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr,
                         VAListPtr, MachinePointerInfo(SV), false, false, 0);
  // Load the actual argument out of the pointer VAList.
  // We can't count on greater alignment than the word size.
  return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(),
                     false, false, false,
                     std::min(PtrVT.getSizeInBits(), VT.getSizeInBits())/8);
}

/// Lower DYNAMIC_STACKALLOC: bump %o6 down by Size and return a pointer
/// skipping the register-save area (and the 64-bit stack bias) so callees'
/// window spills cannot clobber the allocation.
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
                                       const SparcSubtarget *Subtarget) {
  SDValue Chain = Op.getOperand(0);  // Legalize the chain.
  SDValue Size  = Op.getOperand(1);  // Legalize the size.
  EVT VT = Size->getValueType(0);
  SDLoc dl(Op);

  unsigned SPReg = SP::O6;
  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
  SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP);    // Output chain

  // The resultant pointer is actually 16 words from the bottom of the stack,
  // to provide a register spill area.
  unsigned regSpillArea = Subtarget->is64Bit() ? 128 : 96;
  regSpillArea += Subtarget->getStackPointerBias();

  SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
                               DAG.getConstant(regSpillArea, VT));
  SDValue Ops[2] = { NewVal, Chain };
  return DAG.getMergeValues(Ops, 2, dl);
}

/// Emit a FLUSHW node so all register windows are written to the stack before
/// we walk saved frame pointers in getFRAMEADDR below.
static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
                              dl, MVT::Other, DAG.getEntryNode());
  return Chain;
}

/// Compute the frame address `depth` frames up. Depth 0 is just %i6 (plus the
/// stack bias on 64-bit); deeper frames require flushing the register windows
/// and chasing the saved frame pointer stored in each frame.
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
                            const SparcSubtarget *Subtarget) {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned FrameReg = SP::I6;
  unsigned stackBias = Subtarget->getStackPointerBias();

  SDValue FrameAddr;

  if (depth == 0) {
    FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
    if (Subtarget->is64Bit())
      FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                              DAG.getIntPtrConstant(stackBias));
    return FrameAddr;
  }

  // flush first to make sure the windowed registers' values are in stack
  SDValue Chain = getFLUSHW(Op, DAG);
  FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);

  // Offset of the saved frame pointer within a frame's register save area
  // (%i6 slot): 14 registers in, i.e. 14*8 past the bias on 64-bit, 14*4 on
  // 32-bit.
  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;

  while (depth--) {
    SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset));
    FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo(),
                            false, false, false, 0);
  }
  if (Subtarget->is64Bit())
    FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                            DAG.getIntPtrConstant(stackBias));
  return FrameAddr;
}


/// Lower FRAMEADDR by delegating to getFRAMEADDR with the requested depth.
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
                              const SparcSubtarget *Subtarget) {

  uint64_t depth = Op.getConstantOperandVal(0);

  return getFRAMEADDR(depth, Op, DAG, Subtarget);

}

/// Lower RETURNADDR: depth 0 reads %i7 as a live-in; deeper frames load the
/// saved return address out of the corresponding caller frame located via
/// getFRAMEADDR.
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               const SparcSubtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  uint64_t depth = Op.getConstantOperandVal(0);

  SDValue RetAddr;
  if (depth == 0) {
    unsigned RetReg = MF.addLiveIn(SP::I7,
                                   TLI.getRegClassFor(TLI.getPointerTy()));
    RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
    return RetAddr;
  }

  // Need frame address to find return address of the caller.
  SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget);

  // Offset of the saved %i7 (return address) slot within the frame.
  unsigned Offset = (Subtarget->is64Bit()) ?
                                            120 : 60;
  SDValue Ptr = DAG.getNode(ISD::ADD,
                            dl, VT,
                            FrameAddr,
                            DAG.getIntPtrConstant(Offset));
  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr,
                        MachinePointerInfo(), false, false, false, 0);

  return RetAddr;
}

/// Lower FNEG/FABS on f64 for pre-V9 targets that only have the f32 forms:
/// apply the operation to the even (high) half and copy the odd half through.
static SDValue LowerF64Op(SDValue Op, SelectionDAG &DAG, unsigned opcode)
{
  SDLoc dl(Op);

  assert(Op.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
  assert(opcode == ISD::FNEG || opcode == ISD::FABS);

  // Lower fneg/fabs on f64 to fneg/fabs on f32.
  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.

  SDValue SrcReg64 = Op.getOperand(0);
  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
                                            SrcReg64);
  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
                                            SrcReg64);

  // Only the high half carries the sign bit, so only it needs the operation.
  Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);

  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                dl, MVT::f64), 0);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
                                       DstReg64, Hi32);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
                                       DstReg64, Lo32);
  return DstReg64;
}

// Lower a f128 load into two f64 loads.
static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
{
  SDLoc dl(Op);
  LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode());
  assert(LdNode && LdNode->getOffset().getOpcode() == ISD::UNDEF
         && "Unexpected node type");

  // Each half is an f64, so we cannot claim more than 8-byte alignment.
  unsigned alignment = LdNode->getAlignment();
  if (alignment > 8)
    alignment = 8;

  SDValue Hi64 = DAG.getLoad(MVT::f64,
                             dl,
                             LdNode->getChain(),
                             LdNode->getBasePtr(),
                             LdNode->getPointerInfo(),
                             false, false, false, alignment);
  EVT addrVT = LdNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              LdNode->getBasePtr(),
                              DAG.getConstant(8, addrVT));
  SDValue Lo64 = DAG.getLoad(MVT::f64,
                             dl,
                             LdNode->getChain(),
                             LoPtr,
                             LdNode->getPointerInfo(),
                             false, false, false, alignment);

  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, MVT::i32);
  SDValue SubRegOdd  = DAG.getTargetConstant(SP::sub_odd64, MVT::i32);

  // Reassemble the two halves into an f128 register pair via INSERT_SUBREG.
  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                       dl, MVT::f128);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Hi64,
                               SubRegEven);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Lo64,
                               SubRegOdd);
  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
                           SDValue(Lo64.getNode(), 1) };
  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                 &OutChains[0], 2);
  SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
  return DAG.getMergeValues(Ops, 2, dl);
}

// Lower a f128 store into two f64 stores.
static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
  assert(StNode && StNode->getOffset().getOpcode() == ISD::UNDEF
         && "Unexpected node type");
  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, MVT::i32);
  SDValue SubRegOdd  = DAG.getTargetConstant(SP::sub_odd64, MVT::i32);

  // Split the f128 value into its even/odd f64 halves.
  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegEven);
  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegOdd);

  // Each half is an f64, so we cannot claim more than 8-byte alignment.
  unsigned alignment = StNode->getAlignment();
  if (alignment > 8)
    alignment = 8;

  SDValue OutChains[2];
  OutChains[0] = DAG.getStore(StNode->getChain(),
                              dl,
                              SDValue(Hi64, 0),
                              StNode->getBasePtr(),
                              MachinePointerInfo(),
                              false, false, alignment);
  EVT addrVT = StNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              StNode->getBasePtr(),
                              DAG.getConstant(8, addrVT));
  OutChains[1] = DAG.getStore(StNode->getChain(),
                              dl,
                              SDValue(Lo64, 0),
                              LoPtr,
                              MachinePointerInfo(),
                              false, false, alignment);
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                     &OutChains[0], 2);
}

/// Lower FNEG/FABS: f64 goes through LowerF64Op; f128 is split into f64
/// halves, the operation applied to the even (sign-carrying) half — directly
/// on V9, via LowerF64Op otherwise — and the halves reassembled.
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS) && "invalid");

  if (Op.getValueType() == MVT::f64)
    return LowerF64Op(Op, DAG, Op.getOpcode());
  if (Op.getValueType() != MVT::f128)
    return Op;

  // Lower fabs/fneg on f128 to fabs/fneg on f64
  // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64

  SDLoc dl(Op);
  SDValue SrcReg128 = Op.getOperand(0);
  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
                                            SrcReg128);
  SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
                                            SrcReg128);
  if (isV9)
    Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
  else
    Hi64 = LowerF64Op(Hi64, DAG, Op.getOpcode());

  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, MVT::f128), 0);
  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
                                        DstReg128, Hi64);
  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
                                        DstReg128, Lo64);
  return DstReg128;
}

/// Lower i64 ADDC/ADDE/SUBC/SUBE on 32-bit Sparc: split both operands into
/// 32-bit halves, do the low half with the carry-producing opcode and the
/// high half with the carry-consuming one, then reassemble, forwarding the
/// final carry as the second result.
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {

  if (Op.getValueType() != MVT::i64)
    return Op;

  SDLoc dl(Op);
  SDValue Src1 = Op.getOperand(0);
  SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
  SDValue Src1Hi = DAG.getNode(ISD::SRL,
                               dl, MVT::i64, Src1,
                               DAG.getConstant(32, MVT::i64));
  Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);

  SDValue Src2 = Op.getOperand(1);
  SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
  SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
                               DAG.getConstant(32, MVT::i64));
  Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);


  // The low half keeps the original opcode; the high half must consume the
  // carry, so ADDC/SUBC become ADDE/SUBE. ADDE/SUBE also take an incoming
  // carry operand (Op.getOperand(2)).
  bool hasChain = false;
  unsigned hiOpc = Op.getOpcode();
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid opcode");
  case ISD::ADDC: hiOpc = ISD::ADDE; break;
  case ISD::ADDE: hasChain = true; break;
  case ISD::SUBC: hiOpc = ISD::SUBE; break;
  case ISD::SUBE: hasChain = true; break;
  }
  SDValue Lo;
  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
  if (hasChain) {
    Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
                     Op.getOperand(2));
  } else {
    Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
  }
  SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
  SDValue Carry = Hi.getValue(1);

  Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
  Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
  Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
                   DAG.getConstant(32, MVT::i64));

  SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
  SDValue Ops[2] = { Dst, Carry };
  return DAG.getMergeValues(Ops, 2, dl);
}

// Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
// in LegalizeDAG.cpp except the order of arguments to the library function.
static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
                                const SparcTargetLowering &TLI)
{
  unsigned opcode = Op.getOpcode();
  assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");

  bool isSigned = (opcode == ISD::SMULO);
  EVT VT = MVT::i64;
  EVT WideVT = MVT::i128;
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);

  if (LHS.getValueType() != VT)
    return Op;

  SDValue ShiftAmt = DAG.getConstant(63, VT);

  SDValue RHS = Op.getOperand(1);
  // Sign-extend each operand into the high half of a 128-bit value; for the
  // unsigned case the overflow test below only looks at the top half anyway.
  SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
  SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
  SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };

  SDValue MulResult = TLI.makeLibCall(DAG,
                                      RTLIB::MUL_I128, WideVT,
                                      Args, 4, isSigned, dl).first;
  SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
                                   MulResult, DAG.getIntPtrConstant(0));
  SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
                                MulResult, DAG.getIntPtrConstant(1));
  // Overflow iff the top half is not the sign-extension of the bottom half
  // (signed) or not zero (unsigned).
  if (isSigned) {
    SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
  } else {
    TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, VT),
                           ISD::SETNE);
  }
  // MulResult is a node with an illegal type. Because such things are not
  // generally permitted during this phase of legalization, delete the
  // node. The above EXTRACT_ELEMENT nodes should have been folded.
  DAG.DeleteNode(MulResult.getNode());

  SDValue Ops[2] = { BottomHalf, TopHalf } ;
  return DAG.getMergeValues(Ops, 2, dl);
}

/// Lower ATOMIC_LOAD/ATOMIC_STORE: orderings up to monotonic need no fence;
/// anything stronger returns a null SDValue so the generic code expands it
/// with explicit fences.
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
  // Monotonic load/stores are legal.
  if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
    return Op;

  // Otherwise, expand with a fence.
  return SDValue();
}


SDValue SparcTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {

  bool hasHardQuad = Subtarget->hasHardQuad();
  bool isV9 = Subtarget->isV9();

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");

  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG, *this,
                                                       Subtarget);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG,
                                                      Subtarget);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::FP_TO_UINT:         return LowerFP_TO_UINT(Op, DAG, *this,
                                                       hasHardQuad);
2806263509Sdim case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this, 2807263509Sdim hasHardQuad); 2808263509Sdim case ISD::BR_CC: return LowerBR_CC(Op, DAG, *this, 2809263509Sdim hasHardQuad); 2810263509Sdim case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, *this, 2811263509Sdim hasHardQuad); 2812193323Sed case ISD::VASTART: return LowerVASTART(Op, DAG, *this); 2813193323Sed case ISD::VAARG: return LowerVAARG(Op, DAG); 2814263509Sdim case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG, 2815263509Sdim Subtarget); 2816263509Sdim 2817263509Sdim case ISD::LOAD: return LowerF128Load(Op, DAG); 2818263509Sdim case ISD::STORE: return LowerF128Store(Op, DAG); 2819263509Sdim case ISD::FADD: return LowerF128Op(Op, DAG, 2820263509Sdim getLibcallName(RTLIB::ADD_F128), 2); 2821263509Sdim case ISD::FSUB: return LowerF128Op(Op, DAG, 2822263509Sdim getLibcallName(RTLIB::SUB_F128), 2); 2823263509Sdim case ISD::FMUL: return LowerF128Op(Op, DAG, 2824263509Sdim getLibcallName(RTLIB::MUL_F128), 2); 2825263509Sdim case ISD::FDIV: return LowerF128Op(Op, DAG, 2826263509Sdim getLibcallName(RTLIB::DIV_F128), 2); 2827263509Sdim case ISD::FSQRT: return LowerF128Op(Op, DAG, 2828263509Sdim getLibcallName(RTLIB::SQRT_F128),1); 2829263764Sdim case ISD::FABS: 2830263764Sdim case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9); 2831263509Sdim case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this); 2832263509Sdim case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this); 2833263509Sdim case ISD::ADDC: 2834263509Sdim case ISD::ADDE: 2835263509Sdim case ISD::SUBC: 2836263509Sdim case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 2837263764Sdim case ISD::UMULO: 2838263764Sdim case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this); 2839263764Sdim case ISD::ATOMIC_LOAD: 2840263764Sdim case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG); 2841193323Sed } 2842193323Sed} 2843193323Sed 2844193323SedMachineBasicBlock * 
SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB) const {
  // Dispatch each custom-inserted pseudo to the matching expander:
  // SELECT_CC pseudos become a branch diamond, atomic pseudos become a
  // compare-and-swap loop.
  switch (MI->getOpcode()) {
  default: llvm_unreachable("Unknown SELECT_CC!");
  case SP::SELECT_CC_Int_ICC:
  case SP::SELECT_CC_FP_ICC:
  case SP::SELECT_CC_DFP_ICC:
  case SP::SELECT_CC_QFP_ICC:
    return expandSelectCC(MI, BB, SP::BCOND);
  case SP::SELECT_CC_Int_FCC:
  case SP::SELECT_CC_FP_FCC:
  case SP::SELECT_CC_DFP_FCC:
  case SP::SELECT_CC_QFP_FCC:
    return expandSelectCC(MI, BB, SP::FBCOND);

  case SP::ATOMIC_LOAD_ADD_32:
    return expandAtomicRMW(MI, BB, SP::ADDrr);
  case SP::ATOMIC_LOAD_ADD_64:
    return expandAtomicRMW(MI, BB, SP::ADDXrr);
  case SP::ATOMIC_LOAD_SUB_32:
    return expandAtomicRMW(MI, BB, SP::SUBrr);
  case SP::ATOMIC_LOAD_SUB_64:
    return expandAtomicRMW(MI, BB, SP::SUBXrr);
  case SP::ATOMIC_LOAD_AND_32:
    return expandAtomicRMW(MI, BB, SP::ANDrr);
  case SP::ATOMIC_LOAD_AND_64:
    return expandAtomicRMW(MI, BB, SP::ANDXrr);
  case SP::ATOMIC_LOAD_OR_32:
    return expandAtomicRMW(MI, BB, SP::ORrr);
  case SP::ATOMIC_LOAD_OR_64:
    return expandAtomicRMW(MI, BB, SP::ORXrr);
  case SP::ATOMIC_LOAD_XOR_32:
    return expandAtomicRMW(MI, BB, SP::XORrr);
  case SP::ATOMIC_LOAD_XOR_64:
    return expandAtomicRMW(MI, BB, SP::XORXrr);
  // NAND uses AND here; expandAtomicRMW adds the complementing XOR for the
  // ATOMIC_LOAD_NAND_* opcodes itself.
  case SP::ATOMIC_LOAD_NAND_32:
    return expandAtomicRMW(MI, BB, SP::ANDrr);
  case SP::ATOMIC_LOAD_NAND_64:
    return expandAtomicRMW(MI, BB, SP::ANDXrr);

  // Opcode 0 means "no update operation": swap writes rs2 directly.
  case SP::ATOMIC_SWAP_64:
    return expandAtomicRMW(MI, BB, 0);

  // Min/max pass a conditional-move opcode plus the condition to select on.
  case SP::ATOMIC_LOAD_MAX_32:
    return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_G);
  case SP::ATOMIC_LOAD_MAX_64:
    return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_G);
  case SP::ATOMIC_LOAD_MIN_32:
    return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LE);
  case SP::ATOMIC_LOAD_MIN_64:
    return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LE);
  case SP::ATOMIC_LOAD_UMAX_32:
    return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_GU);
  case SP::ATOMIC_LOAD_UMAX_64:
    return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_GU);
  case SP::ATOMIC_LOAD_UMIN_32:
    return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LEU);
  case SP::ATOMIC_LOAD_UMIN_64:
    return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LEU);
  }
}

// Expand a SELECT_CC pseudo into an explicit branch diamond ending in a PHI.
MachineBasicBlock*
SparcTargetLowering::expandSelectCC(MachineInstr *MI,
                                    MachineBasicBlock *BB,
                                    unsigned BROpcode) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  // Operand 3 of the pseudo holds the SPCC condition code.
  unsigned CC = (SPCC::CondCodes)MI->getOperand(3).getImm();

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  // thisMBB:
  //  ...
  //   TrueVal = ...
  //   [f]bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // Branch to sinkMBB when the condition holds (the "true" path).
  BuildMI(BB, dl, TII.get(BROpcode)).addMBB(sinkMBB).addImm(CC);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl, TII.get(SP::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(1).getReg()).addMBB(thisMBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

// Expand an atomic read-modify-write pseudo into a CAS retry loop.
MachineBasicBlock*
SparcTargetLowering::expandAtomicRMW(MachineInstr *MI,
                                     MachineBasicBlock *MBB,
                                     unsigned Opcode,
                                     unsigned CondCode) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  // MI is an atomic read-modify-write instruction of the form:
  //
  //   rd = atomicrmw<op> addr, rs2
  //
  // All three operands are registers.
  unsigned DestReg = MI->getOperand(0).getReg();
  unsigned AddrReg = MI->getOperand(1).getReg();
  unsigned Rs2Reg  = MI->getOperand(2).getReg();

  // SelectionDAG has already inserted memory barriers before and after MI, so
  // we simply have to implement the operation in terms of compare-and-swap.
  //
  //   %val0 = load %addr
  // loop:
  //   %val = phi %val0, %dest
  //   %upd = op %val, %rs2
  //   %dest = cas %addr, %val, %upd
  //   cmp %val, %dest
  //   bne loop
  // done:
  //
  // The register class of DestReg selects between the 32- and 64-bit loop.
  bool is64Bit = SP::I64RegsRegClass.hasSubClassEq(MRI.getRegClass(DestReg));
  const TargetRegisterClass *ValueRC =
    is64Bit ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
  unsigned Val0Reg = MRI.createVirtualRegister(ValueRC);

  BuildMI(*MBB, MI, DL, TII.get(is64Bit ? SP::LDXri : SP::LDri), Val0Reg)
    .addReg(AddrReg).addImm(0);

  // Split the basic block MBB before MI and insert the loop block in the hole.
  MachineFunction::iterator MFI = MBB;
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction *MF = MBB->getParent();
  MachineBasicBlock *LoopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *DoneMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  ++MFI;
  MF->insert(MFI, LoopMBB);
  MF->insert(MFI, DoneMBB);

  // Move MI and following instructions to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), MBB, MI, MBB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Connect the CFG again.
  MBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(LoopMBB);  // The CAS may fail, so the loop backedges.
  LoopMBB->addSuccessor(DoneMBB);

  // Build the loop block.
  unsigned ValReg = MRI.createVirtualRegister(ValueRC);
  // Opcode == 0 means try to write Rs2Reg directly (ATOMIC_SWAP).
  unsigned UpdReg = (Opcode ? MRI.createVirtualRegister(ValueRC) : Rs2Reg);

  // %val = phi [%val0, entry], [%dest, loop]
  BuildMI(LoopMBB, DL, TII.get(SP::PHI), ValReg)
    .addReg(Val0Reg).addMBB(MBB)
    .addReg(DestReg).addMBB(LoopMBB);

  if (CondCode) {
    // This is one of the min/max operations. We need a CMPrr followed by a
    // MOVXCC/MOVICC.
    BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(Rs2Reg);
    BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
      .addReg(ValReg).addReg(Rs2Reg).addImm(CondCode);
  } else if (Opcode) {
    // Plain binary update: %upd = op %val, %rs2.
    BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
      .addReg(ValReg).addReg(Rs2Reg);
  }

  // NAND is AND followed by a bitwise complement (xor -1) of the update.
  if (MI->getOpcode() == SP::ATOMIC_LOAD_NAND_32 ||
      MI->getOpcode() == SP::ATOMIC_LOAD_NAND_64) {
    unsigned TmpReg = UpdReg;
    UpdReg = MRI.createVirtualRegister(ValueRC);
    BuildMI(LoopMBB, DL, TII.get(SP::XORri), UpdReg).addReg(TmpReg).addImm(-1);
  }

  // %dest = cas %addr, %val, %upd; retry if the observed value changed.
  BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::CASXrr : SP::CASrr), DestReg)
    .addReg(AddrReg).addReg(ValReg).addReg(UpdReg)
    .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(DestReg);
  BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::BPXCC : SP::BCOND))
    .addMBB(LoopMBB).addImm(SPCC::ICC_NE);

  MI->eraseFromParent();
  return DoneMBB;
}

//===----------------------------------------------------------------------===//
//                         Sparc Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
3068193323SedSparcTargetLowering::ConstraintType 3069193323SedSparcTargetLowering::getConstraintType(const std::string &Constraint) const { 3070193323Sed if (Constraint.size() == 1) { 3071193323Sed switch (Constraint[0]) { 3072193323Sed default: break; 3073193323Sed case 'r': return C_RegisterClass; 3074263764Sdim case 'I': // SIMM13 3075263764Sdim return C_Other; 3076193323Sed } 3077193323Sed } 3078193323Sed 3079193323Sed return TargetLowering::getConstraintType(Constraint); 3080193323Sed} 3081193323Sed 3082263764SdimTargetLowering::ConstraintWeight SparcTargetLowering:: 3083263764SdimgetSingleConstraintMatchWeight(AsmOperandInfo &info, 3084263764Sdim const char *constraint) const { 3085263764Sdim ConstraintWeight weight = CW_Invalid; 3086263764Sdim Value *CallOperandVal = info.CallOperandVal; 3087263764Sdim // If we don't have a value, we can't do a match, 3088263764Sdim // but allow it at the lowest weight. 3089263764Sdim if (CallOperandVal == NULL) 3090263764Sdim return CW_Default; 3091263764Sdim 3092263764Sdim // Look at the constraint type. 3093263764Sdim switch (*constraint) { 3094263764Sdim default: 3095263764Sdim weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 3096263764Sdim break; 3097263764Sdim case 'I': // SIMM13 3098263764Sdim if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) { 3099263764Sdim if (isInt<13>(C->getSExtValue())) 3100263764Sdim weight = CW_Constant; 3101263764Sdim } 3102263764Sdim break; 3103263764Sdim } 3104263764Sdim return weight; 3105263764Sdim} 3106263764Sdim 3107263764Sdim/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 3108263764Sdim/// vector. If it is invalid, don't add anything to Ops. 
void SparcTargetLowering::
LowerAsmOperandForConstraint(SDValue Op,
                             std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I': // A constant that fits in a signed 13-bit immediate (SIMM13).
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<13>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
        break;
      }
      // Constant doesn't fit: reject without adding anything to Ops.
      return;
    }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  // Non-constant operand (or unknown letter): defer to the generic handler.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// Map inline-asm register constraints to SPARC register classes. Also
// rewrites numeric aliases {r0}-{r31} to the conventional {g,o,l,i} names.
std::pair<unsigned, const TargetRegisterClass*>
SparcTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return std::make_pair(0U, &SP::IntRegsRegClass);
    }
  } else if (!Constraint.empty() && Constraint.size() <= 5
              && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
    // constraint = '{r<d>}'
    // Remove the braces from around the name.
    StringRef name(Constraint.data()+1, Constraint.size()-2);
    // Handle register aliases:
    //       r0-r7   -> g0-g7
    //       r8-r15  -> o0-o7
    //       r16-r23 -> l0-l7
    //       r24-r31 -> i0-i7
    uint64_t intVal = 0;
    if (name.substr(0, 1).equals("r")
        && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
      const char regTypes[] = { 'g', 'o', 'l', 'i' };
      char regType = regTypes[intVal/8];
      char regIdx = '0' + (intVal % 8);
      char tmp[] = { '{', regType, regIdx, '}', 0 };
      std::string newConstraint = std::string(tmp);
      // Retry with the canonical name, e.g. {r9} -> {o1}.
      return TargetLowering::getRegForInlineAsmConstraint(newConstraint, VT);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

bool
SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Sparc target isn't yet aware of offsets.
  return false;
}

// Type-legalize f128 <-> i64 conversions by emitting the matching f128
// libcall; anything else reaching here is a bug.
void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>& Results,
                                             SelectionDAG &DAG) const {

  SDLoc dl(N);

  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // Custom lower only if it involves f128 or i64.
    if (N->getOperand(0).getValueType() != MVT::f128
        || N->getValueType(0) != MVT::i64)
      return;
    libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
               ? RTLIB::FPTOSINT_F128_I64
               : RTLIB::FPTOUINT_F128_I64);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;

  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // Custom lower only if it involves f128 or i64.
    if (N->getValueType(0) != MVT::f128
        || N->getOperand(0).getValueType() != MVT::i64)
      return;

    libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
               ? RTLIB::SINTTOFP_I64_F128
               : RTLIB::UINTTOFP_I64_F128);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  }
}