SparcISelLowering.cpp (261991) | SparcISelLowering.cpp (262613) |
---|---|
1//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the interfaces that Sparc uses to lower LLVM code into a 11// selection DAG. 12// 13//===----------------------------------------------------------------------===// 14 15#include "SparcISelLowering.h" | 1//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the interfaces that Sparc uses to lower LLVM code into a 11// selection DAG. 12// 13//===----------------------------------------------------------------------===// 14 15#include "SparcISelLowering.h" |
16#include "MCTargetDesc/SparcMCExpr.h" |
|
16#include "SparcMachineFunctionInfo.h" 17#include "SparcRegisterInfo.h" 18#include "SparcTargetMachine.h" 19#include "MCTargetDesc/SparcBaseInfo.h" | 17#include "SparcMachineFunctionInfo.h" 18#include "SparcRegisterInfo.h" 19#include "SparcTargetMachine.h" 20#include "MCTargetDesc/SparcBaseInfo.h" |
21#include "SparcTargetObjectFile.h" |
|
20#include "llvm/CodeGen/CallingConvLower.h" 21#include "llvm/CodeGen/MachineFrameInfo.h" 22#include "llvm/CodeGen/MachineFunction.h" 23#include "llvm/CodeGen/MachineInstrBuilder.h" 24#include "llvm/CodeGen/MachineRegisterInfo.h" 25#include "llvm/CodeGen/SelectionDAG.h" 26#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" 27#include "llvm/IR/DerivedTypes.h" --- 47 unchanged lines hidden (view full) --- 75 LocVT, LocInfo)); 76 return true; 77} 78 79// Allocate a full-sized argument for the 64-bit ABI. 80static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, 81 MVT &LocVT, CCValAssign::LocInfo &LocInfo, 82 ISD::ArgFlagsTy &ArgFlags, CCState &State) { | 22#include "llvm/CodeGen/CallingConvLower.h" 23#include "llvm/CodeGen/MachineFrameInfo.h" 24#include "llvm/CodeGen/MachineFunction.h" 25#include "llvm/CodeGen/MachineInstrBuilder.h" 26#include "llvm/CodeGen/MachineRegisterInfo.h" 27#include "llvm/CodeGen/SelectionDAG.h" 28#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" 29#include "llvm/IR/DerivedTypes.h" --- 47 unchanged lines hidden (view full) --- 77 LocVT, LocInfo)); 78 return true; 79} 80 81// Allocate a full-sized argument for the 64-bit ABI. 82static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, 83 MVT &LocVT, CCValAssign::LocInfo &LocInfo, 84 ISD::ArgFlagsTy &ArgFlags, CCState &State) { |
83 assert((LocVT == MVT::f32 || LocVT.getSizeInBits() == 64) && | 85 assert((LocVT == MVT::f32 || LocVT == MVT::f128 86 || LocVT.getSizeInBits() == 64) && |
84 "Can't handle non-64 bits locations"); 85 86 // Stack space is allocated for all arguments starting from [%fp+BIAS+128]. | 87 "Can't handle non-64 bits locations"); 88 89 // Stack space is allocated for all arguments starting from [%fp+BIAS+128]. |
87 unsigned Offset = State.AllocateStack(8, 8); | 90 unsigned size = (LocVT == MVT::f128) ? 16 : 8; 91 unsigned alignment = (LocVT == MVT::f128) ? 16 : 8; 92 unsigned Offset = State.AllocateStack(size, alignment); |
88 unsigned Reg = 0; 89 90 if (LocVT == MVT::i64 && Offset < 6*8) 91 // Promote integers to %i0-%i5. 92 Reg = SP::I0 + Offset/8; 93 else if (LocVT == MVT::f64 && Offset < 16*8) 94 // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15). 95 Reg = SP::D0 + Offset/8; 96 else if (LocVT == MVT::f32 && Offset < 16*8) 97 // Promote floats to %f1, %f3, ... 98 Reg = SP::F1 + Offset/4; | 93 unsigned Reg = 0; 94 95 if (LocVT == MVT::i64 && Offset < 6*8) 96 // Promote integers to %i0-%i5. 97 Reg = SP::I0 + Offset/8; 98 else if (LocVT == MVT::f64 && Offset < 16*8) 99 // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15). 100 Reg = SP::D0 + Offset/8; 101 else if (LocVT == MVT::f32 && Offset < 16*8) 102 // Promote floats to %f1, %f3, ... 103 Reg = SP::F1 + Offset/4; |
104 else if (LocVT == MVT::f128 && Offset < 16*8) 105 // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7). 106 Reg = SP::Q0 + Offset/16; |
|
99 100 // Promote to register when possible, otherwise use the stack slot. 101 if (Reg) { 102 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 103 return true; 104 } 105 106 // This argument goes on the stack in an 8-byte slot. --- 136 unchanged lines hidden (view full) --- 243 // CCValAssign - represent the assignment of the return value to locations. 244 SmallVector<CCValAssign, 16> RVLocs; 245 246 // CCState - Info about the registers and stack slot. 247 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), 248 DAG.getTarget(), RVLocs, *DAG.getContext()); 249 250 // Analyze return values. | 107 108 // Promote to register when possible, otherwise use the stack slot. 109 if (Reg) { 110 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 111 return true; 112 } 113 114 // This argument goes on the stack in an 8-byte slot. --- 136 unchanged lines hidden (view full) --- 251 // CCValAssign - represent the assignment of the return value to locations. 252 SmallVector<CCValAssign, 16> RVLocs; 253 254 // CCState - Info about the registers and stack slot. 255 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), 256 DAG.getTarget(), RVLocs, *DAG.getContext()); 257 258 // Analyze return values. |
251 CCInfo.AnalyzeReturn(Outs, CC_Sparc64); | 259 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64); |
252 253 SDValue Flag; 254 SmallVector<SDValue, 4> RetOps(1, Chain); 255 256 // The second operand on the return instruction is the return address offset. 257 // The return address is always %i7+8 with the 64-bit ABI. 258 RetOps.push_back(DAG.getConstant(8, MVT::i32)); 259 --- 623 unchanged lines hidden (view full) --- 883 } 884 885 unsigned SRetArgSize = (hasStructRetAttr)? getSRetArgSize(DAG, Callee):0; 886 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS); 887 888 // If the callee is a GlobalAddress node (quite common, every direct call is) 889 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 890 // Likewise ExternalSymbol -> TargetExternalSymbol. | 260 261 SDValue Flag; 262 SmallVector<SDValue, 4> RetOps(1, Chain); 263 264 // The second operand on the return instruction is the return address offset. 265 // The return address is always %i7+8 with the 64-bit ABI. 266 RetOps.push_back(DAG.getConstant(8, MVT::i32)); 267 --- 623 unchanged lines hidden (view full) --- 891 } 892 893 unsigned SRetArgSize = (hasStructRetAttr)? getSRetArgSize(DAG, Callee):0; 894 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS); 895 896 // If the callee is a GlobalAddress node (quite common, every direct call is) 897 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 898 // Likewise ExternalSymbol -> TargetExternalSymbol. |
899 unsigned TF = ((getTargetMachine().getRelocationModel() == Reloc::PIC_) 900 ? SparcMCExpr::VK_Sparc_WPLT30 : 0); |
|
891 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) | 901 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) |
892 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32); | 902 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF); |
893 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) | 903 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) |
894 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32); | 904 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF); |
895 896 // Returns a chain & a flag for retval copy to use 897 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 898 SmallVector<SDValue, 8> Ops; 899 Ops.push_back(Chain); 900 Ops.push_back(Callee); 901 if (hasStructRetAttr) 902 Ops.push_back(DAG.getTargetConstant(SRetArgSize, MVT::i32)); --- 90 unchanged lines hidden (view full) --- 993// fixed arguments that are part of the function's prototype. 994// 995// This function post-processes a CCValAssign array created by 996// AnalyzeCallOperands(). 997static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs, 998 ArrayRef<ISD::OutputArg> Outs) { 999 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1000 const CCValAssign &VA = ArgLocs[i]; | 905 906 // Returns a chain & a flag for retval copy to use 907 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 908 SmallVector<SDValue, 8> Ops; 909 Ops.push_back(Chain); 910 Ops.push_back(Callee); 911 if (hasStructRetAttr) 912 Ops.push_back(DAG.getTargetConstant(SRetArgSize, MVT::i32)); --- 90 unchanged lines hidden (view full) --- 1003// fixed arguments that are part of the function's prototype. 1004// 1005// This function post-processes a CCValAssign array created by 1006// AnalyzeCallOperands(). 1007static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs, 1008 ArrayRef<ISD::OutputArg> Outs) { 1009 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1010 const CCValAssign &VA = ArgLocs[i]; |
1011 MVT ValTy = VA.getLocVT(); |
|
1001 // FIXME: What about f32 arguments? C promotes them to f64 when calling 1002 // varargs functions. | 1012 // FIXME: What about f32 arguments? C promotes them to f64 when calling 1013 // varargs functions. |
1003 if (!VA.isRegLoc() || VA.getLocVT() != MVT::f64) | 1014 if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128)) |
1004 continue; 1005 // The fixed arguments to a varargs function still go in FP registers. 1006 if (Outs[VA.getValNo()].IsFixed) 1007 continue; 1008 1009 // This floating point argument should be reassigned. 1010 CCValAssign NewVA; 1011 1012 // Determine the offset into the argument array. | 1015 continue; 1016 // The fixed arguments to a varargs function still go in FP registers. 1017 if (Outs[VA.getValNo()].IsFixed) 1018 continue; 1019 1020 // This floating point argument should be reassigned. 1021 CCValAssign NewVA; 1022 1023 // Determine the offset into the argument array. |
1013 unsigned Offset = 8 * (VA.getLocReg() - SP::D0); | 1024 unsigned firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0; 1025 unsigned argSize = (ValTy == MVT::f64) ? 8 : 16; 1026 unsigned Offset = argSize * (VA.getLocReg() - firstReg); |
1014 assert(Offset < 16*8 && "Offset out of range, bad register enum?"); 1015 1016 if (Offset < 6*8) { 1017 // This argument should go in %i0-%i5. 1018 unsigned IReg = SP::I0 + Offset/8; | 1027 assert(Offset < 16*8 && "Offset out of range, bad register enum?"); 1028 1029 if (Offset < 6*8) { 1030 // This argument should go in %i0-%i5. 1031 unsigned IReg = SP::I0 + Offset/8; |
1019 // Full register, just bitconvert into i64. 1020 NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), 1021 IReg, MVT::i64, CCValAssign::BCvt); | 1032 if (ValTy == MVT::f64) 1033 // Full register, just bitconvert into i64. 1034 NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), 1035 IReg, MVT::i64, CCValAssign::BCvt); 1036 else { 1037 assert(ValTy == MVT::f128 && "Unexpected type!"); 1038 // Full register, just bitconvert into i128 -- We will lower this into 1039 // two i64s in LowerCall_64. 1040 NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), 1041 IReg, MVT::i128, CCValAssign::BCvt); 1042 } |
1022 } else { 1023 // This needs to go to memory, we're out of integer registers. 1024 NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), 1025 Offset, VA.getLocVT(), VA.getLocInfo()); 1026 } 1027 ArgLocs[i] = NewVA; 1028 } 1029} --- 59 unchanged lines hidden (view full) --- 1089 break; 1090 case CCValAssign::ZExt: 1091 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); 1092 break; 1093 case CCValAssign::AExt: 1094 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); 1095 break; 1096 case CCValAssign::BCvt: | 1043 } else { 1044 // This needs to go to memory, we're out of integer registers. 1045 NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), 1046 Offset, VA.getLocVT(), VA.getLocInfo()); 1047 } 1048 ArgLocs[i] = NewVA; 1049 } 1050} --- 59 unchanged lines hidden (view full) --- 1110 break; 1111 case CCValAssign::ZExt: 1112 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); 1113 break; 1114 case CCValAssign::AExt: 1115 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); 1116 break; 1117 case CCValAssign::BCvt: |
1097 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); | 1118 // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But 1119 // SPARC does not support i128 natively. Lower it into two i64, see below. 1120 if (!VA.needsCustom() || VA.getValVT() != MVT::f128 1121 || VA.getLocVT() != MVT::i128) 1122 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); |
1098 break; 1099 } 1100 1101 if (VA.isRegLoc()) { | 1123 break; 1124 } 1125 1126 if (VA.isRegLoc()) { |
1127 if (VA.needsCustom() && VA.getValVT() == MVT::f128 1128 && VA.getLocVT() == MVT::i128) { 1129 // Store and reload into the interger register reg and reg+1. 1130 unsigned Offset = 8 * (VA.getLocReg() - SP::I0); 1131 unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128; 1132 SDValue StackPtr = DAG.getRegister(SP::O6, getPointerTy()); 1133 SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset); 1134 HiPtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, 1135 HiPtrOff); 1136 SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8); 1137 LoPtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, 1138 LoPtrOff); 1139 1140 // Store to %sp+BIAS+128+Offset 1141 SDValue Store = DAG.getStore(Chain, DL, Arg, HiPtrOff, 1142 MachinePointerInfo(), 1143 false, false, 0); 1144 // Load into Reg and Reg+1 1145 SDValue Hi64 = DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, 1146 MachinePointerInfo(), 1147 false, false, false, 0); 1148 SDValue Lo64 = DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, 1149 MachinePointerInfo(), 1150 false, false, false, 0); 1151 RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), 1152 Hi64)); 1153 RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1), 1154 Lo64)); 1155 continue; 1156 } 1157 |
|
1102 // The custom bit on an i32 return value indicates that it should be 1103 // passed in the high bits of the register. 1104 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) { 1105 Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg, 1106 DAG.getConstant(32, MVT::i32)); 1107 1108 // The next value may go in the low bits of the same register. 1109 // Handle both at once. --- 41 unchanged lines hidden (view full) --- 1151 InGlue = Chain.getValue(1); 1152 } 1153 1154 // If the callee is a GlobalAddress node (quite common, every direct call is) 1155 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1156 // Likewise ExternalSymbol -> TargetExternalSymbol. 1157 SDValue Callee = CLI.Callee; 1158 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS); | 1158 // The custom bit on an i32 return value indicates that it should be 1159 // passed in the high bits of the register. 1160 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) { 1161 Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg, 1162 DAG.getConstant(32, MVT::i32)); 1163 1164 // The next value may go in the low bits of the same register. 1165 // Handle both at once. --- 41 unchanged lines hidden (view full) --- 1207 InGlue = Chain.getValue(1); 1208 } 1209 1210 // If the callee is a GlobalAddress node (quite common, every direct call is) 1211 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1212 // Likewise ExternalSymbol -> TargetExternalSymbol. 1213 SDValue Callee = CLI.Callee; 1214 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS); |
1215 unsigned TF = ((getTargetMachine().getRelocationModel() == Reloc::PIC_) 1216 ? SparcMCExpr::VK_Sparc_WPLT30 : 0); |
|
1159 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) | 1217 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) |
1160 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy()); | 1218 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy(), 0, 1219 TF); |
1161 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) | 1220 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) |
1162 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy()); | 1221 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy(), TF); |
1163 1164 // Build the operands for the call instruction itself. 1165 SmallVector<SDValue, 8> Ops; 1166 Ops.push_back(Chain); 1167 Ops.push_back(Callee); 1168 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1169 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1170 RegsToPass[i].second.getValueType())); --- 24 unchanged lines hidden (view full) --- 1195 1196 // Now extract the return values. This is more or less the same as 1197 // LowerFormalArguments_64. 1198 1199 // Assign locations to each value returned by this call. 1200 SmallVector<CCValAssign, 16> RVLocs; 1201 CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), 1202 DAG.getTarget(), RVLocs, *DAG.getContext()); | 1222 1223 // Build the operands for the call instruction itself. 1224 SmallVector<SDValue, 8> Ops; 1225 Ops.push_back(Chain); 1226 Ops.push_back(Callee); 1227 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1228 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1229 RegsToPass[i].second.getValueType())); --- 24 unchanged lines hidden (view full) --- 1254 1255 // Now extract the return values. This is more or less the same as 1256 // LowerFormalArguments_64. 1257 1258 // Assign locations to each value returned by this call. 1259 SmallVector<CCValAssign, 16> RVLocs; 1260 CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), 1261 DAG.getTarget(), RVLocs, *DAG.getContext()); |
1203 RVInfo.AnalyzeCallResult(CLI.Ins, CC_Sparc64); | |
1204 | 1262 |
1263 // Set inreg flag manually for codegen generated library calls that 1264 // return float. 1265 if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && CLI.CS == 0) 1266 CLI.Ins[0].Flags.setInReg(); 1267 1268 RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64); 1269 |
|
1205 // Copy all of the result registers out of their specified physreg. 1206 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1207 CCValAssign &VA = RVLocs[i]; 1208 unsigned Reg = toCallerWindow(VA.getLocReg()); 1209 1210 // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can 1211 // reside in the same register in the high and low bits. Reuse the 1212 // CopyFromReg previous node to avoid duplicate copies. --- 85 unchanged lines hidden (view full) --- 1298 case ISD::SETUO: return SPCC::FCC_U; 1299 case ISD::SETO: return SPCC::FCC_O; 1300 case ISD::SETONE: return SPCC::FCC_LG; 1301 case ISD::SETUEQ: return SPCC::FCC_UE; 1302 } 1303} 1304 1305SparcTargetLowering::SparcTargetLowering(TargetMachine &TM) | 1270 // Copy all of the result registers out of their specified physreg. 1271 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1272 CCValAssign &VA = RVLocs[i]; 1273 unsigned Reg = toCallerWindow(VA.getLocReg()); 1274 1275 // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can 1276 // reside in the same register in the high and low bits. Reuse the 1277 // CopyFromReg previous node to avoid duplicate copies. --- 85 unchanged lines hidden (view full) --- 1363 case ISD::SETUO: return SPCC::FCC_U; 1364 case ISD::SETO: return SPCC::FCC_O; 1365 case ISD::SETONE: return SPCC::FCC_LG; 1366 case ISD::SETUEQ: return SPCC::FCC_UE; 1367 } 1368} 1369 1370SparcTargetLowering::SparcTargetLowering(TargetMachine &TM) |
1306 : TargetLowering(TM, new TargetLoweringObjectFileELF()) { | 1371 : TargetLowering(TM, new SparcELFTargetObjectFile()) { |
1307 Subtarget = &TM.getSubtarget<SparcSubtarget>(); 1308 1309 // Set up the register classes. 1310 addRegisterClass(MVT::i32, &SP::IntRegsRegClass); 1311 addRegisterClass(MVT::f32, &SP::FPRegsRegClass); 1312 addRegisterClass(MVT::f64, &SP::DFPRegsRegClass); 1313 addRegisterClass(MVT::f128, &SP::QFPRegsRegClass); 1314 if (Subtarget->is64Bit()) --- 83 unchanged lines hidden (view full) --- 1398 setOperationAction(ISD::SUBE, MVT::i64, Custom); 1399 setOperationAction(ISD::BITCAST, MVT::f64, Expand); 1400 setOperationAction(ISD::BITCAST, MVT::i64, Expand); 1401 setOperationAction(ISD::SELECT, MVT::i64, Expand); 1402 setOperationAction(ISD::SETCC, MVT::i64, Expand); 1403 setOperationAction(ISD::BR_CC, MVT::i64, Custom); 1404 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom); 1405 | 1372 Subtarget = &TM.getSubtarget<SparcSubtarget>(); 1373 1374 // Set up the register classes. 1375 addRegisterClass(MVT::i32, &SP::IntRegsRegClass); 1376 addRegisterClass(MVT::f32, &SP::FPRegsRegClass); 1377 addRegisterClass(MVT::f64, &SP::DFPRegsRegClass); 1378 addRegisterClass(MVT::f128, &SP::QFPRegsRegClass); 1379 if (Subtarget->is64Bit()) --- 83 unchanged lines hidden (view full) --- 1463 setOperationAction(ISD::SUBE, MVT::i64, Custom); 1464 setOperationAction(ISD::BITCAST, MVT::f64, Expand); 1465 setOperationAction(ISD::BITCAST, MVT::i64, Expand); 1466 setOperationAction(ISD::SELECT, MVT::i64, Expand); 1467 setOperationAction(ISD::SETCC, MVT::i64, Expand); 1468 setOperationAction(ISD::BR_CC, MVT::i64, Custom); 1469 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom); 1470 |
1406 setOperationAction(ISD::CTPOP, MVT::i64, Legal); | 1471 setOperationAction(ISD::CTPOP, MVT::i64, 1472 Subtarget->usePopc() ? Legal : Expand); |
1407 setOperationAction(ISD::CTTZ , MVT::i64, Expand); 1408 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); 1409 setOperationAction(ISD::CTLZ , MVT::i64, Expand); 1410 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); 1411 setOperationAction(ISD::BSWAP, MVT::i64, Expand); 1412 setOperationAction(ISD::ROTL , MVT::i64, Expand); 1413 setOperationAction(ISD::ROTR , MVT::i64, Expand); 1414 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom); 1415 } 1416 | 1473 setOperationAction(ISD::CTTZ , MVT::i64, Expand); 1474 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); 1475 setOperationAction(ISD::CTLZ , MVT::i64, Expand); 1476 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); 1477 setOperationAction(ISD::BSWAP, MVT::i64, Expand); 1478 setOperationAction(ISD::ROTL , MVT::i64, Expand); 1479 setOperationAction(ISD::ROTR , MVT::i64, Expand); 1480 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom); 1481 } 1482 |
1417 // FIXME: There are instructions available for ATOMIC_FENCE 1418 // on SparcV8 and later. 1419 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand); | 1483 // ATOMICs. 1484 // FIXME: We insert fences for each atomics and generate sub-optimal code 1485 // for PSO/TSO. Also, implement other atomicrmw operations. |
1420 | 1486 |
1487 setInsertFencesForAtomic(true); 1488 1489 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal); 1490 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, 1491 (Subtarget->isV9() ? Legal: Expand)); 1492 1493 1494 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal); 1495 1496 // Custom Lower Atomic LOAD/STORE 1497 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); 1498 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); 1499 1500 if (Subtarget->is64Bit()) { 1501 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal); 1502 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal); 1503 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom); 1504 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom); 1505 } 1506 |
|
1421 if (!Subtarget->isV9()) { 1422 // SparcV8 does not have FNEGD and FABSD. 1423 setOperationAction(ISD::FNEG, MVT::f64, Custom); 1424 setOperationAction(ISD::FABS, MVT::f64, Custom); 1425 } 1426 1427 setOperationAction(ISD::FSIN , MVT::f128, Expand); 1428 setOperationAction(ISD::FCOS , MVT::f128, Expand); --- 5 unchanged lines hidden (view full) --- 1434 setOperationAction(ISD::FSINCOS, MVT::f64, Expand); 1435 setOperationAction(ISD::FREM , MVT::f64, Expand); 1436 setOperationAction(ISD::FMA , MVT::f64, Expand); 1437 setOperationAction(ISD::FSIN , MVT::f32, Expand); 1438 setOperationAction(ISD::FCOS , MVT::f32, Expand); 1439 setOperationAction(ISD::FSINCOS, MVT::f32, Expand); 1440 setOperationAction(ISD::FREM , MVT::f32, Expand); 1441 setOperationAction(ISD::FMA , MVT::f32, Expand); | 1507 if (!Subtarget->isV9()) { 1508 // SparcV8 does not have FNEGD and FABSD. 1509 setOperationAction(ISD::FNEG, MVT::f64, Custom); 1510 setOperationAction(ISD::FABS, MVT::f64, Custom); 1511 } 1512 1513 setOperationAction(ISD::FSIN , MVT::f128, Expand); 1514 setOperationAction(ISD::FCOS , MVT::f128, Expand); --- 5 unchanged lines hidden (view full) --- 1520 setOperationAction(ISD::FSINCOS, MVT::f64, Expand); 1521 setOperationAction(ISD::FREM , MVT::f64, Expand); 1522 setOperationAction(ISD::FMA , MVT::f64, Expand); 1523 setOperationAction(ISD::FSIN , MVT::f32, Expand); 1524 setOperationAction(ISD::FCOS , MVT::f32, Expand); 1525 setOperationAction(ISD::FSINCOS, MVT::f32, Expand); 1526 setOperationAction(ISD::FREM , MVT::f32, Expand); 1527 setOperationAction(ISD::FMA , MVT::f32, Expand); |
1442 setOperationAction(ISD::CTPOP, MVT::i32, Expand); | |
1443 setOperationAction(ISD::CTTZ , MVT::i32, Expand); 1444 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); 1445 setOperationAction(ISD::CTLZ , MVT::i32, Expand); 1446 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); 1447 setOperationAction(ISD::ROTL , MVT::i32, Expand); 1448 setOperationAction(ISD::ROTR , MVT::i32, Expand); 1449 setOperationAction(ISD::BSWAP, MVT::i32, Expand); 1450 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); --- 11 unchanged lines hidden (view full) --- 1462 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); 1463 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); 1464 1465 if (Subtarget->is64Bit()) { 1466 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); 1467 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); 1468 setOperationAction(ISD::MULHU, MVT::i64, Expand); 1469 setOperationAction(ISD::MULHS, MVT::i64, Expand); | 1528 setOperationAction(ISD::CTTZ , MVT::i32, Expand); 1529 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); 1530 setOperationAction(ISD::CTLZ , MVT::i32, Expand); 1531 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); 1532 setOperationAction(ISD::ROTL , MVT::i32, Expand); 1533 setOperationAction(ISD::ROTR , MVT::i32, Expand); 1534 setOperationAction(ISD::BSWAP, MVT::i32, Expand); 1535 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); --- 11 unchanged lines hidden (view full) --- 1547 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); 1548 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); 1549 1550 if (Subtarget->is64Bit()) { 1551 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); 1552 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); 1553 setOperationAction(ISD::MULHU, MVT::i64, Expand); 1554 setOperationAction(ISD::MULHS, MVT::i64, Expand); |
1555 1556 setOperationAction(ISD::UMULO, MVT::i64, Custom); 1557 setOperationAction(ISD::SMULO, MVT::i64, Custom); 1558 1559 setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand); 1560 setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand); 1561 setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand); |
|
1470 } 1471 1472 // VASTART needs to be custom lowered to use the VarArgsFrameIndex. 1473 setOperationAction(ISD::VASTART , MVT::Other, Custom); 1474 // VAARG needs to be lowered to not do unaligned accesses for doubles. 1475 setOperationAction(ISD::VAARG , MVT::Other, Custom); 1476 | 1562 } 1563 1564 // VASTART needs to be custom lowered to use the VarArgsFrameIndex. 1565 setOperationAction(ISD::VASTART , MVT::Other, Custom); 1566 // VAARG needs to be lowered to not do unaligned accesses for doubles. 1567 setOperationAction(ISD::VAARG , MVT::Other, Custom); 1568 |
1569 setOperationAction(ISD::TRAP , MVT::Other, Legal); 1570 |
|
1477 // Use the default implementation. 1478 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 1479 setOperationAction(ISD::VAEND , MVT::Other, Expand); 1480 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); 1481 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand); 1482 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); 1483 1484 setExceptionPointerRegister(SP::I0); 1485 setExceptionSelectorRegister(SP::I1); 1486 1487 setStackPointerRegisterToSaveRestore(SP::O6); 1488 | 1571 // Use the default implementation. 1572 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 1573 setOperationAction(ISD::VAEND , MVT::Other, Expand); 1574 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); 1575 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand); 1576 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); 1577 1578 setExceptionPointerRegister(SP::I0); 1579 setExceptionSelectorRegister(SP::I1); 1580 1581 setStackPointerRegisterToSaveRestore(SP::O6); 1582 |
1489 if (Subtarget->isV9()) 1490 setOperationAction(ISD::CTPOP, MVT::i32, Legal); | 1583 setOperationAction(ISD::CTPOP, MVT::i32, 1584 Subtarget->usePopc() ? Legal : Expand); |
1491 1492 if (Subtarget->isV9() && Subtarget->hasHardQuad()) { 1493 setOperationAction(ISD::LOAD, MVT::f128, Legal); 1494 setOperationAction(ISD::STORE, MVT::f128, Legal); 1495 } else { 1496 setOperationAction(ISD::LOAD, MVT::f128, Custom); 1497 setOperationAction(ISD::STORE, MVT::f128, Custom); 1498 } --- 210 unchanged lines hidden (view full) --- 1709// or ExternalSymbol SDNode. 1710SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const { 1711 SDLoc DL(Op); 1712 EVT VT = getPointerTy(); 1713 1714 // Handle PIC mode first. 1715 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 1716 // This is the pic32 code model, the GOT is known to be smaller than 4GB. | 1585 1586 if (Subtarget->isV9() && Subtarget->hasHardQuad()) { 1587 setOperationAction(ISD::LOAD, MVT::f128, Legal); 1588 setOperationAction(ISD::STORE, MVT::f128, Legal); 1589 } else { 1590 setOperationAction(ISD::LOAD, MVT::f128, Custom); 1591 setOperationAction(ISD::STORE, MVT::f128, Custom); 1592 } --- 210 unchanged lines hidden (view full) --- 1803// or ExternalSymbol SDNode. 1804SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const { 1805 SDLoc DL(Op); 1806 EVT VT = getPointerTy(); 1807 1808 // Handle PIC mode first. 1809 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 1810 // This is the pic32 code model, the GOT is known to be smaller than 4GB. |
1717 SDValue HiLo = makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG); | 1811 SDValue HiLo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22, 1812 SparcMCExpr::VK_Sparc_GOT10, DAG); |
1718 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT); 1719 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo); 1720 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this 1721 // function has calls. 1722 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 1723 MFI->setHasCalls(true); 1724 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr, 1725 MachinePointerInfo::getGOT(), false, false, false, 0); 1726 } 1727 1728 // This is one of the absolute code models. 1729 switch(getTargetMachine().getCodeModel()) { 1730 default: 1731 llvm_unreachable("Unsupported absolute code model"); | 1813 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT); 1814 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo); 1815 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this 1816 // function has calls. 1817 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 1818 MFI->setHasCalls(true); 1819 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr, 1820 MachinePointerInfo::getGOT(), false, false, false, 0); 1821 } 1822 1823 // This is one of the absolute code models. 1824 switch(getTargetMachine().getCodeModel()) { 1825 default: 1826 llvm_unreachable("Unsupported absolute code model"); |
1732 case CodeModel::JITDefault: | |
1733 case CodeModel::Small: 1734 // abs32. | 1827 case CodeModel::Small: 1828 // abs32. |
1735 return makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG); | 1829 return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI, 1830 SparcMCExpr::VK_Sparc_LO, DAG); |
1736 case CodeModel::Medium: { 1737 // abs44. | 1831 case CodeModel::Medium: { 1832 // abs44. |
1738 SDValue H44 = makeHiLoPair(Op, SPII::MO_H44, SPII::MO_M44, DAG); | 1833 SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44, 1834 SparcMCExpr::VK_Sparc_M44, DAG); |
1739 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, MVT::i32)); | 1835 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, MVT::i32)); |
1740 SDValue L44 = withTargetFlags(Op, SPII::MO_L44, DAG); | 1836 SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG); |
1741 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44); 1742 return DAG.getNode(ISD::ADD, DL, VT, H44, L44); 1743 } 1744 case CodeModel::Large: { 1745 // abs64. | 1837 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44); 1838 return DAG.getNode(ISD::ADD, DL, VT, H44, L44); 1839 } 1840 case CodeModel::Large: { 1841 // abs64. |
1746 SDValue Hi = makeHiLoPair(Op, SPII::MO_HH, SPII::MO_HM, DAG); | 1842 SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH, 1843 SparcMCExpr::VK_Sparc_HM, DAG); |
1747 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, MVT::i32)); | 1844 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, MVT::i32)); |
1748 SDValue Lo = makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG); | 1845 SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI, 1846 SparcMCExpr::VK_Sparc_LO, DAG); |
1749 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo); 1750 } 1751 } 1752} 1753 1754SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op, 1755 SelectionDAG &DAG) const { 1756 return makeAddress(Op, DAG); --- 15 unchanged lines hidden (view full) --- 1772 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1773 SDLoc DL(GA); 1774 const GlobalValue *GV = GA->getGlobal(); 1775 EVT PtrVT = getPointerTy(); 1776 1777 TLSModel::Model model = getTargetMachine().getTLSModel(GV); 1778 1779 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) { | 1847 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo); 1848 } 1849 } 1850} 1851 1852SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op, 1853 SelectionDAG &DAG) const { 1854 return makeAddress(Op, DAG); --- 15 unchanged lines hidden (view full) --- 1870 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1871 SDLoc DL(GA); 1872 const GlobalValue *GV = GA->getGlobal(); 1873 EVT PtrVT = getPointerTy(); 1874 1875 TLSModel::Model model = getTargetMachine().getTLSModel(GV); 1876 1877 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) { |
1780 unsigned HiTF = ((model == TLSModel::GeneralDynamic)? SPII::MO_TLS_GD_HI22 1781 : SPII::MO_TLS_LDM_HI22); 1782 unsigned LoTF = ((model == TLSModel::GeneralDynamic)? SPII::MO_TLS_GD_LO10 1783 : SPII::MO_TLS_LDM_LO10); 1784 unsigned addTF = ((model == TLSModel::GeneralDynamic)? SPII::MO_TLS_GD_ADD 1785 : SPII::MO_TLS_LDM_ADD); 1786 unsigned callTF = ((model == TLSModel::GeneralDynamic)? SPII::MO_TLS_GD_CALL 1787 : SPII::MO_TLS_LDM_CALL); | 1878 unsigned HiTF = ((model == TLSModel::GeneralDynamic) 1879 ? SparcMCExpr::VK_Sparc_TLS_GD_HI22 1880 : SparcMCExpr::VK_Sparc_TLS_LDM_HI22); 1881 unsigned LoTF = ((model == TLSModel::GeneralDynamic) 1882 ? SparcMCExpr::VK_Sparc_TLS_GD_LO10 1883 : SparcMCExpr::VK_Sparc_TLS_LDM_LO10); 1884 unsigned addTF = ((model == TLSModel::GeneralDynamic) 1885 ? SparcMCExpr::VK_Sparc_TLS_GD_ADD 1886 : SparcMCExpr::VK_Sparc_TLS_LDM_ADD); 1887 unsigned callTF = ((model == TLSModel::GeneralDynamic) 1888 ? SparcMCExpr::VK_Sparc_TLS_GD_CALL 1889 : SparcMCExpr::VK_Sparc_TLS_LDM_CALL); |
1788 1789 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG); 1790 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT); 1791 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo, 1792 withTargetFlags(Op, addTF, DAG)); 1793 1794 SDValue Chain = DAG.getEntryNode(); 1795 SDValue InFlag; --- 21 unchanged lines hidden (view full) --- 1817 DAG.getIntPtrConstant(0, true), InFlag, DL); 1818 InFlag = Chain.getValue(1); 1819 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag); 1820 1821 if (model != TLSModel::LocalDynamic) 1822 return Ret; 1823 1824 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT, | 1890 1891 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG); 1892 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT); 1893 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo, 1894 withTargetFlags(Op, addTF, DAG)); 1895 1896 SDValue Chain = DAG.getEntryNode(); 1897 SDValue InFlag; --- 21 unchanged lines hidden (view full) --- 1919 DAG.getIntPtrConstant(0, true), InFlag, DL); 1920 InFlag = Chain.getValue(1); 1921 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag); 1922 1923 if (model != TLSModel::LocalDynamic) 1924 return Ret; 1925 1926 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT, |
1825 withTargetFlags(Op, SPII::MO_TLS_LDO_HIX22, DAG)); | 1927 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG)); |
1826 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT, | 1928 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT, |
1827 withTargetFlags(Op, SPII::MO_TLS_LDO_LOX10, DAG)); | 1929 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG)); |
1828 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo); 1829 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo, | 1930 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo); 1931 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo, |
1830 withTargetFlags(Op, SPII::MO_TLS_LDO_ADD, DAG)); | 1932 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG)); |
1831 } 1832 1833 if (model == TLSModel::InitialExec) { | 1933 } 1934 1935 if (model == TLSModel::InitialExec) { |
1834 unsigned ldTF = ((PtrVT == MVT::i64)? SPII::MO_TLS_IE_LDX 1835 : SPII::MO_TLS_IE_LD); | 1936 unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX 1937 : SparcMCExpr::VK_Sparc_TLS_IE_LD); |
1836 1837 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT); 1838 1839 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this 1840 // function has calls. 1841 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 1842 MFI->setHasCalls(true); 1843 1844 SDValue TGA = makeHiLoPair(Op, | 1938 1939 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT); 1940 1941 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this 1942 // function has calls. 1943 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 1944 MFI->setHasCalls(true); 1945 1946 SDValue TGA = makeHiLoPair(Op, |
1845 SPII::MO_TLS_IE_HI22, SPII::MO_TLS_IE_LO10, DAG); | 1947 SparcMCExpr::VK_Sparc_TLS_IE_HI22, 1948 SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG); |
1846 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA); 1847 SDValue Offset = DAG.getNode(SPISD::TLS_LD, 1848 DL, PtrVT, Ptr, 1849 withTargetFlags(Op, ldTF, DAG)); 1850 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, 1851 DAG.getRegister(SP::G7, PtrVT), Offset, | 1949 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA); 1950 SDValue Offset = DAG.getNode(SPISD::TLS_LD, 1951 DL, PtrVT, Ptr, 1952 withTargetFlags(Op, ldTF, DAG)); 1953 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, 1954 DAG.getRegister(SP::G7, PtrVT), Offset, |
1852 withTargetFlags(Op, SPII::MO_TLS_IE_ADD, DAG)); | 1955 withTargetFlags(Op, 1956 SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG)); |
1853 } 1854 1855 assert(model == TLSModel::LocalExec); 1856 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT, | 1957 } 1958 1959 assert(model == TLSModel::LocalExec); 1960 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT, |
1857 withTargetFlags(Op, SPII::MO_TLS_LE_HIX22, DAG)); | 1961 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG)); |
1858 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT, | 1962 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT, |
1859 withTargetFlags(Op, SPII::MO_TLS_LE_LOX10, DAG)); | 1963 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG)); |
1860 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo); 1861 1862 return DAG.getNode(ISD::ADD, DL, PtrVT, 1863 DAG.getRegister(SP::G7, PtrVT), Offset); 1864} 1865 1866SDValue 1867SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args, --- 461 unchanged lines hidden (view full) --- 2329 2330static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) { 2331 SDLoc dl(Op); 2332 SDValue Chain = DAG.getNode(SPISD::FLUSHW, 2333 dl, MVT::Other, DAG.getEntryNode()); 2334 return Chain; 2335} 2336 | 1964 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo); 1965 1966 return DAG.getNode(ISD::ADD, DL, PtrVT, 1967 DAG.getRegister(SP::G7, PtrVT), Offset); 1968} 1969 1970SDValue 1971SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args, --- 461 unchanged lines hidden (view full) --- 2433 2434static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) { 2435 SDLoc dl(Op); 2436 SDValue Chain = DAG.getNode(SPISD::FLUSHW, 2437 dl, MVT::Other, DAG.getEntryNode()); 2438 return Chain; 2439} 2440 |
2337static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) { | 2441static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG, 2442 const SparcSubtarget *Subtarget) { |
2338 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 2339 MFI->setFrameAddressIsTaken(true); 2340 2341 EVT VT = Op.getValueType(); 2342 SDLoc dl(Op); 2343 unsigned FrameReg = SP::I6; | 2443 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 2444 MFI->setFrameAddressIsTaken(true); 2445 2446 EVT VT = Op.getValueType(); 2447 SDLoc dl(Op); 2448 unsigned FrameReg = SP::I6; |
2449 unsigned stackBias = Subtarget->getStackPointerBias(); |
|
2344 | 2450 |
2345 uint64_t depth = Op.getConstantOperandVal(0); 2346 | |
2347 SDValue FrameAddr; | 2451 SDValue FrameAddr; |
2348 if (depth == 0) | 2452 2453 if (depth == 0) { |
2349 FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); | 2454 FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); |
2350 else { 2351 // flush first to make sure the windowed registers' values are in stack 2352 SDValue Chain = getFLUSHW(Op, DAG); 2353 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT); | 2455 if (Subtarget->is64Bit()) 2456 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr, 2457 DAG.getIntPtrConstant(stackBias)); 2458 return FrameAddr; 2459 } |
2354 | 2460 |
2355 for (uint64_t i = 0; i != depth; ++i) { 2356 SDValue Ptr = DAG.getNode(ISD::ADD, 2357 dl, MVT::i32, 2358 FrameAddr, DAG.getIntPtrConstant(56)); 2359 FrameAddr = DAG.getLoad(MVT::i32, dl, 2360 Chain, 2361 Ptr, 2362 MachinePointerInfo(), false, false, false, 0); 2363 } | 2461 // flush first to make sure the windowed registers' values are in stack 2462 SDValue Chain = getFLUSHW(Op, DAG); 2463 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT); 2464 2465 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56; 2466 2467 while (depth--) { 2468 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr, 2469 DAG.getIntPtrConstant(Offset)); 2470 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo(), 2471 false, false, false, 0); |
2364 } | 2472 } |
2473 if (Subtarget->is64Bit()) 2474 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr, 2475 DAG.getIntPtrConstant(stackBias)); |
|
2365 return FrameAddr; 2366} 2367 | 2476 return FrameAddr; 2477} 2478 |
2479 2480static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, 2481 const SparcSubtarget *Subtarget) { 2482 2483 uint64_t depth = Op.getConstantOperandVal(0); 2484 2485 return getFRAMEADDR(depth, Op, DAG, Subtarget); 2486 2487} 2488 |
|
2368static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, | 2489static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, |
2369 const SparcTargetLowering &TLI) { | 2490 const SparcTargetLowering &TLI, 2491 const SparcSubtarget *Subtarget) { |
2370 MachineFunction &MF = DAG.getMachineFunction(); 2371 MachineFrameInfo *MFI = MF.getFrameInfo(); 2372 MFI->setReturnAddressIsTaken(true); 2373 2374 EVT VT = Op.getValueType(); 2375 SDLoc dl(Op); 2376 uint64_t depth = Op.getConstantOperandVal(0); 2377 2378 SDValue RetAddr; 2379 if (depth == 0) { 2380 unsigned RetReg = MF.addLiveIn(SP::I7, 2381 TLI.getRegClassFor(TLI.getPointerTy())); 2382 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT); | 2492 MachineFunction &MF = DAG.getMachineFunction(); 2493 MachineFrameInfo *MFI = MF.getFrameInfo(); 2494 MFI->setReturnAddressIsTaken(true); 2495 2496 EVT VT = Op.getValueType(); 2497 SDLoc dl(Op); 2498 uint64_t depth = Op.getConstantOperandVal(0); 2499 2500 SDValue RetAddr; 2501 if (depth == 0) { 2502 unsigned RetReg = MF.addLiveIn(SP::I7, 2503 TLI.getRegClassFor(TLI.getPointerTy())); 2504 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT); |
2383 } else { 2384 // Need frame address to find return address of the caller. 2385 MFI->setFrameAddressIsTaken(true); | 2505 return RetAddr; 2506 } |
2386 | 2507 |
2387 // flush first to make sure the windowed registers' values are in stack 2388 SDValue Chain = getFLUSHW(Op, DAG); 2389 RetAddr = DAG.getCopyFromReg(Chain, dl, SP::I6, VT); | 2508 // Need frame address to find return address of the caller. 2509 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget); |
2390 | 2510 |
2391 for (uint64_t i = 0; i != depth; ++i) { 2392 SDValue Ptr = DAG.getNode(ISD::ADD, 2393 dl, MVT::i32, 2394 RetAddr, 2395 DAG.getIntPtrConstant((i == depth-1)?60:56)); 2396 RetAddr = DAG.getLoad(MVT::i32, dl, 2397 Chain, 2398 Ptr, 2399 MachinePointerInfo(), false, false, false, 0); 2400 } 2401 } | 2511 unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60; 2512 SDValue Ptr = DAG.getNode(ISD::ADD, 2513 dl, VT, 2514 FrameAddr, 2515 DAG.getIntPtrConstant(Offset)); 2516 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, 2517 MachinePointerInfo(), false, false, false, 0); 2518 |
2402 return RetAddr; 2403} 2404 2405static SDValue LowerF64Op(SDValue Op, SelectionDAG &DAG, unsigned opcode) 2406{ 2407 SDLoc dl(Op); 2408 2409 assert(Op.getValueType() == MVT::f64 && "LowerF64Op called on non-double!"); --- 112 unchanged lines hidden (view full) --- 2522 SDValue(Lo64, 0), 2523 LoPtr, 2524 MachinePointerInfo(), 2525 false, false, alignment); 2526 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2527 &OutChains[0], 2); 2528} 2529 | 2519 return RetAddr; 2520} 2521 2522static SDValue LowerF64Op(SDValue Op, SelectionDAG &DAG, unsigned opcode) 2523{ 2524 SDLoc dl(Op); 2525 2526 assert(Op.getValueType() == MVT::f64 && "LowerF64Op called on non-double!"); --- 112 unchanged lines hidden (view full) --- 2639 SDValue(Lo64, 0), 2640 LoPtr, 2641 MachinePointerInfo(), 2642 false, false, alignment); 2643 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2644 &OutChains[0], 2); 2645} 2646 |
2530static SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG, 2531 const SparcTargetLowering &TLI, 2532 bool is64Bit) { 2533 if (Op.getValueType() == MVT::f64) 2534 return LowerF64Op(Op, DAG, ISD::FNEG); 2535 if (Op.getValueType() == MVT::f128) 2536 return TLI.LowerF128Op(Op, DAG, ((is64Bit) ? "_Qp_neg" : "_Q_neg"), 1); 2537 return Op; 2538} | 2647static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) { 2648 assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS) && "invalid"); |
2539 | 2649 |
2540static SDValue LowerFABS(SDValue Op, SelectionDAG &DAG, bool isV9) { | |
2541 if (Op.getValueType() == MVT::f64) | 2650 if (Op.getValueType() == MVT::f64) |
2542 return LowerF64Op(Op, DAG, ISD::FABS); | 2651 return LowerF64Op(Op, DAG, Op.getOpcode()); |
2543 if (Op.getValueType() != MVT::f128) 2544 return Op; 2545 | 2652 if (Op.getValueType() != MVT::f128) 2653 return Op; 2654 |
2546 // Lower fabs on f128 to fabs on f64 2547 // fabs f128 => fabs f64:sub_even64, fmov f64:sub_odd64 | 2655 // Lower fabs/fneg on f128 to fabs/fneg on f64 2656 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64 |
2548 2549 SDLoc dl(Op); 2550 SDValue SrcReg128 = Op.getOperand(0); 2551 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64, 2552 SrcReg128); 2553 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64, 2554 SrcReg128); 2555 if (isV9) 2556 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64); 2557 else | 2657 2658 SDLoc dl(Op); 2659 SDValue SrcReg128 = Op.getOperand(0); 2660 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64, 2661 SrcReg128); 2662 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64, 2663 SrcReg128); 2664 if (isV9) 2665 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64); 2666 else |
2558 Hi64 = LowerF64Op(Hi64, DAG, ISD::FABS); | 2667 Hi64 = LowerF64Op(Hi64, DAG, Op.getOpcode()); |
2559 2560 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, 2561 dl, MVT::f128), 0); 2562 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128, 2563 DstReg128, Hi64); 2564 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128, 2565 DstReg128, Lo64); 2566 return DstReg128; --- 43 unchanged lines hidden (view full) --- 2610 Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi, 2611 DAG.getConstant(32, MVT::i64)); 2612 2613 SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo); 2614 SDValue Ops[2] = { Dst, Carry }; 2615 return DAG.getMergeValues(Ops, 2, dl); 2616} 2617 | 2668 2669 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, 2670 dl, MVT::f128), 0); 2671 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128, 2672 DstReg128, Hi64); 2673 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128, 2674 DstReg128, Lo64); 2675 return DstReg128; --- 43 unchanged lines hidden (view full) --- 2719 Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi, 2720 DAG.getConstant(32, MVT::i64)); 2721 2722 SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo); 2723 SDValue Ops[2] = { Dst, Carry }; 2724 return DAG.getMergeValues(Ops, 2, dl); 2725} 2726 |
2727// Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode() 2728// in LegalizeDAG.cpp except the order of arguments to the library function. 2729static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG, 2730 const SparcTargetLowering &TLI) 2731{ 2732 unsigned opcode = Op.getOpcode(); 2733 assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode."); 2734 2735 bool isSigned = (opcode == ISD::SMULO); 2736 EVT VT = MVT::i64; 2737 EVT WideVT = MVT::i128; 2738 SDLoc dl(Op); 2739 SDValue LHS = Op.getOperand(0); 2740 2741 if (LHS.getValueType() != VT) 2742 return Op; 2743 2744 SDValue ShiftAmt = DAG.getConstant(63, VT); 2745 2746 SDValue RHS = Op.getOperand(1); 2747 SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt); 2748 SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt); 2749 SDValue Args[] = { HiLHS, LHS, HiRHS, RHS }; 2750 2751 SDValue MulResult = TLI.makeLibCall(DAG, 2752 RTLIB::MUL_I128, WideVT, 2753 Args, 4, isSigned, dl).first; 2754 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, 2755 MulResult, DAG.getIntPtrConstant(0)); 2756 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, 2757 MulResult, DAG.getIntPtrConstant(1)); 2758 if (isSigned) { 2759 SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt); 2760 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE); 2761 } else { 2762 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, VT), 2763 ISD::SETNE); 2764 } 2765 // MulResult is a node with an illegal type. Because such things are not 2766 // generally permitted during this phase of legalization, delete the 2767 // node. The above EXTRACT_ELEMENT nodes should have been folded. 2768 DAG.DeleteNode(MulResult.getNode()); 2769 2770 SDValue Ops[2] = { BottomHalf, TopHalf } ; 2771 return DAG.getMergeValues(Ops, 2, dl); 2772} 2773 2774static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) { 2775 // Monotonic load/stores are legal. 
2776 if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic) 2777 return Op; 2778 2779 // Otherwise, expand with a fence. 2780 return SDValue(); 2781} 2782 2783 |
|
2618SDValue SparcTargetLowering:: 2619LowerOperation(SDValue Op, SelectionDAG &DAG) const { 2620 2621 bool hasHardQuad = Subtarget->hasHardQuad(); | 2784SDValue SparcTargetLowering:: 2785LowerOperation(SDValue Op, SelectionDAG &DAG) const { 2786 2787 bool hasHardQuad = Subtarget->hasHardQuad(); |
2622 bool is64Bit = Subtarget->is64Bit(); | |
2623 bool isV9 = Subtarget->isV9(); 2624 2625 switch (Op.getOpcode()) { 2626 default: llvm_unreachable("Should not custom lower this!"); 2627 | 2788 bool isV9 = Subtarget->isV9(); 2789 2790 switch (Op.getOpcode()) { 2791 default: llvm_unreachable("Should not custom lower this!"); 2792 |
2628 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this); 2629 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); | 2793 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this, 2794 Subtarget); 2795 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG, 2796 Subtarget); |
2630 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 2631 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 2632 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 2633 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 2634 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this, 2635 hasHardQuad); 2636 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this, 2637 hasHardQuad); --- 17 unchanged lines hidden (view full) --- 2655 case ISD::FSUB: return LowerF128Op(Op, DAG, 2656 getLibcallName(RTLIB::SUB_F128), 2); 2657 case ISD::FMUL: return LowerF128Op(Op, DAG, 2658 getLibcallName(RTLIB::MUL_F128), 2); 2659 case ISD::FDIV: return LowerF128Op(Op, DAG, 2660 getLibcallName(RTLIB::DIV_F128), 2); 2661 case ISD::FSQRT: return LowerF128Op(Op, DAG, 2662 getLibcallName(RTLIB::SQRT_F128),1); | 2797 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 2798 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 2799 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 2800 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 2801 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this, 2802 hasHardQuad); 2803 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this, 2804 hasHardQuad); --- 17 unchanged lines hidden (view full) --- 2822 case ISD::FSUB: return LowerF128Op(Op, DAG, 2823 getLibcallName(RTLIB::SUB_F128), 2); 2824 case ISD::FMUL: return LowerF128Op(Op, DAG, 2825 getLibcallName(RTLIB::MUL_F128), 2); 2826 case ISD::FDIV: return LowerF128Op(Op, DAG, 2827 getLibcallName(RTLIB::DIV_F128), 2); 2828 case ISD::FSQRT: return LowerF128Op(Op, DAG, 2829 getLibcallName(RTLIB::SQRT_F128),1); |
2663 case ISD::FNEG: return LowerFNEG(Op, DAG, *this, is64Bit); 2664 case ISD::FABS: return LowerFABS(Op, DAG, isV9); | 2830 case ISD::FABS: 2831 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9); |
2665 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this); 2666 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this); 2667 case ISD::ADDC: 2668 case ISD::ADDE: 2669 case ISD::SUBC: 2670 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); | 2832 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this); 2833 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this); 2834 case ISD::ADDC: 2835 case ISD::ADDE: 2836 case ISD::SUBC: 2837 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); |
2838 case ISD::UMULO: 2839 case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this); 2840 case ISD::ATOMIC_LOAD: 2841 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG); |
|
2671 } 2672} 2673 2674MachineBasicBlock * 2675SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 2676 MachineBasicBlock *BB) const { | 2842 } 2843} 2844 2845MachineBasicBlock * 2846SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 2847 MachineBasicBlock *BB) const { |
2677 const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo(); 2678 unsigned BROpcode; 2679 unsigned CC; 2680 DebugLoc dl = MI->getDebugLoc(); 2681 // Figure out the conditional branch opcode to use for this select_cc. | |
2682 switch (MI->getOpcode()) { 2683 default: llvm_unreachable("Unknown SELECT_CC!"); 2684 case SP::SELECT_CC_Int_ICC: 2685 case SP::SELECT_CC_FP_ICC: 2686 case SP::SELECT_CC_DFP_ICC: 2687 case SP::SELECT_CC_QFP_ICC: | 2848 switch (MI->getOpcode()) { 2849 default: llvm_unreachable("Unknown SELECT_CC!"); 2850 case SP::SELECT_CC_Int_ICC: 2851 case SP::SELECT_CC_FP_ICC: 2852 case SP::SELECT_CC_DFP_ICC: 2853 case SP::SELECT_CC_QFP_ICC: |
2688 BROpcode = SP::BCOND; 2689 break; | 2854 return expandSelectCC(MI, BB, SP::BCOND); |
2690 case SP::SELECT_CC_Int_FCC: 2691 case SP::SELECT_CC_FP_FCC: 2692 case SP::SELECT_CC_DFP_FCC: 2693 case SP::SELECT_CC_QFP_FCC: | 2855 case SP::SELECT_CC_Int_FCC: 2856 case SP::SELECT_CC_FP_FCC: 2857 case SP::SELECT_CC_DFP_FCC: 2858 case SP::SELECT_CC_QFP_FCC: |
2694 BROpcode = SP::FBCOND; 2695 break; | 2859 return expandSelectCC(MI, BB, SP::FBCOND); 2860 2861 case SP::ATOMIC_LOAD_ADD_32: 2862 return expandAtomicRMW(MI, BB, SP::ADDrr); 2863 case SP::ATOMIC_LOAD_ADD_64: 2864 return expandAtomicRMW(MI, BB, SP::ADDXrr); 2865 case SP::ATOMIC_LOAD_SUB_32: 2866 return expandAtomicRMW(MI, BB, SP::SUBrr); 2867 case SP::ATOMIC_LOAD_SUB_64: 2868 return expandAtomicRMW(MI, BB, SP::SUBXrr); 2869 case SP::ATOMIC_LOAD_AND_32: 2870 return expandAtomicRMW(MI, BB, SP::ANDrr); 2871 case SP::ATOMIC_LOAD_AND_64: 2872 return expandAtomicRMW(MI, BB, SP::ANDXrr); 2873 case SP::ATOMIC_LOAD_OR_32: 2874 return expandAtomicRMW(MI, BB, SP::ORrr); 2875 case SP::ATOMIC_LOAD_OR_64: 2876 return expandAtomicRMW(MI, BB, SP::ORXrr); 2877 case SP::ATOMIC_LOAD_XOR_32: 2878 return expandAtomicRMW(MI, BB, SP::XORrr); 2879 case SP::ATOMIC_LOAD_XOR_64: 2880 return expandAtomicRMW(MI, BB, SP::XORXrr); 2881 case SP::ATOMIC_LOAD_NAND_32: 2882 return expandAtomicRMW(MI, BB, SP::ANDrr); 2883 case SP::ATOMIC_LOAD_NAND_64: 2884 return expandAtomicRMW(MI, BB, SP::ANDXrr); 2885 2886 case SP::ATOMIC_SWAP_64: 2887 return expandAtomicRMW(MI, BB, 0); 2888 2889 case SP::ATOMIC_LOAD_MAX_32: 2890 return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_G); 2891 case SP::ATOMIC_LOAD_MAX_64: 2892 return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_G); 2893 case SP::ATOMIC_LOAD_MIN_32: 2894 return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LE); 2895 case SP::ATOMIC_LOAD_MIN_64: 2896 return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LE); 2897 case SP::ATOMIC_LOAD_UMAX_32: 2898 return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_GU); 2899 case SP::ATOMIC_LOAD_UMAX_64: 2900 return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_GU); 2901 case SP::ATOMIC_LOAD_UMIN_32: 2902 return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LEU); 2903 case SP::ATOMIC_LOAD_UMIN_64: 2904 return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LEU); |
2696 } | 2905 } |
2906} |
|
2697 | 2907 |
2698 CC = (SPCC::CondCodes)MI->getOperand(3).getImm(); | 2908MachineBasicBlock* 2909SparcTargetLowering::expandSelectCC(MachineInstr *MI, 2910 MachineBasicBlock *BB, 2911 unsigned BROpcode) const { 2912 const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo(); 2913 DebugLoc dl = MI->getDebugLoc(); 2914 unsigned CC = (SPCC::CondCodes)MI->getOperand(3).getImm(); |
2699 2700 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond 2701 // control-flow pattern. The incoming instruction knows the destination vreg 2702 // to set, the condition code register to branch on, the true/false values to 2703 // select between, and a branch opcode to use. 2704 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 2705 MachineFunction::iterator It = BB; 2706 ++It; --- 37 unchanged lines hidden (view full) --- 2744 BuildMI(*BB, BB->begin(), dl, TII.get(SP::PHI), MI->getOperand(0).getReg()) 2745 .addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB) 2746 .addReg(MI->getOperand(1).getReg()).addMBB(thisMBB); 2747 2748 MI->eraseFromParent(); // The pseudo instruction is gone now. 2749 return BB; 2750} 2751 | 2915 2916 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond 2917 // control-flow pattern. The incoming instruction knows the destination vreg 2918 // to set, the condition code register to branch on, the true/false values to 2919 // select between, and a branch opcode to use. 2920 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 2921 MachineFunction::iterator It = BB; 2922 ++It; --- 37 unchanged lines hidden (view full) --- 2960 BuildMI(*BB, BB->begin(), dl, TII.get(SP::PHI), MI->getOperand(0).getReg()) 2961 .addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB) 2962 .addReg(MI->getOperand(1).getReg()).addMBB(thisMBB); 2963 2964 MI->eraseFromParent(); // The pseudo instruction is gone now. 2965 return BB; 2966} 2967 |
2968MachineBasicBlock* 2969SparcTargetLowering::expandAtomicRMW(MachineInstr *MI, 2970 MachineBasicBlock *MBB, 2971 unsigned Opcode, 2972 unsigned CondCode) const { 2973 const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo(); 2974 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 2975 DebugLoc DL = MI->getDebugLoc(); 2976 2977 // MI is an atomic read-modify-write instruction of the form: 2978 // 2979 // rd = atomicrmw<op> addr, rs2 2980 // 2981 // All three operands are registers. 2982 unsigned DestReg = MI->getOperand(0).getReg(); 2983 unsigned AddrReg = MI->getOperand(1).getReg(); 2984 unsigned Rs2Reg = MI->getOperand(2).getReg(); 2985 2986 // SelectionDAG has already inserted memory barriers before and after MI, so 2987 // we simply have to implement the operatiuon in terms of compare-and-swap. 2988 // 2989 // %val0 = load %addr 2990 // loop: 2991 // %val = phi %val0, %dest 2992 // %upd = op %val, %rs2 2993 // %dest = cas %addr, %val, %upd 2994 // cmp %val, %dest 2995 // bne loop 2996 // done: 2997 // 2998 bool is64Bit = SP::I64RegsRegClass.hasSubClassEq(MRI.getRegClass(DestReg)); 2999 const TargetRegisterClass *ValueRC = 3000 is64Bit ? &SP::I64RegsRegClass : &SP::IntRegsRegClass; 3001 unsigned Val0Reg = MRI.createVirtualRegister(ValueRC); 3002 3003 BuildMI(*MBB, MI, DL, TII.get(is64Bit ? SP::LDXri : SP::LDri), Val0Reg) 3004 .addReg(AddrReg).addImm(0); 3005 3006 // Split the basic block MBB before MI and insert the loop block in the hole. 3007 MachineFunction::iterator MFI = MBB; 3008 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 3009 MachineFunction *MF = MBB->getParent(); 3010 MachineBasicBlock *LoopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 3011 MachineBasicBlock *DoneMBB = MF->CreateMachineBasicBlock(LLVM_BB); 3012 ++MFI; 3013 MF->insert(MFI, LoopMBB); 3014 MF->insert(MFI, DoneMBB); 3015 3016 // Move MI and following instructions to DoneMBB. 
3017 DoneMBB->splice(DoneMBB->begin(), MBB, MI, MBB->end()); 3018 DoneMBB->transferSuccessorsAndUpdatePHIs(MBB); 3019 3020 // Connect the CFG again. 3021 MBB->addSuccessor(LoopMBB); 3022 LoopMBB->addSuccessor(LoopMBB); 3023 LoopMBB->addSuccessor(DoneMBB); 3024 3025 // Build the loop block. 3026 unsigned ValReg = MRI.createVirtualRegister(ValueRC); 3027 // Opcode == 0 means try to write Rs2Reg directly (ATOMIC_SWAP). 3028 unsigned UpdReg = (Opcode ? MRI.createVirtualRegister(ValueRC) : Rs2Reg); 3029 3030 BuildMI(LoopMBB, DL, TII.get(SP::PHI), ValReg) 3031 .addReg(Val0Reg).addMBB(MBB) 3032 .addReg(DestReg).addMBB(LoopMBB); 3033 3034 if (CondCode) { 3035 // This is one of the min/max operations. We need a CMPrr followed by a 3036 // MOVXCC/MOVICC. 3037 BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(Rs2Reg); 3038 BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg) 3039 .addReg(ValReg).addReg(Rs2Reg).addImm(CondCode); 3040 } else if (Opcode) { 3041 BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg) 3042 .addReg(ValReg).addReg(Rs2Reg); 3043 } 3044 3045 if (MI->getOpcode() == SP::ATOMIC_LOAD_NAND_32 || 3046 MI->getOpcode() == SP::ATOMIC_LOAD_NAND_64) { 3047 unsigned TmpReg = UpdReg; 3048 UpdReg = MRI.createVirtualRegister(ValueRC); 3049 BuildMI(LoopMBB, DL, TII.get(SP::XORri), UpdReg).addReg(TmpReg).addImm(-1); 3050 } 3051 3052 BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::CASXrr : SP::CASrr), DestReg) 3053 .addReg(AddrReg).addReg(ValReg).addReg(UpdReg) 3054 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); 3055 BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(DestReg); 3056 BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::BPXCC : SP::BCOND)) 3057 .addMBB(LoopMBB).addImm(SPCC::ICC_NE); 3058 3059 MI->eraseFromParent(); 3060 return DoneMBB; 3061} 3062 |
|
2752//===----------------------------------------------------------------------===// 2753// Sparc Inline Assembly Support 2754//===----------------------------------------------------------------------===// 2755 2756/// getConstraintType - Given a constraint letter, return the type of 2757/// constraint it is for this target. 2758SparcTargetLowering::ConstraintType 2759SparcTargetLowering::getConstraintType(const std::string &Constraint) const { 2760 if (Constraint.size() == 1) { 2761 switch (Constraint[0]) { 2762 default: break; 2763 case 'r': return C_RegisterClass; | 3063//===----------------------------------------------------------------------===// 3064// Sparc Inline Assembly Support 3065//===----------------------------------------------------------------------===// 3066 3067/// getConstraintType - Given a constraint letter, return the type of 3068/// constraint it is for this target. 3069SparcTargetLowering::ConstraintType 3070SparcTargetLowering::getConstraintType(const std::string &Constraint) const { 3071 if (Constraint.size() == 1) { 3072 switch (Constraint[0]) { 3073 default: break; 3074 case 'r': return C_RegisterClass; |
3075 case 'I': // SIMM13 3076 return C_Other; |
|
2764 } 2765 } 2766 2767 return TargetLowering::getConstraintType(Constraint); 2768} 2769 | 3077 } 3078 } 3079 3080 return TargetLowering::getConstraintType(Constraint); 3081} 3082 |
3083TargetLowering::ConstraintWeight SparcTargetLowering:: 3084getSingleConstraintMatchWeight(AsmOperandInfo &info, 3085 const char *constraint) const { 3086 ConstraintWeight weight = CW_Invalid; 3087 Value *CallOperandVal = info.CallOperandVal; 3088 // If we don't have a value, we can't do a match, 3089 // but allow it at the lowest weight. 3090 if (CallOperandVal == NULL) 3091 return CW_Default; 3092 3093 // Look at the constraint type. 3094 switch (*constraint) { 3095 default: 3096 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 3097 break; 3098 case 'I': // SIMM13 3099 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) { 3100 if (isInt<13>(C->getSExtValue())) 3101 weight = CW_Constant; 3102 } 3103 break; 3104 } 3105 return weight; 3106} 3107 3108/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 3109/// vector. If it is invalid, don't add anything to Ops. 3110void SparcTargetLowering:: 3111LowerAsmOperandForConstraint(SDValue Op, 3112 std::string &Constraint, 3113 std::vector<SDValue> &Ops, 3114 SelectionDAG &DAG) const { 3115 SDValue Result(0, 0); 3116 3117 // Only support length 1 constraints for now. 3118 if (Constraint.length() > 1) 3119 return; 3120 3121 char ConstraintLetter = Constraint[0]; 3122 switch (ConstraintLetter) { 3123 default: break; 3124 case 'I': 3125 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 3126 if (isInt<13>(C->getSExtValue())) { 3127 Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType()); 3128 break; 3129 } 3130 return; 3131 } 3132 } 3133 3134 if (Result.getNode()) { 3135 Ops.push_back(Result); 3136 return; 3137 } 3138 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 3139} 3140 |
|
// Map an inline-asm register constraint to a (register, register class) pair.
// 'r' selects the 32-bit integer register class; explicit register names of
// the form {rN} (N in 0..31) are translated to the conventional SPARC
// windowed names ({gN}/{oN}/{lN}/{iN}) before delegating to the generic
// implementation.
std::pair<unsigned, const TargetRegisterClass*>
SparcTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return std::make_pair(0U, &SP::IntRegsRegClass);
    }
  } else if (!Constraint.empty() && Constraint.size() <= 5
              && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
    // constraint = '{r<d>}'
    // Remove the braces from around the name.
    StringRef name(Constraint.data()+1, Constraint.size()-2);
    // Handle register aliases:
    //       r0-r7   -> g0-g7
    //       r8-r15  -> o0-o7
    //       r16-r23 -> l0-l7
    //       r24-r31 -> i0-i7
    uint64_t intVal = 0;
    // Accept only "r" followed by a decimal number that parses fully
    // (getAsInteger returns false on success) and is in range.
    if (name.substr(0, 1).equals("r")
        && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
      const char regTypes[] = { 'g', 'o', 'l', 'i' };
      // Each group of eight registers maps to one window-register prefix.
      char regType = regTypes[intVal/8];
      char regIdx = '0' + (intVal % 8);
      // Rebuild the constraint string, e.g. "{r9}" -> "{o1}".
      char tmp[] = { '{', regType, regIdx, '}', 0 };
      std::string newConstraint = std::string(tmp);
      return TargetLowering::getRegForInlineAsmConstraint(newConstraint, VT);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

bool
SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Sparc target isn't yet aware of offsets.