//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static TargetLoweringObjectFile *CreateTLOF(const PPCTargetMachine &TM) {
  if (TM.getSubtargetImpl()->isDarwin())
    return new TargetLoweringObjectFileMachO();

  if (TM.getSubtargetImpl()->isSVR4ABI())
    return new PPC64LinuxTargetObjectFile();

  return new TargetLoweringObjectFileELF();
}

PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
  : TargetLowering(TM, CreateTLOF(TM)), PPCSubTarget(*TM.getSubtargetImpl()) {
  const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>();

  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget->isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
  addRegisterClass(MVT::f64, &PPC::F8RCRegClass);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
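  // (These are the update forms, e.g. lbzu/lhzu/lwzu and stbu/sthu/stwu,
  // which write the computed effective address back into the base register.)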
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);

  // This is used in the ppcf128->int sequence.  Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FMA  , MVT::f64, Legal);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  setOperationAction(ISD::FMA  , MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget->hasFSQRT() &&
      !(TM.Options.UnsafeFPMath &&
        Subtarget->hasFRSQRTE() && Subtarget->hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget->hasFSQRT() &&
      !(TM.Options.UnsafeFPMath &&
        Subtarget->hasFRSQRTES() && Subtarget->hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget->hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }
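  // Note: FPRND corresponds to the fri* rounding instructions (frin, friz,
  // frip, frim), which round to nearest / toward zero / +inf / -inf.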
  if (Subtarget->hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, CTPOP or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
  setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);

  if (Subtarget->hasPOPCNTD()) {
    setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
    setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
    setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32   , Expand);
  setOperationAction(ISD::ROTR, MVT::i64   , Expand);

  // PowerPC does not have Select.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::i64, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND which requires SetCC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::f64, Expand);

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling, but is a light-weight setjmp/longjmp replacement
  // to support continuations, user-level threading, etc.  As a result, no
  // other SjLj exception interfaces are implemented; please don't build
  // your own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
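  // (On 32-bit targets this typically becomes a lis/addi high/low pair; on
  // 64-bit SVR4 it becomes a TOC-relative load, as in the lowering below.)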
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);

  if (Subtarget->isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget->isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY            , MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget->has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
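    // (fctid/fctidz convert fp to i64, and fcfid converts i64 back to fp.)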
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (PPCSubTarget.hasLFIWAX() || Subtarget->isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (PPCSubTarget.hasFPCVT()) {
    if (Subtarget->has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget->use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget->hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT::SimpleValueType VT = (MVT::SimpleValueType)i;

      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , VT, Legal);
      setOperationAction(ISD::SUB , VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
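      // (Everything marked Expand below is either unrolled element-by-element
      // or turned into a libcall by the legalizer.)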
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTLZ, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

      for (unsigned j = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
           j <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++j) {
        MVT::SimpleValueType InnerVT = (MVT::SimpleValueType)j;
        setTruncStoreAction(VT, InnerVT, Expand);
      }
      setLoadExtAction(ISD::SEXTLOAD, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
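    // (vperm selects each result byte from the 32-byte concatenation of its
    // two source vectors using a third control vector, so any byte-level
    // permutation of the inputs can be expressed.)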
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUGT, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUGE, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETULT, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETULE, MVT::v4f32, Expand);

    setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
  }

  if (Subtarget->has64BitSupport()) {
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  }

  setOperationAction(ISD::ATOMIC_LOAD,  MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);

  setBooleanContents(ZeroOrOneBooleanContent);
  // Altivec instructions set fields to all zeros or all ones.
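  // (e.g. vcmpequw writes 0xFFFFFFFF into each element for which the
  // comparison is true and 0 otherwise, hence ZeroOrNegativeOne.)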
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (isPPC64) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget->isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  setMinFunctionAlignment(2);
  if (PPCSubTarget.isDarwin())
    setPrefFunctionAlignment(4);

  if (isPPC64 && Subtarget->isJITCodeModel())
    // Temporary workaround for the inability of PPC64 JIT to handle jump
    // tables.
    setSupportJumpTables(false);

  setInsertFencesForAtomic(true);

  if (Subtarget->enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties();

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends.  GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget->getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget->getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;

    setPrefFunctionAlignment(4);
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
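/// The walk is recursive over vector, array, and struct members; MaxMaxAlign
/// caps the result (32 bytes when QPX is available, 16 bytes for Altivec).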
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const {
  // Darwin passes everything on a 4-byte boundary.
  if (PPCSubTarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary.  Everything
  // else is passed on an 8-byte boundary on PPC64 and a 4-byte boundary on
  // PPC32.
  unsigned Align = PPCSubTarget.isPPC64() ? 8 : 4;
  if (PPCSubTarget.hasAltivec() || PPCSubTarget.hasQPX())
    getMaxByValAlign(Ty, Align, PPCSubTarget.hasQPX() ? 32 : 16);
  return Align;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:            return "PPCISD::FSEL";
  case PPCISD::FCFID:           return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
  case PPCISD::FRE:             return "PPCISD::FRE";
  case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX:          return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:           return "PPCISD::VPERM";
  case PPCISD::Hi:              return "PPCISD::Hi";
  case PPCISD::Lo:              return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
  case PPCISD::TOC_RESTORE:     return "PPCISD::TOC_RESTORE";
  case PPCISD::LOAD:            return "PPCISD::LOAD";
  case PPCISD::LOAD_TOC:        return "PPCISD::LOAD_TOC";
  case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:             return "PPCISD::SRL";
  case PPCISD::SRA:             return "PPCISD::SRA";
  case PPCISD::SHL:             return "PPCISD::SHL";
  case PPCISD::CALL:            return "PPCISD::CALL";
  case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR:           return "PPCISD::MTCTR";
  case PPCISD::BCTRL:           return "PPCISD::BCTRL";
  case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
  case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
  case PPCISD::VCMP:            return "PPCISD::VCMP";
  case PPCISD::VCMPo:           return "PPCISD::VCMPo";
  case PPCISD::LBRX:            return "PPCISD::LBRX";
  case PPCISD::STBRX:           return "PPCISD::STBRX";
  case PPCISD::LARX:            return "PPCISD::LARX";
  case PPCISD::STCX:            return "PPCISD::STCX";
  case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ:            return "PPCISD::BDNZ";
  case PPCISD::BDZ:             return "PPCISD::BDZ";
  case PPCISD::MFFS:            return "PPCISD::MFFS";
  case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET:          return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
  case PPCISD::ADDIS_TOC_HA:    return "PPCISD::ADDIS_TOC_HA";
  case PPCISD::LD_TOC_L:        return "PPCISD::LD_TOC_L";
  case PPCISD::ADDI_TOC_L:      return "PPCISD::ADDI_TOC_L";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
  case PPCISD::SC:              return "PPCISD::SC";
  }
}

EVT PPCTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+1))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
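/// (vpkuwum keeps the low-order halfword of each word element of its two
/// inputs, so in big-endian byte order the mask selects the odd halfwords.)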
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}


/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         "PPC only supports shuffles by bytes!");

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  }
  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
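/// (For example, a vspltw of word element 1 appears here as the v16i8 mask
/// {4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7}.)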
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte element
  // splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool PPC::isAllNegativeZeroVector(SDNode *N) {
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);

  APInt APVal, APUndef;
  unsigned BitSize;
  bool HasAnyUndefs;

  if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true))
    if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
      return CFP->getValueAPF().isNegZero();

  return false;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (UniquedVals[i&(Multiple-1)].getNode() == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].getNode() == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                           // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.getNode() == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (OpVal.getNode() == 0) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value & ((1 << (8*ValSizeInBytes))-1)))
      return SDValue();
  }

  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (N->getOpcode() != ISD::Constant)
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}


/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i: fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are provably
    // disjoint.
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.ComputeMaskedBits(N.getOperand(0),
                          LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.ComputeMaskedBits(N.getOperand(1),
                            RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4).  The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot.  As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
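  // (lwa, like ld/std, is a DS-form instruction whose displacement must be a
  // multiple of 4, so it has the same problem.)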
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar).  We can remove this exclusion if
  // we find such a test case.  The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs.  For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 0, i64* %b
  // then the store should really be marked as 'align 1', but is not.  If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  unsigned Align = MFI->getObjectAlignment(FrameIdx);
  if (Align >= 4)
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.  If Aligned is true, only accept displacements
/// suitable for STD and friends, i.e. multiples of 4.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG,
                                            bool Aligned) const {
  // FIXME dl should come from parent load or store, not from address.
  SDLoc dl(N);
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Aligned || (imm & 3) == 0)) {
      Disp = DAG.getTargetConstant(imm, N.getValueType());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
        fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true;  // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Aligned || (imm & 3) == 0)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);

      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant(imm, N.getValueType());
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    short Imm;
    if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) {
      Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
      Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!Aligned || (CN->getZExtValue() & 3) == 0)) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true;  // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as a pre-indexed load / store address.
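/// (On PPC these are the update-form memory instructions, e.g. lwzu/stwu for
/// [r+imm] and lwzux/stwux for [r+r].)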
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT  = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (VT.isVector())
    return false;

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {

    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored.  Check for
    // those situations here, and try with swapped Base/Offset instead.
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
      SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }

    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU can only handle immediates that are a multiple of 4.
  if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
    if (Alignment < 4)
      return false;

    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// GetLabelAccessInfo - Return true if we should reference labels using a
/// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags.
static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
                               unsigned &LoOpFlags, const GlobalValue *GV = 0) {
  HiOpFlags = PPCII::MO_HA;
  LoOpFlags = PPCII::MO_LO;

  // Don't use the PIC base if not in the PIC relocation model, or if we are
  // on a non-Darwin platform.  We don't support PIC on other platforms yet.
  bool isPIC = TM.getRelocationModel() == Reloc::PIC_ &&
               TM.getSubtarget<PPCSubtarget>().isDarwin();
  if (isPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }

  // If this is a reference to a global value that requires a non-lazy-ptr,
  // make sure that instruction lowering adds it.
  if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) {
    HiOpFlags |= PPCII::MO_NLP_FLAG;
    LoOpFlags |= PPCII::MO_NLP_FLAG;

    if (GV->hasHiddenVisibility()) {
      HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
      LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
    }
  }

  return isPIC;
}

static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  EVT PtrVT = HiPart.getValueType();
  SDValue Zero = DAG.getConstant(0, PtrVT);
  SDLoc DL(HiPart);

  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

  // With PIC, the first instruction is actually "GR+hi(&G)".
  if (isPIC)
    Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);

  // Generate non-pic code that has direct accesses to the constant pool.
  // The address of the global is just (hi(&g)+lo(&g)).
  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
    return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(CP), MVT::i64, GA,
                       DAG.getRegister(PPC::X2, MVT::i64));
  }

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
  SDValue CPIHi =
    DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
  SDValue CPILo =
    DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
  return LowerLabelRef(CPIHi, CPILo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
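  // (TOC_ENTRY loads the entry's address out of the TOC, addressed relative
  // to the TOC pointer kept in X2.)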
  if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
    return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(JT), MVT::i64, GA,
                       DAG.getRegister(PPC::X2, MVT::i64));
  }

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
  SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
  SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
  return LowerLabelRef(JTIHi, JTILo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
  SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
  SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
  return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {

  // FIXME: TLS addresses currently use medium model code sequences,
  // which is the most useful form.  Eventually support for small and
  // large models could be added if users need it, at the cost of
  // additional complexity.
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  SDLoc dl(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy();
  bool is64bit = PPCSubTarget.isPPC64();

  TLSModel::Model Model = getTargetMachine().getTLSModel(GV);

  if (Model == TLSModel::LocalExec) {
    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_HA);
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_LO);
    SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
                                     is64bit ? MVT::i64 : MVT::i32);
    SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
    return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
  }

  if (!is64bit)
    llvm_unreachable("only local-exec is currently supported for ppc32");

  if (Model == TLSModel::InitialExec) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                                PPCII::MO_TLS);
    SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
    SDValue TPOffsetHi = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
                                     PtrVT, GOTReg, TGA);
    SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
                                   PtrVT, TGA, TPOffsetHi);
    return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
  }

  if (Model == TLSModel::GeneralDynamic) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
    SDValue GOTEntryHi = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
                                     GOTReg, TGA);
    SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSGD_L, dl, PtrVT,
                                   GOTEntryHi, TGA);

    // We need a chain node, and don't have one handy.  The underlying
    // call has no side effects, so using the function entry node
    // suffices.
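    // (For illustration, on 64-bit ELF this general-dynamic sequence
    // materializes roughly as:
    //    addis 3, 2, sym@got@tlsgd@ha
    //    addi  3, 3, sym@got@tlsgd@l
    //    bl    __tls_get_addr(sym@tlsgd)
    //  with the exact relocations depending on the code model and linker.)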
    SDValue Chain = DAG.getEntryNode();
    Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, GOTEntry);
    SDValue ParmReg = DAG.getRegister(PPC::X3, MVT::i64);
    SDValue TLSAddr = DAG.getNode(PPCISD::GET_TLS_ADDR, dl,
                                  PtrVT, ParmReg, TGA);
    // The return value from GET_TLS_ADDR really is in X3 already, but
    // some hacks are needed here to tie everything together.  The extra
    // copies dissolve during subsequent transforms.
    Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, TLSAddr);
    return DAG.getCopyFromReg(Chain, dl, PPC::X3, PtrVT);
  }

  if (Model == TLSModel::LocalDynamic) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
    SDValue GOTEntryHi = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
                                     GOTReg, TGA);
    SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSLD_L, dl, PtrVT,
                                   GOTEntryHi, TGA);

    // We need a chain node, and don't have one handy.  The underlying
    // call has no side effects, so using the function entry node
    // suffices.
    SDValue Chain = DAG.getEntryNode();
    Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, GOTEntry);
    SDValue ParmReg = DAG.getRegister(PPC::X3, MVT::i64);
    SDValue TLSAddr = DAG.getNode(PPCISD::GET_TLSLD_ADDR, dl,
                                  PtrVT, ParmReg, TGA);
    // The return value from GET_TLSLD_ADDR really is in X3 already, but
    // some hacks are needed here to tie everything together.  The extra
    // copies dissolve during subsequent transforms.
    Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, TLSAddr);
    SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, PtrVT,
                                      Chain, ParmReg, TGA);
    return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
  }

  llvm_unreachable("Unknown TLS model!");
}

SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  SDLoc DL(GSDN);
  const GlobalValue *GV = GSDN->getGlobal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
    return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i64, GA,
                       DAG.getRegister(PPC::X2, MVT::i64));
  }

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag, GV);

  SDValue GAHi =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
  SDValue GALo =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);

  SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG);

  // If the global reference is actually to a non-lazy-pointer, we have to do
  // an extra load to get the address of the global.
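  // (That is, Hi/Lo above compute the address of the pointer slot rather than
  // of the global itself, and the load below fetches the global's real
  // address out of that slot.)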
  if (MOHiFlag & PPCII::MO_NLP_FLAG)
    Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(),
                      false, false, false, 0);
  return Ptr;
}

SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc dl(Op);

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      EVT VT = Op.getOperand(0).getValueType();
      SDValue Zext = Op.getOperand(0);
      if (VT.bitsLT(MVT::i32)) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(VT.getSizeInBits());
      SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
      SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
                                DAG.getConstant(Log2b, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDValue();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by xor'ing the rhs with the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.  The
  // normal approach here uses sub to do this instead of xor.  Using xor exposes
  // the result to other bit-twiddling opportunities.
  EVT LHSVT = Op.getOperand(0).getValueType();
  if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    EVT VT = Op.getValueType();
    SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
                              Op.getOperand(1));
    return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC);
  }
  return SDValue();
}

SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
                                      const PPCSubtarget &Subtarget) const {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);

  assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");

  // gpr_index
  SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    VAListPtr, MachinePointerInfo(SV), MVT::i8,
                                    false, false, 0);
  InChain = GprIndex.getValue(1);

  if (VT == MVT::i64) {
    // Check if GprIndex is even
    SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
                                 DAG.getConstant(1, MVT::i32));
    SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
                                DAG.getConstant(0, MVT::i32), ISD::SETNE);
    SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
                                          DAG.getConstant(1, MVT::i32));
    // Align GprIndex to be even if it isn't
    GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
                           GprIndex);
  }

  // fpr index is 1 byte after gpr
  SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                               DAG.getConstant(1, MVT::i32));

  // fpr
  SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    FprPtr, MachinePointerInfo(SV), MVT::i8,
                                    false, false, 0);
  InChain = FprIndex.getValue(1);

  SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                       DAG.getConstant(8, MVT::i32));

  SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                        DAG.getConstant(4, MVT::i32));

  // areas
  SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr,
                                     MachinePointerInfo(), false, false,
                                     false, 0);
  InChain = OverflowArea.getValue(1);

  SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr,
                                    MachinePointerInfo(), false, false,
                                    false, 0);
  InChain = RegSaveArea.getValue(1);

  // select overflow_area if index >= 8
  SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
                            DAG.getConstant(8, MVT::i32), ISD::SETLT);

  // adjustment constant gpr_index * 4/8
  SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
                                    VT.isInteger() ? GprIndex : FprIndex,
                                    DAG.getConstant(VT.isInteger() ? 4 : 8,
                                                    MVT::i32));

  // OurReg = RegSaveArea + RegConstant
  SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
                               RegConstant);

  // Floating types are 32 bytes into RegSaveArea
  if (VT.isFloatingPoint())
    OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
                         DAG.getConstant(32, MVT::i32));

  // increase {f,g}pr_index by 1 (or 2 if VT is i64)
  SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
                                   VT.isInteger() ? GprIndex : FprIndex,
                                   DAG.getConstant(VT == MVT::i64 ? 2 : 1,
                                                   MVT::i32));

  InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
                              VT.isInteger() ? VAListPtr : FprPtr,
                              MachinePointerInfo(SV),
                              MVT::i8, false, false, 0);

  // determine if we should load from reg_save_area or overflow_area
  SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg,
                               OverflowArea);

  // increase overflow_area by 4/8 if gpr/fpr index >= 8
  SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
                                          DAG.getConstant(VT.isInteger() ?
                                                          4 : 8, MVT::i32));

  OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
                             OverflowAreaPlusN);

  InChain = DAG.getTruncStore(InChain, dl, OverflowArea,
                              OverflowAreaPtr,
                              MachinePointerInfo(),
                              MVT::i32, false, false, 0);

  return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(),
                     false, false, false, 0);
}

SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG,
                                       const PPCSubtarget &Subtarget) const {
  assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");

  // We have to copy the entire va_list struct:
  // 2*sizeof(char) + 2 bytes of alignment + 2*sizeof(char*) = 12 bytes.
  return DAG.getMemcpy(Op.getOperand(0), Op,
                       Op.getOperand(1), Op.getOperand(2),
                       DAG.getConstant(12, MVT::i32), 8, false, true,
                       MachinePointerInfo(), MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
                                                  SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = (PtrVT == MVT::i64);
  Type *IntPtrTy =
    DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType(
                                                           *DAG.getContext());

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = Trmp; Args.push_back(Entry);

  // TrampSize == (isPPC64 ? 48 : 40);
  Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40,
                               isPPC64 ? MVT::i64 : MVT::i32);
  Args.push_back(Entry);

  Entry.Node = FPtr; Args.push_back(Entry);
  Entry.Node = Nest; Args.push_back(Entry);

  // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
  TargetLowering::CallLoweringInfo CLI(Chain,
                                       Type::getVoidTy(*DAG.getContext()),
                                       false, false, false, false, 0,
                                       CallingConv::C,
                                       /*isTailCall=*/false,
                                       /*doesNotRet=*/false,
                                       /*isReturnValueUsed=*/true,
                                       DAG.getExternalSymbol(
                                         "__trampoline_setup", PtrVT),
                                       Args, DAG, dl);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  return CallResult.second;
}

SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
                                        const PPCSubtarget &Subtarget) const {
  MachineFunction &MF = DAG.getMachineFunction();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  SDLoc dl(Op);

  if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
    const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
    return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                        MachinePointerInfo(SV),
                        false, false, 0);
  }

  // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
  // We suppose the given va_list is already allocated.
  //
  // typedef struct {
  //  char gpr;     /* index into the array of 8 GPRs
  //                 * stored in the register save area
  //                 * gpr=0 corresponds to r3,
  //                 * gpr=1 to r4, etc.
  //                 */
  //  char fpr;     /* index into the array of 8 FPRs
  //                 * stored in the register save area
  //                 * fpr=0 corresponds to f1,
  //                 * fpr=1 to f2, etc.
  //                 */
  //  char *overflow_arg_area;
  //                /* location on stack that holds
  //                 * the next overflow argument
  //                 */
  //  char *reg_save_area;
  //                /* where r3:r10 and f1:f8 (if saved)
  //                 * are stored
  //                 */
  // } va_list[1];


  SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), MVT::i32);
  SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), MVT::i32);


  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
                                            PtrVT);
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 PtrVT);

  uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
  SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT);

  uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
  SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT);

  uint64_t FPROffset = 1;
  SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT);

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  // Store first byte : number of int regs
  SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR,
                                         Op.getOperand(1),
                                         MachinePointerInfo(SV),
                                         MVT::i8, false, false, 0);
  uint64_t nextOffset = FPROffset;
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
                                ConstFPROffset);

  // Store second byte : number of float regs
  SDValue secondStore =
    DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
                      MachinePointerInfo(SV, nextOffset), MVT::i8,
                      false, false, 0);
  nextOffset += StackOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);

  // Store second word : arguments given on stack
  SDValue thirdStore =
    DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
                 MachinePointerInfo(SV, nextOffset),
                 false, false, 0);
  nextOffset += FrameOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);

  // Store third word : arguments given in registers
  return DAG.getStore(thirdStore, dl, FR, nextPtr,
                      MachinePointerInfo(SV, nextOffset),
                      false, false, 0);

}

#include "PPCGenCallingConv.inc"

// Function whose sole purpose is to kill compiler warnings
// stemming from unused functions included from PPCGenCallingConv.inc.
CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const {
  return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
}

bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State) {
  return true;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
                                             MVT &LocVT,
                                             CCValAssign::LocInfo &LocInfo,
                                             ISD::ArgFlagsTy &ArgFlags,
                                             CCState &State) {
  static const uint16_t ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);

  // Skip one register if the first unallocated register has an even register
  // number and there are still argument registers available which have not
  // been allocated yet.  RegNum is actually an index into ArgRegs, which
  // means we need to skip a register if RegNum is odd.
  if (RegNum != NumArgRegs && RegNum % 2 == 1) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the first
  // unallocated register has an odd register number and does not actually
  // allocate a register for the current argument.
  return false;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                               MVT &LocVT,
                                               CCValAssign::LocInfo &LocInfo,
                                               ISD::ArgFlagsTy &ArgFlags,
                                               CCState &State) {
  static const uint16_t ArgRegs[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8
  };

  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);

  // If there is only one floating-point register left we need to put both f64
  // values of a split ppc_fp128 value on the stack.
  if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the two
  // f64 values a ppc_fp128 value is split into are both passed in registers
  // or both passed on the stack and does not actually allocate a register for
  // the current argument.
  return false;
}

/// GetFPR - Get the set of FP registers that should be allocated for
/// arguments on Darwin.
static const uint16_t *GetFPR() {
  static const uint16_t FPR[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
  };

  return FPR;
}

/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
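/// For example, with an 8-byte pointer a 10-byte byval aggregate rounds up to
/// a 16-byte slot, and a 4-byte i32 still reserves a full 8-byte slot.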
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
                                       unsigned PtrByteSize) {
  unsigned ArgSize = ArgVT.getSizeInBits()/8;
  if (Flags.isByVal())
    ArgSize = Flags.getByValSize();
  ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

  return ArgSize;
}

SDValue
PPCTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg>
                                          &Ins,
                                        SDLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)
                                          const {
  if (PPCSubTarget.isSVR4ABI()) {
    if (PPCSubTarget.isPPC64())
      return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
                                         dl, DAG, InVals);
    else
      return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
                                         dl, DAG, InVals);
  } else {
    return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
                                       dl, DAG, InVals);
  }
}

SDValue
PPCTargetLowering::LowerFormalArguments_32SVR4(
                                      SDValue Chain,
                                      CallingConv::ID CallConv, bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg>
                                        &Ins,
                                      SDLoc dl, SelectionDAG &DAG,
                                      SmallVectorImpl<SDValue> &InVals) const {

  // 32-bit SVR4 ABI Stack Frame Layout:
  //              +-----------------------------------+
  //        +-->  |            Back chain             |
  //        |     +-----------------------------------+
  //        |     | Floating-point register save area |
  //        |     +-----------------------------------+
  //        |     |    General register save area     |
  //        |     +-----------------------------------+
  //        |     |          CR save word             |
  //        |     +-----------------------------------+
  //        |     |         VRSAVE save word          |
  //        |     +-----------------------------------+
  //        |     |         Alignment padding         |
  //        |     +-----------------------------------+
  //        |     |     Vector register save area     |
  //        |     +-----------------------------------+
  //        |     |       Local variable space        |
  //        |     +-----------------------------------+
  //        |     |        Parameter list area        |
  //        |     +-----------------------------------+
  //        |     |           LR save word            |
  //        |     +-----------------------------------+
  // SP-->  +---  |            Back chain             |
  //              +-----------------------------------+
  //
  // Specifications:
  //   System V Application Binary Interface PowerPC Processor Supplement
  //   AltiVec Technology Programming Interface Manual

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = 4;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false),
                       PtrByteSize);

  CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    // Arguments stored in registers.
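    // (Each CCValAssign is either register- or memory-resident; register
    // values become live-in copies here, while memory values are materialized
    // with fixed-object loads in the else branch below.)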
    if (VA.isRegLoc()) {
      const TargetRegisterClass *RC;
      EVT ValVT = VA.getValVT();

      switch (ValVT.getSimpleVT().SimpleTy) {
        default:
          llvm_unreachable("ValVT not supported by formal arguments Lowering");
        case MVT::i32:
          RC = &PPC::GPRCRegClass;
          break;
        case MVT::f32:
          RC = &PPC::F4RCRegClass;
          break;
        case MVT::f64:
          RC = &PPC::F8RCRegClass;
          break;
        case MVT::v16i8:
        case MVT::v8i16:
        case MVT::v4i32:
        case MVT::v4f32:
          RC = &PPC::VRRCRegClass;
          break;
      }

      // Transform the arguments stored in physical registers into virtual
      // ones.
      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, ValVT);

      InVals.push_back(ArgValue);
    } else {
      // Argument stored in memory.
      assert(VA.isMemLoc());

      unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8;
      int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(),
                                      isImmutable);

      // Create load nodes to retrieve arguments from the stack.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
                                   MachinePointerInfo(),
                                   false, false, false, 0));
    }
  }

  // Assign locations to all of the incoming aggregate by value arguments.
  // Aggregates passed by value are stored in the local variable space of the
  // caller's stack frame, right above the parameter list area.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                      getTargetMachine(), ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);

  CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea = CCByValInfo.getNextStackOffset();

  // Set the size that is at least reserved in caller of this function.  Tail
  // call optimized function's reserved stack space needs to be aligned so
  // that taking the difference between two stack areas will result in an
  // aligned stack.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();

  MinReservedArea =
    std::max(MinReservedArea,
             PPCFrameLowering::getMinCallFrameSize(false, false));

  unsigned TargetAlign =
    DAG.getMachineFunction().getTarget().getFrameLowering()->
      getStackAlignment();
  unsigned AlignMask = TargetAlign-1;
  MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;

  FI->setMinReservedArea(MinReservedArea);

  SmallVector<SDValue, 8> MemOps;

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of
  // llvm.va_start.
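  // (The register save area created below is 8 GPRs * 4 bytes +
  // 8 FPRs * 8 bytes = 96 bytes; the gpr_index/fpr_index fields of the
  // va_list walk through it, while overflow arguments are read from the
  // caller's parameter area via VarArgsStackOffset.)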
  if (isVarArg) {
    static const uint16_t GPArgRegs[] = {
      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
    };
    const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);

    static const uint16_t FPArgRegs[] = {
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
      PPC::F8
    };
    const unsigned NumFPArgRegs = array_lengthof(FPArgRegs);

    FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs,
                                                          NumGPArgRegs));
    FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs,
                                                          NumFPArgRegs));

    // Make room for NumGPArgRegs and NumFPArgRegs.
    int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
                NumFPArgRegs * EVT(MVT::f64).getSizeInBits()/8;

    FuncInfo->setVarArgsStackOffset(
      MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
                             CCInfo.getNextStackOffset(), true));

    FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // The fixed integer arguments of a variadic function are stored to the
    // VarArgsFrameIndex on the stack so that they may be loaded by
    // dereferencing the result of va_next.
    for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                   MachinePointerInfo(), false, false, 0);
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }

    // FIXME 32-bit SVR4: We only need to save FP argument registers if CR
    // bit 6 is set.
    // The double arguments are stored to the VarArgsFrameIndex
    // on the stack.
    for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                   MachinePointerInfo(), false, false, 0);
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store
      SDValue PtrOff = DAG.getConstant(EVT(MVT::f64).getSizeInBits()/8,
                                       PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl,
                        MVT::Other, &MemOps[0], MemOps.size());

  return Chain;
}

// PPC64 passes i8, i16, and i32 values in i64 registers.  Promote
// value to MVT::i64 and then truncate to the correct register size.
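// (The AssertSext/AssertZext nodes below generate no code; they record that
// the upper bits already hold a correct extension, which lets later DAG
// combines delete redundant extends after the truncate.)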
SDValue
PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
                                     SelectionDAG &DAG, SDValue ArgVal,
                                     SDLoc dl) const {
  if (Flags.isSExt())
    ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));
  else if (Flags.isZExt())
    ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));

  return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
}

// Set the size that is at least reserved in caller of this function.  Tail
// call optimized functions' reserved stack space needs to be aligned so that
// taking the difference between two stack areas will result in an aligned
// stack.
void
PPCTargetLowering::setMinReservedArea(MachineFunction &MF, SelectionDAG &DAG,
                                      unsigned nAltivecParamsAtEnd,
                                      unsigned MinReservedArea,
                                      bool isPPC64) const {
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  // Add the Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    MinReservedArea = ((MinReservedArea+15)/16)*16;
    MinReservedArea += 16*nAltivecParamsAtEnd;
  }
  MinReservedArea =
    std::max(MinReservedArea,
             PPCFrameLowering::getMinCallFrameSize(isPPC64, true));
  unsigned TargetAlign
    = DAG.getMachineFunction().getTarget().getFrameLowering()->
        getStackAlignment();
  unsigned AlignMask = TargetAlign-1;
  MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
  FI->setMinReservedArea(MinReservedArea);
}

SDValue
PPCTargetLowering::LowerFormalArguments_64SVR4(
                                      SDValue Chain,
                                      CallingConv::ID CallConv, bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg>
                                        &Ins,
                                      SDLoc dl, SelectionDAG &DAG,
                                      SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = 8;

  unsigned ArgOffset = PPCFrameLowering::getLinkageSize(true, true);
  // Area that is at least reserved in caller of this function.
  unsigned MinReservedArea = ArgOffset;

  static const uint16_t GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };

  static const uint16_t *FPR = GetFPR();

  static const uint16_t VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR);
  const unsigned Num_FPR_Regs = 13;
  const unsigned Num_VR_Regs  = array_lengthof(VR);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  // Add DAG nodes to load the arguments or copy them out of registers.  On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.
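  // (In this ABI every argument also owns a doubleword slot in the caller's
  // parameter save area whether or not it arrived in a register, which is
  // why ArgOffset below advances even for register-resident arguments.)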

  SmallVector<SDValue, 8> MemOps;
  unsigned nAltivecParamsAtEnd = 0;
  Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    unsigned ObjSize = ObjectVT.getSizeInBits()/8;
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
    CurArgIdx = Ins[ArgNo].OrigArgIndex;

    unsigned CurArgOffset = ArgOffset;

    // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
    if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
        ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
      if (isVarArg) {
        MinReservedArea = ((MinReservedArea+15)/16)*16;
        MinReservedArea += CalculateStackSlotSize(ObjectVT,
                                                  Flags,
                                                  PtrByteSize);
      } else
        nAltivecParamsAtEnd++;
    } else
      // Calculate min reserved area.
      MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
                                                Flags,
                                                PtrByteSize);

    // FIXME the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      // ObjSize is the true size; ArgSize is ObjSize rounded up to a multiple
      // of registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Empty aggregate parameters do not take up registers.  Examples:
      //   struct { } a;
      //   union  { } b;
      //   int c[0];
      // etc.  However, we have to provide a place-holder in InVals, so
      // pretend we have an 8-byte item at the current address for that
      // purpose.
      if (!ObjSize) {
        int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
        SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
        InVals.push_back(FIN);
        continue;
      }

      unsigned BVAlign = Flags.getByValAlign();
      if (BVAlign > 8) {
        ArgOffset = ((ArgOffset+BVAlign-1)/BVAlign)*BVAlign;
        CurArgOffset = ArgOffset;
      }

      // All aggregates smaller than 8 bytes must be passed right-justified.
      if (ObjSize < PtrByteSize)
        CurArgOffset = CurArgOffset + (PtrByteSize - ObjSize);
      // The value of the object is its address.
      int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(FIN);

      if (ObjSize < 8) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store;

          if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
            EVT ObjType = (ObjSize == 1 ? MVT::i8 :
                           (ObjSize == 2 ? MVT::i16 : MVT::i32));
            Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
                                      MachinePointerInfo(FuncArg),
                                      ObjType, false, false, 0);
          } else {
            // For sizes that don't fit a truncating store (3, 5, 6, 7),
            // store the whole register as-is to the parameter save area
            // slot.  The address of the parameter was already calculated
            // above (InVals.push_back(FIN)) to be the right-justified
            // offset within the slot.  For this store, we need a new
            // frame index that points at the beginning of the slot.
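            // (E.g. a 3-byte aggregate is addressed at bytes 5..7 of its
            // doubleword slot; the full 8-byte store below fills bytes 0..7,
            // so a load through FIN still sees the right-justified data.)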
            int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
            SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
            Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                 MachinePointerInfo(FuncArg),
                                 false, false, 0);
          }

          MemOps.push_back(Store);
          ++GPR_idx;
        }
        // Whether we copied from a register or not, advance the offset
        // into the parameter save area by a full doubleword.
        ArgOffset += PtrByteSize;
        continue;
      }

      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        // Store whatever pieces of the object are in registers
        // to memory.  ArgOffset will be the address of the beginning
        // of the object.
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
          SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                       MachinePointerInfo(FuncArg, j),
                                       false, false, 0);
          MemOps.push_back(Store);
          ++GPR_idx;
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ArgSize - j;
          break;
        }
      }
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i32:
    case MVT::i64:
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32)
          // PPC64 passes i8, i16, and i32 values in i64 registers.  Promote
          // value to MVT::i64 and then truncate to the correct register size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        ++GPR_idx;
      } else {
        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // Every 8 bytes of argument space consumes one of the GPRs available
      // for argument passing.
      if (GPR_idx != Num_GPR_Regs) {
        ++GPR_idx;
      }
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else {
        needsLoad = true;
        ArgSize = PtrByteSize;
      }

      ArgOffset += 8;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      // Note that vector arguments in registers don't reserve stack space,
      // except in varargs functions.
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        if (isVarArg) {
          while ((ArgOffset % 16) != 0) {
            ArgOffset += PtrByteSize;
            if (GPR_idx != Num_GPR_Regs)
              GPR_idx++;
          }
          ArgOffset += 16;
          GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
        }
        ++VR_idx;
      } else {
        // Vectors are aligned.
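        // (Round the offset up to the next 16-byte boundary, e.g. 40 -> 48,
        // before using it as the vector's stack address.)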
        ArgOffset = ((ArgOffset+15)/16)*16;
        CurArgOffset = ArgOffset;
        ArgOffset += 16;
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      int FI = MFI->CreateFixedObject(ObjSize,
                                      CurArgOffset + (ArgSize - ObjSize),
                                      isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
                           false, false, false, 0);
    }

    InVals.push_back(ArgVal);
  }

  // Set the size that is at least reserved in caller of this function.  Tail
  // call optimized functions' reserved stack space needs to be aligned so
  // that taking the difference between two stack areas will result in an
  // aligned stack.
  setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, true);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of
  // llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
      MFI->CreateFixedObject(PtrByteSize, Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                   MachinePointerInfo(), false, false, 0);
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store
      SDValue PtrOff = DAG.getConstant(PtrByteSize, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl,
                        MVT::Other, &MemOps[0], MemOps.size());

  return Chain;
}

SDValue
PPCTargetLowering::LowerFormalArguments_Darwin(
                                      SDValue Chain,
                                      CallingConv::ID CallConv, bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg>
                                        &Ins,
                                      SDLoc dl, SelectionDAG &DAG,
                                      SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true);
  // Area that is at least reserved in caller of this function.
  unsigned MinReservedArea = ArgOffset;

  static const uint16_t GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const uint16_t GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };

  static const uint16_t *FPR = GetFPR();

  static const uint16_t VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
  const unsigned Num_FPR_Regs = 13;
  const unsigned Num_VR_Regs  = array_lengthof(VR);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32;

  // In 32-bit non-varargs functions, the stack space for vectors is after the
  // stack space for non-vectors.  We do not use this space unless we have
  // too many vectors to fit in registers, something that only occurs in
  // constructed examples:), but we have to walk the arglist to figure
  // that out...for the pathological case, compute VecArgOffset as the
  // start of the vector parameter area.  Computing VecArgOffset is the
  // entire point of the following loop.
  unsigned VecArgOffset = ArgOffset;
  if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
         ++ArgNo) {
      EVT ObjectVT = Ins[ArgNo].VT;
      ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;

      if (Flags.isByVal()) {
        // ObjSize is the true size; ArgSize is ObjSize rounded up to a
        // multiple of regs.
        unsigned ObjSize = Flags.getByValSize();
        unsigned ArgSize =
          ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
        VecArgOffset += ArgSize;
        continue;
      }

      switch(ObjectVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unhandled argument type!");
      case MVT::i32:
      case MVT::f32:
        VecArgOffset += 4;
        break;
      case MVT::i64:  // PPC64
      case MVT::f64:
        // FIXME: We are guaranteed to be !isPPC64 at this point.
        // Does MVT::i64 apply?
        VecArgOffset += 8;
        break;
      case MVT::v4f32:
      case MVT::v4i32:
      case MVT::v8i16:
      case MVT::v16i8:
        // Nothing to do, we're only looking at Nonvector args here.
        break;
      }
    }
  }
  // We've found where the vector parameter area in memory is.  Skip the
  // first 12 parameters; these don't use that memory.
  VecArgOffset = ((VecArgOffset+15)/16)*16;
  VecArgOffset += 12*16;

  // Add DAG nodes to load the arguments or copy them out of registers.  On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.

  SmallVector<SDValue, 8> MemOps;
  unsigned nAltivecParamsAtEnd = 0;
  Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    unsigned ObjSize = ObjectVT.getSizeInBits()/8;
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
    CurArgIdx = Ins[ArgNo].OrigArgIndex;

    unsigned CurArgOffset = ArgOffset;

    // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
    if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
        ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
      if (isVarArg || isPPC64) {
        MinReservedArea = ((MinReservedArea+15)/16)*16;
        MinReservedArea += CalculateStackSlotSize(ObjectVT,
                                                  Flags,
                                                  PtrByteSize);
      } else
        nAltivecParamsAtEnd++;
    } else
      // Calculate min reserved area.
      MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
                                                Flags,
                                                PtrByteSize);

    // FIXME the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      // ObjSize is the true size; ArgSize is ObjSize rounded up to a multiple
      // of registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Objects of size 1 and 2 are right justified, everything else is
      // left justified.  This means the memory address is adjusted forwards.
      if (ObjSize==1 || ObjSize==2) {
        CurArgOffset = CurArgOffset + (4 - ObjSize);
      }
      // The value of the object is its address.
      int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(FIN);
      if (ObjSize==1 || ObjSize==2) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
          SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
                                            MachinePointerInfo(FuncArg),
                                            ObjType, false, false, 0);
          MemOps.push_back(Store);
          ++GPR_idx;
        }

        ArgOffset += PtrByteSize;

        continue;
      }
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        // Store whatever pieces of the object are in registers
        // to memory.  ArgOffset will be the address of the beginning
        // of the object.
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
          SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                       MachinePointerInfo(FuncArg, j),
                                       false, false, 0);
          MemOps.push_back(Store);
          ++GPR_idx;
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
          break;
        }
      }
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i32:
      if (!isPPC64) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
          ++GPR_idx;
        } else {
          needsLoad = true;
          ArgSize = PtrByteSize;
        }
        // All int arguments reserve stack space in the Darwin ABI.
        ArgOffset += PtrByteSize;
        break;
      }
      // FALLTHROUGH
    case MVT::i64:  // PPC64
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32)
          // PPC64 passes i8, i16, and i32 values in i64 registers.  Promote
          // value to MVT::i64 and then truncate to the correct register size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        ++GPR_idx;
      } else {
        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      // All int arguments reserve stack space in the Darwin ABI.
      ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // Every 4 bytes of argument space consumes one of the GPRs available
      // for argument passing.
      if (GPR_idx != Num_GPR_Regs) {
        ++GPR_idx;
        if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
          ++GPR_idx;
      }
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else {
        needsLoad = true;
      }

      // All FP arguments reserve stack space in the Darwin ABI.
      ArgOffset += isPPC64 ? 8 : ObjSize;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      // Note that vector arguments in registers don't reserve stack space,
      // except in varargs functions.
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        if (isVarArg) {
          while ((ArgOffset % 16) != 0) {
            ArgOffset += PtrByteSize;
            if (GPR_idx != Num_GPR_Regs)
              GPR_idx++;
          }
          ArgOffset += 16;
          GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
        }
        ++VR_idx;
      } else {
        if (!isVarArg && !isPPC64) {
          // Vectors go after all the nonvectors.
          CurArgOffset = VecArgOffset;
          VecArgOffset += 16;
        } else {
          // Vectors are aligned.
          ArgOffset = ((ArgOffset+15)/16)*16;
          CurArgOffset = ArgOffset;
          ArgOffset += 16;
        }
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      int FI = MFI->CreateFixedObject(ObjSize,
                                      CurArgOffset + (ArgSize - ObjSize),
                                      isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
                           false, false, false, 0);
    }

    InVals.push_back(ArgVal);
  }

  // Set the size that is at least reserved in caller of this function.  Tail
  // call optimized functions' reserved stack space needs to be aligned so
  // that taking the difference between two stack areas will result in an
  // aligned stack.
  setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, isPPC64);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of
  // llvm.va_start.
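  // (Unlike 32-bit SVR4, which builds a separate register save area, this
  // lowering reuses the caller's fixed parameter area: only the integer
  // argument registers not already consumed above are spilled below.)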
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
      MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
                             Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg;

      if (isPPC64)
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      else
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                   MachinePointerInfo(), false, false, 0);
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl,
                        MVT::Other, &MemOps[0], MemOps.size());

  return Chain;
}

/// CalculateParameterAndLinkageAreaSize - Get the size of the parameter plus
/// linkage area for the Darwin ABI, or the 64-bit SVR4 ABI.
static unsigned
CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
                                     bool isPPC64,
                                     bool isVarArg,
                                     unsigned CC,
                                     const SmallVectorImpl<ISD::OutputArg>
                                       &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     unsigned &nAltivecParamsAtEnd) {
  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area.  We start with 24/48 bytes, which is
  // prereserved space for [SP][CR][LR][3 x unused].
  unsigned NumBytes = PPCFrameLowering::getLinkageSize(isPPC64, true);
  unsigned NumOps = Outs.size();
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  // Add up all the space actually used.
  // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
  // they all go in registers, but we must reserve stack space for them for
  // possible use by the caller.  In varargs or 64-bit calls, parameters are
  // assigned stack space in order, with padding so Altivec parameters are
  // 16-byte aligned.
  nAltivecParamsAtEnd = 0;
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    // Varargs Altivec parameters are padded to a 16 byte boundary.
    if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
        ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) {
      if (!isVarArg && !isPPC64) {
        // Non-varargs Altivec parameters go after all the non-Altivec
        // parameters; handle those later so we know how much padding we need.
        nAltivecParamsAtEnd++;
        continue;
      }
      // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
      NumBytes = ((NumBytes+15)/16)*16;
    }
    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    NumBytes = ((NumBytes+15)/16)*16;
    NumBytes += 16*nAltivecParamsAtEnd;
  }

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if it is
  // varargs.
2911 // Because we cannot tell if this is needed on the caller side, we have to 2912 // conservatively assume that it is needed. As such, make sure we have at 2913 // least enough stack space for the caller to store the 8 GPRs. 2914 NumBytes = std::max(NumBytes, 2915 PPCFrameLowering::getMinCallFrameSize(isPPC64, true)); 2916 2917 // Tail call needs the stack to be aligned. 2918 if (CC == CallingConv::Fast && DAG.getTarget().Options.GuaranteedTailCallOpt){ 2919 unsigned TargetAlign = DAG.getMachineFunction().getTarget(). 2920 getFrameLowering()->getStackAlignment(); 2921 unsigned AlignMask = TargetAlign-1; 2922 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 2923 } 2924 2925 return NumBytes; 2926} 2927 2928/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be 2929/// adjusted to accommodate the arguments for the tailcall. 2930static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall, 2931 unsigned ParamSize) { 2932 2933 if (!isTailCall) return 0; 2934 2935 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>(); 2936 unsigned CallerMinReservedArea = FI->getMinReservedArea(); 2937 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize; 2938 // Remember only if the new adjustement is bigger. 2939 if (SPDiff < FI->getTailCallSPDelta()) 2940 FI->setTailCallSPDelta(SPDiff); 2941 2942 return SPDiff; 2943} 2944 2945/// IsEligibleForTailCallOptimization - Check whether the call is eligible 2946/// for tail call optimization. Targets which want to do tail call 2947/// optimization should implement this function. 2948bool 2949PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2950 CallingConv::ID CalleeCC, 2951 bool isVarArg, 2952 const SmallVectorImpl<ISD::InputArg> &Ins, 2953 SelectionDAG& DAG) const { 2954 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 2955 return false; 2956 2957 // Variable argument functions are not supported. 2958 if (isVarArg) 2959 return false; 2960 2961 MachineFunction &MF = DAG.getMachineFunction(); 2962 CallingConv::ID CallerCC = MF.getFunction()->getCallingConv(); 2963 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { 2964 // Functions containing by val parameters are not supported. 2965 for (unsigned i = 0; i != Ins.size(); i++) { 2966 ISD::ArgFlagsTy Flags = Ins[i].Flags; 2967 if (Flags.isByVal()) return false; 2968 } 2969 2970 // Non PIC/GOT tail calls are supported. 2971 if (getTargetMachine().getRelocationModel() != Reloc::PIC_) 2972 return true; 2973 2974 // At the moment we can only do local tail calls (in same module, hidden 2975 // or protected) if we are generating PIC. 2976 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 2977 return G->getGlobal()->hasHiddenVisibility() 2978 || G->getGlobal()->hasProtectedVisibility(); 2979 } 2980 2981 return false; 2982} 2983 2984/// isCallCompatibleAddress - Return the immediate to use if the specified 2985/// 32-bit value is representable in the immediate field of a BxA instruction. 2986static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { 2987 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 2988 if (!C) return 0; 2989 2990 int Addr = C->getZExtValue(); 2991 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 2992 SignExtend32<26>(Addr) != Addr) 2993 return 0; // Top 6 bits have to be sext of immediate. 
2994 2995 return DAG.getConstant((int)C->getZExtValue() >> 2, 2996 DAG.getTargetLoweringInfo().getPointerTy()).getNode(); 2997} 2998 2999namespace { 3000 3001struct TailCallArgumentInfo { 3002 SDValue Arg; 3003 SDValue FrameIdxOp; 3004 int FrameIdx; 3005 3006 TailCallArgumentInfo() : FrameIdx(0) {} 3007}; 3008 3009} 3010 3011/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 3012static void 3013StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, 3014 SDValue Chain, 3015 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 3016 SmallVectorImpl<SDValue> &MemOpChains, 3017 SDLoc dl) { 3018 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 3019 SDValue Arg = TailCallArgs[i].Arg; 3020 SDValue FIN = TailCallArgs[i].FrameIdxOp; 3021 int FI = TailCallArgs[i].FrameIdx; 3022 // Store relative to framepointer. 3023 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN, 3024 MachinePointerInfo::getFixedStack(FI), 3025 false, false, 0)); 3026 } 3027} 3028 3029/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 3030/// the appropriate stack slot for the tail call optimized function call. 3031static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, 3032 MachineFunction &MF, 3033 SDValue Chain, 3034 SDValue OldRetAddr, 3035 SDValue OldFP, 3036 int SPDiff, 3037 bool isPPC64, 3038 bool isDarwinABI, 3039 SDLoc dl) { 3040 if (SPDiff) { 3041 // Calculate the new stack slot for the return address. 3042 int SlotSize = isPPC64 ? 8 : 4; 3043 int NewRetAddrLoc = SPDiff + PPCFrameLowering::getReturnSaveOffset(isPPC64, 3044 isDarwinABI); 3045 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize, 3046 NewRetAddrLoc, true); 3047 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 3048 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 3049 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 3050 MachinePointerInfo::getFixedStack(NewRetAddr), 3051 false, false, 0); 3052 3053 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 3054 // slot as the FP is never overwritten. 3055 if (isDarwinABI) { 3056 int NewFPLoc = 3057 SPDiff + PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI); 3058 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc, 3059 true); 3060 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 3061 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 3062 MachinePointerInfo::getFixedStack(NewFPIdx), 3063 false, false, 0); 3064 } 3065 } 3066 return Chain; 3067} 3068 3069/// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 3070/// the position of the argument. 3071static void 3072CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 3073 SDValue Arg, int SPDiff, unsigned ArgOffset, 3074 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 3075 int Offset = ArgOffset + SPDiff; 3076 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; 3077 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 3078 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 3079 SDValue FIN = DAG.getFrameIndex(FI, VT); 3080 TailCallArgumentInfo Info; 3081 Info.Arg = Arg; 3082 Info.FrameIdxOp = FIN; 3083 Info.FrameIdx = FI; 3084 TailCallArguments.push_back(Info); 3085} 3086 3087/// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address 3088/// stack slot. Returns the chain as result and the loaded frame pointers in 3089/// LROpOut/FPOpout. Used when tail calling. 
3090SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, 3091 int SPDiff, 3092 SDValue Chain, 3093 SDValue &LROpOut, 3094 SDValue &FPOpOut, 3095 bool isDarwinABI, 3096 SDLoc dl) const { 3097 if (SPDiff) { 3098 // Load the LR and FP stack slot for later adjusting. 3099 EVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32; 3100 LROpOut = getReturnAddrFrameIndex(DAG); 3101 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(), 3102 false, false, false, 0); 3103 Chain = SDValue(LROpOut.getNode(), 1); 3104 3105 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 3106 // slot as the FP is never overwritten. 3107 if (isDarwinABI) { 3108 FPOpOut = getFramePointerFrameIndex(DAG); 3109 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(), 3110 false, false, false, 0); 3111 Chain = SDValue(FPOpOut.getNode(), 1); 3112 } 3113 } 3114 return Chain; 3115} 3116 3117/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 3118/// by "Src" to address "Dst" of size "Size". Alignment information is 3119/// specified by the specific parameter attribute. The copy will be passed as 3120/// a byval function parameter. 3121/// Sometimes what we are copying is the end of a larger object, the part that 3122/// does not fit in registers. 3123static SDValue 3124CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 3125 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 3126 SDLoc dl) { 3127 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 3128 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 3129 false, false, MachinePointerInfo(0), 3130 MachinePointerInfo(0)); 3131} 3132 3133/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 3134/// tail calls. 3135static void 3136LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, 3137 SDValue Arg, SDValue PtrOff, int SPDiff, 3138 unsigned ArgOffset, bool isPPC64, bool isTailCall, 3139 bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 3140 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, 3141 SDLoc dl) { 3142 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3143 if (!isTailCall) { 3144 if (isVector) { 3145 SDValue StackPtr; 3146 if (isPPC64) 3147 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 3148 else 3149 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 3150 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 3151 DAG.getConstant(ArgOffset, PtrVT)); 3152 } 3153 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 3154 MachinePointerInfo(), false, false, 0)); 3155 // Calculate and remember argument location. 3156 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 3157 TailCallArguments); 3158} 3159 3160static 3161void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 3162 SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, 3163 SDValue LROp, SDValue FPOp, bool isDarwinABI, 3164 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 3165 MachineFunction &MF = DAG.getMachineFunction(); 3166 3167 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 3168 // might overwrite each other in case of tail call optimization. 3169 SmallVector<SDValue, 8> MemOpChains2; 3170 // Do not flag preceding copytoreg stuff together with the following stuff. 
3171 InFlag = SDValue();
3172 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
3173 MemOpChains2, dl);
3174 if (!MemOpChains2.empty())
3175 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3176 &MemOpChains2[0], MemOpChains2.size());
3177
3178 // Store the return address to the appropriate stack slot.
3179 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff,
3180 isPPC64, isDarwinABI, dl);
3181
3182 // Emit callseq_end just before tailcall node.
3183 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
3184 DAG.getIntPtrConstant(0, true), InFlag, dl);
3185 InFlag = Chain.getValue(1);
3186}
3187
3188static
3189unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
3190 SDValue &Chain, SDLoc dl, int SPDiff, bool isTailCall,
3191 SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass,
3192 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
3193 const PPCSubtarget &PPCSubTarget) {
3194
3195 bool isPPC64 = PPCSubTarget.isPPC64();
3196 bool isSVR4ABI = PPCSubTarget.isSVR4ABI();
3197
3198 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3199 NodeTys.push_back(MVT::Other); // Returns a chain
3200 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use.
3201
3202 unsigned CallOpc = PPCISD::CALL;
3203
3204 bool needIndirectCall = true;
3205 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
3206 // If this is an absolute destination address, use the munged value.
3207 Callee = SDValue(Dest, 0);
3208 needIndirectCall = false;
3209 }
3210
3211 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3212 // XXX Work around for http://llvm.org/bugs/show_bug.cgi?id=5201
3213 // Use indirect calls for ALL function calls in JIT mode, since the
3214 // far-call stubs may be outside relocation limits for a BL instruction.
3215 if (!DAG.getTarget().getSubtarget<PPCSubtarget>().isJITCodeModel()) {
3216 unsigned OpFlags = 0;
3217 if (DAG.getTarget().getRelocationModel() != Reloc::Static &&
3218 (PPCSubTarget.getTargetTriple().isMacOSX() &&
3219 PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5)) &&
3220 (G->getGlobal()->isDeclaration() ||
3221 G->getGlobal()->isWeakForLinker())) {
3222 // PC-relative references to external symbols should go through $stub,
3223 // unless we're building with the leopard linker or later, which
3224 // automatically synthesizes these stubs.
3225 OpFlags = PPCII::MO_DARWIN_STUB;
3226 }
3227
3228 // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
3229 // every direct call is), turn it into a TargetGlobalAddress /
3230 // TargetExternalSymbol node so that legalize doesn't hack it.
3231 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
3232 Callee.getValueType(),
3233 0, OpFlags);
3234 needIndirectCall = false;
3235 }
3236 }
3237
3238 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3239 unsigned char OpFlags = 0;
3240
3241 if (DAG.getTarget().getRelocationModel() != Reloc::Static &&
3242 (PPCSubTarget.getTargetTriple().isMacOSX() &&
3243 PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5))) {
3244 // PC-relative references to external symbols should go through $stub,
3245 // unless we're building with the leopard linker or later, which
3246 // automatically synthesizes these stubs.
3247 OpFlags = PPCII::MO_DARWIN_STUB;
3248 }
3249
3250 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
3251 OpFlags);
3252 needIndirectCall = false;
3253 }
3254
3255 if (needIndirectCall) {
3256 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
3257 // to do the call, so we can't use PPCISD::CALL.
3258 SDValue MTCTROps[] = {Chain, Callee, InFlag};
3259
3260 if (isSVR4ABI && isPPC64) {
3261 // Function pointers in the 64-bit SVR4 ABI do not point to the function
3262 // entry point, but to the function descriptor (the function entry point
3263 // address is part of the function descriptor though).
3264 // The function descriptor is a three doubleword structure with the
3265 // following fields: function entry point, TOC base address and
3266 // environment pointer.
3267 // Thus for a call through a function pointer, the following actions need
3268 // to be performed:
3269 // 1. Save the TOC of the caller in the TOC save area of its stack
3270 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
3271 // 2. Load the address of the function entry point from the function
3272 // descriptor.
3273 // 3. Load the TOC of the callee from the function descriptor into r2.
3274 // 4. Load the environment pointer from the function descriptor into
3275 // r11.
3276 // 5. Branch to the function entry point address.
3277 // 6. On return of the callee, the TOC of the caller needs to be
3278 // restored (this is done in FinishCall()).
3279 //
3280 // All those operations are flagged together to ensure that no other
3281 // operations can be scheduled in between. E.g. without flagging the
3282 // operations together, a TOC access in the caller could be scheduled
3283 // between the load of the callee TOC and the branch to the callee, which
3284 // results in the TOC access going through the TOC of the callee instead
3285 // of going through the TOC of the caller, which leads to incorrect code.
3286
3287 // Load the address of the function entry point from the function
3288 // descriptor.
3289 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Glue);
3290 SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, MTCTROps,
3291 InFlag.getNode() ? 3 : 2);
3292 Chain = LoadFuncPtr.getValue(1);
3293 InFlag = LoadFuncPtr.getValue(2);
3294
3295 // Load environment pointer into r11.
3296 // Offset of the environment pointer within the function descriptor.
3297 SDValue PtrOff = DAG.getIntPtrConstant(16);
3298
3299 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
3300 SDValue LoadEnvPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, Chain, AddPtr,
3301 InFlag);
3302 Chain = LoadEnvPtr.getValue(1);
3303 InFlag = LoadEnvPtr.getValue(2);
3304
3305 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr,
3306 InFlag);
3307 Chain = EnvVal.getValue(0);
3308 InFlag = EnvVal.getValue(1);
3309
3310 // Load TOC of the callee into r2. We are using a target-specific load
3311 // with r2 hard coded, because the result of a target-independent load
3312 // would never go directly into r2, since r2 is a reserved register (which
3313 // prevents the register allocator from allocating it), resulting in an
3314 // additional register being allocated and an unnecessary move instruction
3315 // being generated.
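      //
      // For orientation before the final TOC-load below, the overall
      // sequence being assembled corresponds roughly to the following
      // (informal illustration only, not the exact emitted code: register
      // choices and scheduling are made later; the field offsets are the
      // ones implied by the three-doubleword descriptor layout above --
      // entry point at 0, TOC at 8, environment pointer at 16 -- with the
      // descriptor address assumed to be in r12 and the TOC save offset
      // of 40 taken from the save-area store in LowerCall_64SVR4()):
      //
      //   std 2, 40(1)    ; step 1: save the caller's TOC
      //   ld 0, 0(12)     ; step 2: load the function entry point
      //   ld 11, 16(12)   ; step 4: load the environment pointer into r11
      //   ld 2, 8(12)     ; step 3: load the callee's TOC into r2
      //   mtctr 0
      //   bctrl           ; step 5: branch to the entry point
      //   ld 2, 40(1)     ; step 6: restore the caller's TOC
      //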
3316 VTs = DAG.getVTList(MVT::Other, MVT::Glue); 3317 SDValue LoadTOCPtr = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain, 3318 Callee, InFlag); 3319 Chain = LoadTOCPtr.getValue(0); 3320 InFlag = LoadTOCPtr.getValue(1); 3321 3322 MTCTROps[0] = Chain; 3323 MTCTROps[1] = LoadFuncPtr; 3324 MTCTROps[2] = InFlag; 3325 } 3326 3327 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps, 3328 2 + (InFlag.getNode() != 0)); 3329 InFlag = Chain.getValue(1); 3330 3331 NodeTys.clear(); 3332 NodeTys.push_back(MVT::Other); 3333 NodeTys.push_back(MVT::Glue); 3334 Ops.push_back(Chain); 3335 CallOpc = PPCISD::BCTRL; 3336 Callee.setNode(0); 3337 // Add use of X11 (holding environment pointer) 3338 if (isSVR4ABI && isPPC64) 3339 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 3340 // Add CTR register as callee so a bctr can be emitted later. 3341 if (isTailCall) 3342 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 3343 } 3344 3345 // If this is a direct call, pass the chain and the callee. 3346 if (Callee.getNode()) { 3347 Ops.push_back(Chain); 3348 Ops.push_back(Callee); 3349 } 3350 // If this is a tail call add stack pointer delta. 3351 if (isTailCall) 3352 Ops.push_back(DAG.getConstant(SPDiff, MVT::i32)); 3353 3354 // Add argument registers to the end of the list so that they are known live 3355 // into the call. 3356 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 3357 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 3358 RegsToPass[i].second.getValueType())); 3359 3360 return CallOpc; 3361} 3362 3363static 3364bool isLocalCall(const SDValue &Callee) 3365{ 3366 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 3367 return !G->getGlobal()->isDeclaration() && 3368 !G->getGlobal()->isWeakForLinker(); 3369 return false; 3370} 3371 3372SDValue 3373PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 3374 CallingConv::ID CallConv, bool isVarArg, 3375 const SmallVectorImpl<ISD::InputArg> &Ins, 3376 SDLoc dl, SelectionDAG &DAG, 3377 SmallVectorImpl<SDValue> &InVals) const { 3378 3379 SmallVector<CCValAssign, 16> RVLocs; 3380 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3381 getTargetMachine(), RVLocs, *DAG.getContext()); 3382 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 3383 3384 // Copy all of the result registers out of their specified physreg. 
3385 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3386 CCValAssign &VA = RVLocs[i];
3387 assert(VA.isRegLoc() && "Can only return in registers!");
3388
3389 SDValue Val = DAG.getCopyFromReg(Chain, dl,
3390 VA.getLocReg(), VA.getLocVT(), InFlag);
3391 Chain = Val.getValue(1);
3392 InFlag = Val.getValue(2);
3393
3394 switch (VA.getLocInfo()) {
3395 default: llvm_unreachable("Unknown loc info!");
3396 case CCValAssign::Full: break;
3397 case CCValAssign::AExt:
3398 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
3399 break;
3400 case CCValAssign::ZExt:
3401 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
3402 DAG.getValueType(VA.getValVT()));
3403 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
3404 break;
3405 case CCValAssign::SExt:
3406 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
3407 DAG.getValueType(VA.getValVT()));
3408 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
3409 break;
3410 }
3411
3412 InVals.push_back(Val);
3413 }
3414
3415 return Chain;
3416}
3417
3418SDValue
3419PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
3420 bool isTailCall, bool isVarArg,
3421 SelectionDAG &DAG,
3422 SmallVector<std::pair<unsigned, SDValue>, 8>
3423 &RegsToPass,
3424 SDValue InFlag, SDValue Chain,
3425 SDValue &Callee,
3426 int SPDiff, unsigned NumBytes,
3427 const SmallVectorImpl<ISD::InputArg> &Ins,
3428 SmallVectorImpl<SDValue> &InVals) const {
3429 std::vector<EVT> NodeTys;
3430 SmallVector<SDValue, 8> Ops;
3431 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff,
3432 isTailCall, RegsToPass, Ops, NodeTys,
3433 PPCSubTarget);
3434
3435 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls.
3436 if (isVarArg && PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64())
3437 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
3438
3439 // When performing tail call optimization the callee pops its arguments off
3440 // the stack. Account for this here so these bytes can be pushed back on in
3441 // PPCFrameLowering::eliminateCallFramePseudoInstr.
3442 int BytesCalleePops =
3443 (CallConv == CallingConv::Fast &&
3444 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;
3445
3446 // Add a register mask operand representing the call-preserved registers.
3447 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
3448 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3449 assert(Mask && "Missing call preserved mask for calling convention");
3450 Ops.push_back(DAG.getRegisterMask(Mask));
3451
3452 if (InFlag.getNode())
3453 Ops.push_back(InFlag);
3454
3455 // Emit tail call.
3456 if (isTailCall) {
3457 assert(((Callee.getOpcode() == ISD::Register &&
3458 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
3459 Callee.getOpcode() == ISD::TargetExternalSymbol ||
3460 Callee.getOpcode() == ISD::TargetGlobalAddress ||
3461 isa<ConstantSDNode>(Callee)) &&
3462 "Expecting a global address, external symbol, absolute value or register");
3463
3464 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Ops[0], Ops.size());
3465 }
3466
3467 // Add a NOP immediately after the branch instruction when using the 64-bit
3468 // SVR4 ABI. At link time, if caller and callee are in a different module and
3469 // thus have a different TOC, the call will be replaced with a call to a stub
3470 // function which saves the current TOC, loads the TOC of the callee and
3471 // branches to the callee. The NOP will be replaced with a load instruction
3472 // which restores the TOC of the caller from the TOC save slot of the current
3473 // stack frame. If caller and callee belong to the same module (and have the
3474 // same TOC), the NOP will remain unchanged.
3475
3476 bool needsTOCRestore = false;
3477 if (!isTailCall && PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
3478 if (CallOpc == PPCISD::BCTRL) {
3479 // This is a call through a function pointer.
3480 // Restore the caller's TOC from the save area into R2.
3481 // See PrepareCall() for more information about calls through function
3482 // pointers in the 64-bit SVR4 ABI.
3483 // We are using a target-specific load with r2 hard coded, because the
3484 // result of a target-independent load would never go directly into r2,
3485 // since r2 is a reserved register (which prevents the register allocator
3486 // from allocating it), resulting in an additional register being
3487 // allocated and an unnecessary move instruction being generated.
3488 needsTOCRestore = true;
3489 } else if ((CallOpc == PPCISD::CALL) &&
3490 (!isLocalCall(Callee) ||
3491 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3492 // Otherwise, insert a NOP for non-local calls.
3493 CallOpc = PPCISD::CALL_NOP;
3494 }
3495 }
3496
3497 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
3498 InFlag = Chain.getValue(1);
3499
3500 if (needsTOCRestore) {
3501 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
3502 Chain = DAG.getNode(PPCISD::TOC_RESTORE, dl, VTs, Chain, InFlag);
3503 InFlag = Chain.getValue(1);
3504 }
3505
3506 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
3507 DAG.getIntPtrConstant(BytesCalleePops, true),
3508 InFlag, dl);
3509 if (!Ins.empty())
3510 InFlag = Chain.getValue(1);
3511
3512 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3513 Ins, dl, DAG, InVals);
3514}
3515
3516SDValue
3517PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3518 SmallVectorImpl<SDValue> &InVals) const {
3519 SelectionDAG &DAG = CLI.DAG;
3520 SDLoc &dl = CLI.DL;
3521 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3522 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3523 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3524 SDValue Chain = CLI.Chain;
3525 SDValue Callee = CLI.Callee;
3526 bool &isTailCall = CLI.IsTailCall;
3527 CallingConv::ID CallConv = CLI.CallConv;
3528 bool isVarArg = CLI.IsVarArg;
3529
3530 if (isTailCall)
3531 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
3532 Ins, DAG);
3533
3534 if (PPCSubTarget.isSVR4ABI()) {
3535 if (PPCSubTarget.isPPC64())
3536 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
3537 isTailCall, Outs, OutVals, Ins,
3538 dl, DAG, InVals);
3539 else
3540 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
3541 isTailCall, Outs, OutVals, Ins,
3542 dl, DAG, InVals);
3543 }
3544
3545 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
3546 isTailCall, Outs, OutVals, Ins,
3547 dl, DAG, InVals);
3548}
3549
3550SDValue
3551PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
3552 CallingConv::ID CallConv, bool isVarArg,
3553 bool isTailCall,
3554 const SmallVectorImpl<ISD::OutputArg> &Outs,
3555 const SmallVectorImpl<SDValue> &OutVals,
3556 const SmallVectorImpl<ISD::InputArg> &Ins,
3557 SDLoc dl, SelectionDAG &DAG,
3558 SmallVectorImpl<SDValue> &InVals) const {
3559 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
3560 // of the 32-bit SVR4 ABI stack frame layout.
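  // (Informally, and with the referenced function remaining authoritative:
  // the frame starts with an 8-byte linkage area -- the back chain word and
  // the LR save word -- followed by the parameter save area and then the
  // local space holding by-value aggregate copies; this is why the
  // AllocateStack() call below is seeded with the linkage area size.)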
3561
3562 assert((CallConv == CallingConv::C ||
3563 CallConv == CallingConv::Fast) && "Unknown calling convention!");
3564
3565 unsigned PtrByteSize = 4;
3566
3567 MachineFunction &MF = DAG.getMachineFunction();
3568
3569 // Mark this function as potentially containing a function that contains a
3570 // tail call. As a consequence, the frame pointer will be used for dynamic
3571 // allocas and for restoring the caller's stack pointer in this function's
3572 // epilog. This is done because, by tail calling, the called function might
3573 // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
3574 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
3575 CallConv == CallingConv::Fast)
3576 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
3577
3578 // Count how many bytes are to be pushed on the stack, including the linkage
3579 // area, parameter list area and the part of the local variable space which
3580 // contains copies of aggregates which are passed by value.
3581
3582 // Assign locations to all of the outgoing arguments.
3583 SmallVector<CCValAssign, 16> ArgLocs;
3584 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3585 getTargetMachine(), ArgLocs, *DAG.getContext());
3586
3587 // Reserve space for the linkage area on the stack.
3588 CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize);
3589
3590 if (isVarArg) {
3591 // Handle fixed and variable vector arguments differently.
3592 // Fixed vector arguments go into registers as long as registers are
3593 // available. Variable vector arguments always go into memory.
3594 unsigned NumArgs = Outs.size();
3595
3596 for (unsigned i = 0; i != NumArgs; ++i) {
3597 MVT ArgVT = Outs[i].VT;
3598 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
3599 bool Result;
3600
3601 if (Outs[i].IsFixed) {
3602 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
3603 CCInfo);
3604 } else {
3605 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
3606 ArgFlags, CCInfo);
3607 }
3608
3609 if (Result) {
3610#ifndef NDEBUG
3611 errs() << "Call operand #" << i << " has unhandled type "
3612 << EVT(ArgVT).getEVTString() << "\n";
3613#endif
3614 llvm_unreachable(0);
3615 }
3616 }
3617 } else {
3618 // All arguments are treated the same.
3619 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
3620 }
3621
3622 // Assign locations to all of the outgoing aggregate by value arguments.
3623 SmallVector<CCValAssign, 16> ByValArgLocs;
3624 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3625 getTargetMachine(), ByValArgLocs, *DAG.getContext());
3626
3627 // Reserve stack space for the allocations in CCInfo.
3628 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
3629
3630 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
3631
3632 // Size of the linkage area, parameter list area, and the part of the local
3633 // variable space where copies of aggregates which are passed by value are
3634 // stored.
3635 unsigned NumBytes = CCByValInfo.getNextStackOffset();
3636
3637 // Calculate by how many bytes the stack has to be adjusted in case of tail
3638 // call optimization.
3639 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
3640
3641 // Adjust the stack pointer for the new arguments...
3642 // These operations are automatically eliminated by the prolog/epilog pass
3643 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
3644 dl);
3645 SDValue CallSeqStart = Chain;
3646
3647 // Load the return address and frame pointer so they can be moved somewhere
3648 // else later.
3649 SDValue LROp, FPOp;
3650 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false,
3651 dl);
3652
3653 // Set up a copy of the stack pointer for use loading and storing any
3654 // arguments that may not fit in the registers available for argument
3655 // passing.
3656 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
3657
3658 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
3659 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
3660 SmallVector<SDValue, 8> MemOpChains;
3661
3662 bool seenFloatArg = false;
3663 // Walk the register/memloc assignments, inserting copies/loads.
3664 for (unsigned i = 0, j = 0, e = ArgLocs.size();
3665 i != e;
3666 ++i) {
3667 CCValAssign &VA = ArgLocs[i];
3668 SDValue Arg = OutVals[i];
3669 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3670
3671 if (Flags.isByVal()) {
3672 // Argument is an aggregate which is passed by value, thus we need to
3673 // create a copy of it in the local variable space of the current stack
3674 // frame (which is the stack frame of the caller) and pass the address of
3675 // this copy to the callee.
3676 assert((j < ByValArgLocs.size()) && "Index out of bounds!");
3677 CCValAssign &ByValVA = ByValArgLocs[j++];
3678 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
3679
3680 // Memory reserved in the local variable space of the caller's stack frame.
3681 unsigned LocMemOffset = ByValVA.getLocMemOffset();
3682
3683 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
3684 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
3685
3686 // Create a copy of the argument in the local area of the current
3687 // stack frame.
3688 SDValue MemcpyCall =
3689 CreateCopyOfByValArgument(Arg, PtrOff,
3690 CallSeqStart.getNode()->getOperand(0),
3691 Flags, DAG, dl);
3692
3693 // This must go outside the CALLSEQ_START..END.
3694 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
3695 CallSeqStart.getNode()->getOperand(1),
3696 SDLoc(MemcpyCall));
3697 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
3698 NewCallSeqStart.getNode());
3699 Chain = CallSeqStart = NewCallSeqStart;
3700
3701 // Pass the address of the aggregate copy on the stack either in a
3702 // physical register or in the parameter list area of the current stack
3703 // frame to the callee.
3704 Arg = PtrOff;
3705 }
3706
3707 if (VA.isRegLoc()) {
3708 seenFloatArg |= VA.getLocVT().isFloatingPoint();
3709 // Put argument in a physical register.
3710 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3711 } else {
3712 // Put argument in the parameter list area of the current stack frame.
3713 assert(VA.isMemLoc());
3714 unsigned LocMemOffset = VA.getLocMemOffset();
3715
3716 if (!isTailCall) {
3717 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
3718 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
3719
3720 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
3721 MachinePointerInfo(),
3722 false, false, 0));
3723 } else {
3724 // Calculate and remember argument location.
3725 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
3726 TailCallArguments);
3727 }
3728 }
3729 }
3730
3731 if (!MemOpChains.empty())
3732 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3733 &MemOpChains[0], MemOpChains.size());
3734
3735 // Build a sequence of copy-to-reg nodes chained together with token chain
3736 // and flag operands which copy the outgoing args into the appropriate regs.
3737 SDValue InFlag;
3738 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3739 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3740 RegsToPass[i].second, InFlag);
3741 InFlag = Chain.getValue(1);
3742 }
3743
3744 // Set CR bit 6 to true if this is a vararg call with floating args passed in
3745 // registers.
3746 if (isVarArg) {
3747 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
3748 SDValue Ops[] = { Chain, InFlag };
3749
3750 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
3751 dl, VTs, Ops, InFlag.getNode() ? 2 : 1);
3752
3753 InFlag = Chain.getValue(1);
3754 }
3755
3756 if (isTailCall)
3757 PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
3758 false, TailCallArguments);
3759
3760 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
3761 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
3762 Ins, InVals);
3763}
3764
3765// Copy an argument into memory, being careful to do this outside the
3766// call sequence for the call to which the argument belongs.
3767SDValue
3768PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
3769 SDValue CallSeqStart,
3770 ISD::ArgFlagsTy Flags,
3771 SelectionDAG &DAG,
3772 SDLoc dl) const {
3773 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
3774 CallSeqStart.getNode()->getOperand(0),
3775 Flags, DAG, dl);
3776 // The MEMCPY must go outside the CALLSEQ_START..END.
3777 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
3778 CallSeqStart.getNode()->getOperand(1),
3779 SDLoc(MemcpyCall));
3780 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
3781 NewCallSeqStart.getNode());
3782 return NewCallSeqStart;
3783}
3784
3785SDValue
3786PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
3787 CallingConv::ID CallConv, bool isVarArg,
3788 bool isTailCall,
3789 const SmallVectorImpl<ISD::OutputArg> &Outs,
3790 const SmallVectorImpl<SDValue> &OutVals,
3791 const SmallVectorImpl<ISD::InputArg> &Ins,
3792 SDLoc dl, SelectionDAG &DAG,
3793 SmallVectorImpl<SDValue> &InVals) const {
3794
3795 unsigned NumOps = Outs.size();
3796
3797 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3798 unsigned PtrByteSize = 8;
3799
3800 MachineFunction &MF = DAG.getMachineFunction();
3801
3802 // Mark this function as potentially containing a function that contains a
3803 // tail call. As a consequence, the frame pointer will be used for dynamic
3804 // allocas and for restoring the caller's stack pointer in this function's
3805 // epilog. This is done because, by tail calling, the called function might
3806 // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
3807 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
3808 CallConv == CallingConv::Fast)
3809 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
3810
3811 unsigned nAltivecParamsAtEnd = 0;
3812
3813 // Count how many bytes are to be pushed on the stack, including the linkage
3814 // area, and parameter passing area. We start with at least 48 bytes, which
3815 // is reserved space for [SP][CR][LR][3 x unused].
3816 // NOTE: For PPC64, nAltivecParamsAtEnd always remains zero as a result
3817 // of this call.
3818 unsigned NumBytes =
3819 CalculateParameterAndLinkageAreaSize(DAG, true, isVarArg, CallConv,
3820 Outs, OutVals, nAltivecParamsAtEnd);
3821
3822 // Calculate by how many bytes the stack has to be adjusted in case of tail
3823 // call optimization.
3824 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
3825
3826 // To protect arguments on the stack from being clobbered in a tail call,
3827 // force all the loads to happen before doing any other lowering.
3828 if (isTailCall)
3829 Chain = DAG.getStackArgumentTokenFactor(Chain);
3830
3831 // Adjust the stack pointer for the new arguments...
3832 // These operations are automatically eliminated by the prolog/epilog pass
3833 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
3834 dl);
3835 SDValue CallSeqStart = Chain;
3836
3837 // Load the return address and frame pointer so they can be moved somewhere
3838 // else later.
3839 SDValue LROp, FPOp;
3840 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
3841 dl);
3842
3843 // Set up a copy of the stack pointer for use loading and storing any
3844 // arguments that may not fit in the registers available for argument
3845 // passing.
3846 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
3847
3848 // Figure out which arguments are going to go in registers, and which in
3849 // memory. Also, if this is a vararg function, floating point operations
3850 // must be stored to our stack, and loaded into integer regs as well, if
3851 // any integer regs are available for argument passing.
3852 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(true, true);
3853 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3854
3855 static const uint16_t GPR[] = {
3856 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3857 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3858 };
3859 static const uint16_t *FPR = GetFPR();
3860
3861 static const uint16_t VR[] = {
3862 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3863 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3864 };
3865 const unsigned NumGPRs = array_lengthof(GPR);
3866 const unsigned NumFPRs = 13;
3867 const unsigned NumVRs = array_lengthof(VR);
3868
3869 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
3870 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
3871
3872 SmallVector<SDValue, 8> MemOpChains;
3873 for (unsigned i = 0; i != NumOps; ++i) {
3874 SDValue Arg = OutVals[i];
3875 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3876
3877 // PtrOff will be used to store the current argument to the stack if a
3878 // register cannot be found for it.
3879 SDValue PtrOff;
3880
3881 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
3882
3883 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
3884
3885 // Promote integers to 64-bit values.
3886 if (Arg.getValueType() == MVT::i32) {
3887 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
3888 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3889 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
3890 }
3891
3892 // FIXME: memcpy is used way more than necessary. Correctness first.
3893 // Note: "by value" is code for passing a structure by value, not
3894 // basic types.
3895 if (Flags.isByVal()) {
3896 // Note: Size includes alignment padding, so
3897 // struct x { short a; char b; }
3898 // will have Size = 4. With #pragma pack(1), it will have Size = 3.
3899 // These are the proper values we need for right-justifying the 3900 // aggregate in a parameter register. 3901 unsigned Size = Flags.getByValSize(); 3902 3903 // An empty aggregate parameter takes up no storage and no 3904 // registers. 3905 if (Size == 0) 3906 continue; 3907 3908 unsigned BVAlign = Flags.getByValAlign(); 3909 if (BVAlign > 8) { 3910 if (BVAlign % PtrByteSize != 0) 3911 llvm_unreachable( 3912 "ByVal alignment is not a multiple of the pointer size"); 3913 3914 ArgOffset = ((ArgOffset+BVAlign-1)/BVAlign)*BVAlign; 3915 } 3916 3917 // All aggregates smaller than 8 bytes must be passed right-justified. 3918 if (Size==1 || Size==2 || Size==4) { 3919 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 3920 if (GPR_idx != NumGPRs) { 3921 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 3922 MachinePointerInfo(), VT, 3923 false, false, 0); 3924 MemOpChains.push_back(Load.getValue(1)); 3925 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3926 3927 ArgOffset += PtrByteSize; 3928 continue; 3929 } 3930 } 3931 3932 if (GPR_idx == NumGPRs && Size < 8) { 3933 SDValue Const = DAG.getConstant(PtrByteSize - Size, 3934 PtrOff.getValueType()); 3935 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 3936 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 3937 CallSeqStart, 3938 Flags, DAG, dl); 3939 ArgOffset += PtrByteSize; 3940 continue; 3941 } 3942 // Copy entire object into memory. There are cases where gcc-generated 3943 // code assumes it is there, even if it could be put entirely into 3944 // registers. (This is not what the doc says.) 3945 3946 // FIXME: The above statement is likely due to a misunderstanding of the 3947 // documents. All arguments must be copied into the parameter area BY 3948 // THE CALLEE in the event that the callee takes the address of any 3949 // formal argument. That has not yet been implemented. However, it is 3950 // reasonable to use the stack area as a staging area for the register 3951 // load. 3952 3953 // Skip this for small aggregates, as we will use the same slot for a 3954 // right-justified copy, below. 3955 if (Size >= 8) 3956 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 3957 CallSeqStart, 3958 Flags, DAG, dl); 3959 3960 // When a register is available, pass a small aggregate right-justified. 3961 if (Size < 8 && GPR_idx != NumGPRs) { 3962 // The easiest way to get this right-justified in a register 3963 // is to copy the structure into the rightmost portion of a 3964 // local variable slot, then load the whole slot into the 3965 // register. 3966 // FIXME: The memcpy seems to produce pretty awful code for 3967 // small aggregates, particularly for packed ones. 3968 // FIXME: It would be preferable to use the slot in the 3969 // parameter save area instead of a new local variable. 3970 SDValue Const = DAG.getConstant(8 - Size, PtrOff.getValueType()); 3971 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 3972 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 3973 CallSeqStart, 3974 Flags, DAG, dl); 3975 3976 // Load the slot into the register. 3977 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff, 3978 MachinePointerInfo(), 3979 false, false, false, 0); 3980 MemOpChains.push_back(Load.getValue(1)); 3981 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3982 3983 // Done with this argument. 
3984 ArgOffset += PtrByteSize; 3985 continue; 3986 } 3987 3988 // For aggregates larger than PtrByteSize, copy the pieces of the 3989 // object that fit into registers from the parameter save area. 3990 for (unsigned j=0; j<Size; j+=PtrByteSize) { 3991 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 3992 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 3993 if (GPR_idx != NumGPRs) { 3994 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 3995 MachinePointerInfo(), 3996 false, false, false, 0); 3997 MemOpChains.push_back(Load.getValue(1)); 3998 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3999 ArgOffset += PtrByteSize; 4000 } else { 4001 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 4002 break; 4003 } 4004 } 4005 continue; 4006 } 4007 4008 switch (Arg.getSimpleValueType().SimpleTy) { 4009 default: llvm_unreachable("Unexpected ValueType for argument!"); 4010 case MVT::i32: 4011 case MVT::i64: 4012 if (GPR_idx != NumGPRs) { 4013 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 4014 } else { 4015 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4016 true, isTailCall, false, MemOpChains, 4017 TailCallArguments, dl); 4018 } 4019 ArgOffset += PtrByteSize; 4020 break; 4021 case MVT::f32: 4022 case MVT::f64: 4023 if (FPR_idx != NumFPRs) { 4024 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 4025 4026 if (isVarArg) { 4027 // A single float or an aggregate containing only a single float 4028 // must be passed right-justified in the stack doubleword, and 4029 // in the GPR, if one is available. 4030 SDValue StoreOff; 4031 if (Arg.getSimpleValueType().SimpleTy == MVT::f32) { 4032 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 4033 StoreOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 4034 } else 4035 StoreOff = PtrOff; 4036 4037 SDValue Store = DAG.getStore(Chain, dl, Arg, StoreOff, 4038 MachinePointerInfo(), false, false, 0); 4039 MemOpChains.push_back(Store); 4040 4041 // Float varargs are always shadowed in available integer registers 4042 if (GPR_idx != NumGPRs) { 4043 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 4044 MachinePointerInfo(), false, false, 4045 false, 0); 4046 MemOpChains.push_back(Load.getValue(1)); 4047 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4048 } 4049 } else if (GPR_idx != NumGPRs) 4050 // If we have any FPRs remaining, we may also have GPRs remaining. 4051 ++GPR_idx; 4052 } else { 4053 // Single-precision floating-point values are mapped to the 4054 // second (rightmost) word of the stack doubleword. 4055 if (Arg.getValueType() == MVT::f32) { 4056 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 4057 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 4058 } 4059 4060 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4061 true, isTailCall, false, MemOpChains, 4062 TailCallArguments, dl); 4063 } 4064 ArgOffset += 8; 4065 break; 4066 case MVT::v4f32: 4067 case MVT::v4i32: 4068 case MVT::v8i16: 4069 case MVT::v16i8: 4070 if (isVarArg) { 4071 // These go aligned on the stack, or in the corresponding R registers 4072 // when within range. The Darwin PPC ABI doc claims they also go in 4073 // V registers; in fact gcc does this only for arguments that are 4074 // prototyped, not for those that match the ... We do it for all 4075 // arguments, seems to work. 
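        // A worked example of the re-alignment below (illustrative numbers
        // only): if ArgOffset is 56 on entry with PtrByteSize == 8, then
        // 56 % 16 != 0, so one 8-byte slot is skipped (together with the
        // GPR shadowing it, if any remain) and the vector is stored 16-byte
        // aligned at ArgOffset == 64.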
4076 while (ArgOffset % 16 !=0) { 4077 ArgOffset += PtrByteSize; 4078 if (GPR_idx != NumGPRs) 4079 GPR_idx++; 4080 } 4081 // We could elide this store in the case where the object fits 4082 // entirely in R registers. Maybe later. 4083 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4084 DAG.getConstant(ArgOffset, PtrVT)); 4085 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4086 MachinePointerInfo(), false, false, 0); 4087 MemOpChains.push_back(Store); 4088 if (VR_idx != NumVRs) { 4089 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 4090 MachinePointerInfo(), 4091 false, false, false, 0); 4092 MemOpChains.push_back(Load.getValue(1)); 4093 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 4094 } 4095 ArgOffset += 16; 4096 for (unsigned i=0; i<16; i+=PtrByteSize) { 4097 if (GPR_idx == NumGPRs) 4098 break; 4099 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 4100 DAG.getConstant(i, PtrVT)); 4101 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 4102 false, false, false, 0); 4103 MemOpChains.push_back(Load.getValue(1)); 4104 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4105 } 4106 break; 4107 } 4108 4109 // Non-varargs Altivec params generally go in registers, but have 4110 // stack space allocated at the end. 4111 if (VR_idx != NumVRs) { 4112 // Doesn't have GPR space allocated. 4113 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 4114 } else { 4115 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4116 true, isTailCall, true, MemOpChains, 4117 TailCallArguments, dl); 4118 ArgOffset += 16; 4119 } 4120 break; 4121 } 4122 } 4123 4124 if (!MemOpChains.empty()) 4125 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4126 &MemOpChains[0], MemOpChains.size()); 4127 4128 // Check if this is an indirect call (MTCTR/BCTRL). 4129 // See PrepareCall() for more information about calls through function 4130 // pointers in the 64-bit SVR4 ABI. 4131 if (!isTailCall && 4132 !dyn_cast<GlobalAddressSDNode>(Callee) && 4133 !dyn_cast<ExternalSymbolSDNode>(Callee) && 4134 !isBLACompatibleAddress(Callee, DAG)) { 4135 // Load r2 into a virtual register and store it to the TOC save area. 4136 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 4137 // TOC save area offset. 4138 SDValue PtrOff = DAG.getIntPtrConstant(40); 4139 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4140 Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(), 4141 false, false, 0); 4142 // R12 must contain the address of an indirect callee. This does not 4143 // mean the MTCTR instruction must use R12; it's easier to model this 4144 // as an extra parameter, so do that. 4145 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee)); 4146 } 4147 4148 // Build a sequence of copy-to-reg nodes chained together with token chain 4149 // and flag operands which copy the outgoing args into the appropriate regs. 
4150 SDValue InFlag;
4151 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4152 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4153 RegsToPass[i].second, InFlag);
4154 InFlag = Chain.getValue(1);
4155 }
4156
4157 if (isTailCall)
4158 PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp,
4159 FPOp, true, TailCallArguments);
4160
4161 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
4162 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
4163 Ins, InVals);
4164}
4165
4166SDValue
4167PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
4168 CallingConv::ID CallConv, bool isVarArg,
4169 bool isTailCall,
4170 const SmallVectorImpl<ISD::OutputArg> &Outs,
4171 const SmallVectorImpl<SDValue> &OutVals,
4172 const SmallVectorImpl<ISD::InputArg> &Ins,
4173 SDLoc dl, SelectionDAG &DAG,
4174 SmallVectorImpl<SDValue> &InVals) const {
4175
4176 unsigned NumOps = Outs.size();
4177
4178 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
4179 bool isPPC64 = PtrVT == MVT::i64;
4180 unsigned PtrByteSize = isPPC64 ? 8 : 4;
4181
4182 MachineFunction &MF = DAG.getMachineFunction();
4183
4184 // Mark this function as potentially containing a function that contains a
4185 // tail call. As a consequence, the frame pointer will be used for dynamic
4186 // allocas and for restoring the caller's stack pointer in this function's
4187 // epilog. This is done because, by tail calling, the called function might
4188 // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
4189 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
4190 CallConv == CallingConv::Fast)
4191 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
4192
4193 unsigned nAltivecParamsAtEnd = 0;
4194
4195 // Count how many bytes are to be pushed on the stack, including the linkage
4196 // area, and parameter passing area. We start with 24/48 bytes, which is
4197 // prereserved space for [SP][CR][LR][3 x unused].
4198 unsigned NumBytes =
4199 CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isVarArg, CallConv,
4200 Outs, OutVals,
4201 nAltivecParamsAtEnd);
4202
4203 // Calculate by how many bytes the stack has to be adjusted in case of tail
4204 // call optimization.
4205 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
4206
4207 // To protect arguments on the stack from being clobbered in a tail call,
4208 // force all the loads to happen before doing any other lowering.
4209 if (isTailCall)
4210 Chain = DAG.getStackArgumentTokenFactor(Chain);
4211
4212 // Adjust the stack pointer for the new arguments...
4213 // These operations are automatically eliminated by the prolog/epilog pass
4214 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
4215 dl);
4216 SDValue CallSeqStart = Chain;
4217
4218 // Load the return address and frame pointer so they can be moved somewhere
4219 // else later.
4220 SDValue LROp, FPOp;
4221 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
4222 dl);
4223
4224 // Set up a copy of the stack pointer for use loading and storing any
4225 // arguments that may not fit in the registers available for argument
4226 // passing.
4227 SDValue StackPtr;
4228 if (isPPC64)
4229 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4230 else
4231 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4232
4233 // Figure out which arguments are going to go in registers, and which in
4234 // memory.
Also, if this is a vararg function, floating point operations 4235 // must be stored to our stack, and loaded into integer regs as well, if 4236 // any integer regs are available for argument passing. 4237 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true); 4238 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 4239 4240 static const uint16_t GPR_32[] = { // 32-bit registers. 4241 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 4242 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 4243 }; 4244 static const uint16_t GPR_64[] = { // 64-bit registers. 4245 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4246 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4247 }; 4248 static const uint16_t *FPR = GetFPR(); 4249 4250 static const uint16_t VR[] = { 4251 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4252 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4253 }; 4254 const unsigned NumGPRs = array_lengthof(GPR_32); 4255 const unsigned NumFPRs = 13; 4256 const unsigned NumVRs = array_lengthof(VR); 4257 4258 const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32; 4259 4260 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 4261 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 4262 4263 SmallVector<SDValue, 8> MemOpChains; 4264 for (unsigned i = 0; i != NumOps; ++i) { 4265 SDValue Arg = OutVals[i]; 4266 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4267 4268 // PtrOff will be used to store the current argument to the stack if a 4269 // register cannot be found for it. 4270 SDValue PtrOff; 4271 4272 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 4273 4274 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4275 4276 // On PPC64, promote integers to 64-bit values. 4277 if (isPPC64 && Arg.getValueType() == MVT::i32) { 4278 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 4279 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 4280 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 4281 } 4282 4283 // FIXME memcpy is used way more than necessary. Correctness first. 4284 // Note: "by value" is code for passing a structure by value, not 4285 // basic types. 4286 if (Flags.isByVal()) { 4287 unsigned Size = Flags.getByValSize(); 4288 // Very small objects are passed right-justified. Everything else is 4289 // passed left-justified. 4290 if (Size==1 || Size==2) { 4291 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 4292 if (GPR_idx != NumGPRs) { 4293 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 4294 MachinePointerInfo(), VT, 4295 false, false, 0); 4296 MemOpChains.push_back(Load.getValue(1)); 4297 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4298 4299 ArgOffset += PtrByteSize; 4300 } else { 4301 SDValue Const = DAG.getConstant(PtrByteSize - Size, 4302 PtrOff.getValueType()); 4303 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4304 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 4305 CallSeqStart, 4306 Flags, DAG, dl); 4307 ArgOffset += PtrByteSize; 4308 } 4309 continue; 4310 } 4311 // Copy entire object into memory. There are cases where gcc-generated 4312 // code assumes it is there, even if it could be put entirely into 4313 // registers. (This is not what the doc says.) 4314 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 4315 CallSeqStart, 4316 Flags, DAG, dl); 4317 4318 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 4319 // copy the pieces of the object that fit into registers from the 4320 // parameter save area. 
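      // A worked example of the loop below (illustrative only): a 12-byte
      // aggregate with PtrByteSize == 4 is visited as three 4-byte pieces;
      // each piece is loaded into a GPR while one is free, and the first
      // piece that finds no free GPR advances ArgOffset past the rounded-up
      // remainder and terminates the loop.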
4321 for (unsigned j=0; j<Size; j+=PtrByteSize) { 4322 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 4323 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 4324 if (GPR_idx != NumGPRs) { 4325 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 4326 MachinePointerInfo(), 4327 false, false, false, 0); 4328 MemOpChains.push_back(Load.getValue(1)); 4329 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4330 ArgOffset += PtrByteSize; 4331 } else { 4332 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 4333 break; 4334 } 4335 } 4336 continue; 4337 } 4338 4339 switch (Arg.getSimpleValueType().SimpleTy) { 4340 default: llvm_unreachable("Unexpected ValueType for argument!"); 4341 case MVT::i32: 4342 case MVT::i64: 4343 if (GPR_idx != NumGPRs) { 4344 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 4345 } else { 4346 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4347 isPPC64, isTailCall, false, MemOpChains, 4348 TailCallArguments, dl); 4349 } 4350 ArgOffset += PtrByteSize; 4351 break; 4352 case MVT::f32: 4353 case MVT::f64: 4354 if (FPR_idx != NumFPRs) { 4355 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 4356 4357 if (isVarArg) { 4358 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4359 MachinePointerInfo(), false, false, 0); 4360 MemOpChains.push_back(Store); 4361 4362 // Float varargs are always shadowed in available integer registers 4363 if (GPR_idx != NumGPRs) { 4364 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 4365 MachinePointerInfo(), false, false, 4366 false, 0); 4367 MemOpChains.push_back(Load.getValue(1)); 4368 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4369 } 4370 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 4371 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 4372 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 4373 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 4374 MachinePointerInfo(), 4375 false, false, false, 0); 4376 MemOpChains.push_back(Load.getValue(1)); 4377 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4378 } 4379 } else { 4380 // If we have any FPRs remaining, we may also have GPRs remaining. 4381 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 4382 // GPRs. 4383 if (GPR_idx != NumGPRs) 4384 ++GPR_idx; 4385 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 4386 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 4387 ++GPR_idx; 4388 } 4389 } else 4390 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4391 isPPC64, isTailCall, false, MemOpChains, 4392 TailCallArguments, dl); 4393 if (isPPC64) 4394 ArgOffset += 8; 4395 else 4396 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 4397 break; 4398 case MVT::v4f32: 4399 case MVT::v4i32: 4400 case MVT::v8i16: 4401 case MVT::v16i8: 4402 if (isVarArg) { 4403 // These go aligned on the stack, or in the corresponding R registers 4404 // when within range. The Darwin PPC ABI doc claims they also go in 4405 // V registers; in fact gcc does this only for arguments that are 4406 // prototyped, not for those that match the ... We do it for all 4407 // arguments, seems to work. 4408 while (ArgOffset % 16 !=0) { 4409 ArgOffset += PtrByteSize; 4410 if (GPR_idx != NumGPRs) 4411 GPR_idx++; 4412 } 4413 // We could elide this store in the case where the object fits 4414 // entirely in R registers. Maybe later. 
4415 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4416 DAG.getConstant(ArgOffset, PtrVT)); 4417 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4418 MachinePointerInfo(), false, false, 0); 4419 MemOpChains.push_back(Store); 4420 if (VR_idx != NumVRs) { 4421 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 4422 MachinePointerInfo(), 4423 false, false, false, 0); 4424 MemOpChains.push_back(Load.getValue(1)); 4425 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 4426 } 4427 ArgOffset += 16; 4428 for (unsigned i=0; i<16; i+=PtrByteSize) { 4429 if (GPR_idx == NumGPRs) 4430 break; 4431 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 4432 DAG.getConstant(i, PtrVT)); 4433 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 4434 false, false, false, 0); 4435 MemOpChains.push_back(Load.getValue(1)); 4436 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4437 } 4438 break; 4439 } 4440 4441 // Non-varargs Altivec params generally go in registers, but have 4442 // stack space allocated at the end. 4443 if (VR_idx != NumVRs) { 4444 // Doesn't have GPR space allocated. 4445 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 4446 } else if (nAltivecParamsAtEnd==0) { 4447 // We are emitting Altivec params in order. 4448 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4449 isPPC64, isTailCall, true, MemOpChains, 4450 TailCallArguments, dl); 4451 ArgOffset += 16; 4452 } 4453 break; 4454 } 4455 } 4456 // If all Altivec parameters fit in registers, as they usually do, 4457 // they get stack space following the non-Altivec parameters. We 4458 // don't track this here because nobody below needs it. 4459 // If there are more Altivec parameters than fit in registers emit 4460 // the stores here. 4461 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 4462 unsigned j = 0; 4463 // Offset is aligned; skip 1st 12 params which go in V registers. 4464 ArgOffset = ((ArgOffset+15)/16)*16; 4465 ArgOffset += 12*16; 4466 for (unsigned i = 0; i != NumOps; ++i) { 4467 SDValue Arg = OutVals[i]; 4468 EVT ArgType = Outs[i].VT; 4469 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 4470 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 4471 if (++j > NumVRs) { 4472 SDValue PtrOff; 4473 // We are emitting Altivec params in order. 4474 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4475 isPPC64, isTailCall, true, MemOpChains, 4476 TailCallArguments, dl); 4477 ArgOffset += 16; 4478 } 4479 } 4480 } 4481 } 4482 4483 if (!MemOpChains.empty()) 4484 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4485 &MemOpChains[0], MemOpChains.size()); 4486 4487 // On Darwin, R12 must contain the address of an indirect callee. This does 4488 // not mean the MTCTR instruction must use R12; it's easier to model this as 4489 // an extra parameter, so do that. 4490 if (!isTailCall && 4491 !dyn_cast<GlobalAddressSDNode>(Callee) && 4492 !dyn_cast<ExternalSymbolSDNode>(Callee) && 4493 !isBLACompatibleAddress(Callee, DAG)) 4494 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 4495 PPC::R12), Callee)); 4496 4497 // Build a sequence of copy-to-reg nodes chained together with token chain 4498 // and flag operands which copy the outgoing args into the appropriate regs. 
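  // (Illustrative: with RegsToPass = {(R3, a), (R4, b)}, the loop below
  // emits CopyToReg(R3, a) and CopyToReg(R4, b), each glued to its
  // predecessor through InFlag so the register writes stay together ahead
  // of the call.)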
4499 SDValue InFlag;
4500 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4501 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4502 RegsToPass[i].second, InFlag);
4503 InFlag = Chain.getValue(1);
4504 }
4505
4506 if (isTailCall)
4507 PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
4508 FPOp, true, TailCallArguments);
4509
4510 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
4511 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
4512 Ins, InVals);
4513}
4514
4515bool
4516PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
4517 MachineFunction &MF, bool isVarArg,
4518 const SmallVectorImpl<ISD::OutputArg> &Outs,
4519 LLVMContext &Context) const {
4520 SmallVector<CCValAssign, 16> RVLocs;
4521 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
4522 RVLocs, Context);
4523 return CCInfo.CheckReturn(Outs, RetCC_PPC);
4524}
4525
4526SDValue
4527PPCTargetLowering::LowerReturn(SDValue Chain,
4528 CallingConv::ID CallConv, bool isVarArg,
4529 const SmallVectorImpl<ISD::OutputArg> &Outs,
4530 const SmallVectorImpl<SDValue> &OutVals,
4531 SDLoc dl, SelectionDAG &DAG) const {
4532
4533 SmallVector<CCValAssign, 16> RVLocs;
4534 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
4535 getTargetMachine(), RVLocs, *DAG.getContext());
4536 CCInfo.AnalyzeReturn(Outs, RetCC_PPC);
4537
4538 SDValue Flag;
4539 SmallVector<SDValue, 4> RetOps(1, Chain);
4540
4541 // Copy the result values into the output registers.
4542 for (unsigned i = 0; i != RVLocs.size(); ++i) {
4543 CCValAssign &VA = RVLocs[i];
4544 assert(VA.isRegLoc() && "Can only return in registers!");
4545
4546 SDValue Arg = OutVals[i];
4547
4548 switch (VA.getLocInfo()) {
4549 default: llvm_unreachable("Unknown loc info!");
4550 case CCValAssign::Full: break;
4551 case CCValAssign::AExt:
4552 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
4553 break;
4554 case CCValAssign::ZExt:
4555 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
4556 break;
4557 case CCValAssign::SExt:
4558 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
4559 break;
4560 }
4561
4562 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
4563 Flag = Chain.getValue(1);
4564 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
4565 }
4566
4567 RetOps[0] = Chain; // Update chain.
4568
4569 // Add the flag if we have it.
4570 if (Flag.getNode())
4571 RetOps.push_back(Flag);
4572
4573 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other,
4574 &RetOps[0], RetOps.size());
4575}
4576
4577SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
4578 const PPCSubtarget &Subtarget) const {
4579 // When we pop the dynamic allocation we need to restore the SP link.
4580 SDLoc dl(Op);
4581
4582 // Get the correct type for pointers.
4583 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
4584
4585 // Construct the stack pointer operand.
4586 bool isPPC64 = Subtarget.isPPC64();
4587 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
4588 SDValue StackPtr = DAG.getRegister(SP, PtrVT);
4589
4590 // Get the operands for the STACKRESTORE.
4591 SDValue Chain = Op.getOperand(0);
4592 SDValue SaveSP = Op.getOperand(1);
4593
4594 // Load the old link SP.
4595 SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
4596 MachinePointerInfo(),
4597 false, false, false, 0);
4598
4599 // Restore the stack pointer.
4600 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
4601
4602 // Store the old link SP.
4603 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
4604 false, false, 0);
4605}
4606
4607
4608
4609SDValue
4610PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
4611 MachineFunction &MF = DAG.getMachineFunction();
4612 bool isPPC64 = PPCSubTarget.isPPC64();
4613 bool isDarwinABI = PPCSubTarget.isDarwinABI();
4614 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
4615
4616 // Get the current return address save index. The users of this index will
4617 // be primarily the RETURNADDR lowering.
4618 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
4619 int RASI = FI->getReturnAddrSaveIndex();
4620
4621 // If the return address save index hasn't been defined yet.
4622 if (!RASI) {
4623 // Find out the fixed offset of the return address save area.
4624 int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
4625 // Allocate the frame index for the return address save area.
4626 RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, true);
4627 // Save the result.
4628 FI->setReturnAddrSaveIndex(RASI);
4629 }
4630 return DAG.getFrameIndex(RASI, PtrVT);
4631}
4632
4633SDValue
4634PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
4635 MachineFunction &MF = DAG.getMachineFunction();
4636 bool isPPC64 = PPCSubTarget.isPPC64();
4637 bool isDarwinABI = PPCSubTarget.isDarwinABI();
4638 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
4639
4640 // Get the current frame pointer save index. The users of this index will be
4641 // primarily DYNALLOC instructions.
4642 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
4643 int FPSI = FI->getFramePointerSaveIndex();
4644
4645 // If the frame pointer save index hasn't been defined yet.
4646 if (!FPSI) {
4647 // Find out the fixed offset of the frame pointer save area.
4648 int FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64,
4649 isDarwinABI);
4650
4651 // Allocate the frame index for the frame pointer save area.
4652 FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
4653 // Save the result.
4654 FI->setFramePointerSaveIndex(FPSI);
4655 }
4656 return DAG.getFrameIndex(FPSI, PtrVT);
4657}
4658
4659SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
4660 SelectionDAG &DAG,
4661 const PPCSubtarget &Subtarget) const {
4662 // Get the inputs.
4663 SDValue Chain = Op.getOperand(0);
4664 SDValue Size = Op.getOperand(1);
4665 SDLoc dl(Op);
4666
4667 // Get the correct type for pointers.
4668 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
4669 // Negate the size.
4670 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
4671 DAG.getConstant(0, PtrVT), Size);
4672 // Construct a node for the frame pointer save index.
4673 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
4674 // Build a DYNALLOC node.
4675 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
4676 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
4677 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops, 3);
4678}
4679
4680SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
4681 SelectionDAG &DAG) const {
4682 SDLoc DL(Op);
4683 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
4684 DAG.getVTList(MVT::i32, MVT::Other),
4685 Op.getOperand(0), Op.getOperand(1));
4686}
4687
4688SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
4689 SelectionDAG &DAG) const {
4690 SDLoc DL(Op);
4691 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
4692 Op.getOperand(0), Op.getOperand(1));
4693}
4694
4695/// LowerSELECT_CC - Lower floating-point select_cc nodes into the fsel
4696/// instruction when possible.
4697SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
4698 // Not FP? Not an fsel.
4699 if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
4700 !Op.getOperand(2).getValueType().isFloatingPoint())
4701 return Op;
4702
4703 // We might be able to do better than this under some circumstances, but in
4704 // general, fsel-based lowering of select is a finite-math-only optimization.
4705 // For more information, see section F.3 of the 2.06 ISA specification.
4706 if (!DAG.getTarget().Options.NoInfsFPMath ||
4707 !DAG.getTarget().Options.NoNaNsFPMath)
4708 return Op;
4709
4710 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
4711
4712 EVT ResVT = Op.getValueType();
4713 EVT CmpVT = Op.getOperand(0).getValueType();
4714 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
4715 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
4716 SDLoc dl(Op);
4717
4718 // If the RHS of the comparison is a 0.0, we don't need to do the
4719 // subtraction at all.
4720 SDValue Sel1;
4721 if (isFloatingPointZero(RHS))
4722 switch (CC) {
4723 default: break; // SETUO etc aren't handled by fsel.
4724 case ISD::SETNE:
4725 std::swap(TV, FV);
4726 case ISD::SETEQ:
4727 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
4728 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
4729 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
4730 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
4731 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
4732 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
4733 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
4734 case ISD::SETULT:
4735 case ISD::SETLT:
4736 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
4737 case ISD::SETOGE:
4738 case ISD::SETGE:
4739 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
4740 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
4741 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
4742 case ISD::SETUGT:
4743 case ISD::SETGT:
4744 std::swap(TV, FV); // fsel is natively setge, swap operands for setgt
4745 case ISD::SETOLE:
4746 case ISD::SETLE:
4747 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
4748 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
4749 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
4750 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
4751 }
4752
4753 SDValue Cmp;
4754 switch (CC) {
4755 default: break; // SETUO etc aren't handled by fsel.
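  // Each case below maps the condition onto fsel's native "operand >= 0"
  // form; the fallthroughs (e.g. SETNE into SETEQ) are deliberate and swap
  // TV/FV first.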
4756 case ISD::SETNE: 4757 std::swap(TV, FV); 4758 case ISD::SETEQ: 4759 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 4760 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4761 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4762 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 4763 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 4764 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 4765 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 4766 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 4767 case ISD::SETULT: 4768 case ISD::SETLT: 4769 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 4770 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4771 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4772 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 4773 case ISD::SETOGE: 4774 case ISD::SETGE: 4775 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 4776 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4777 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4778 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 4779 case ISD::SETUGT: 4780 case ISD::SETGT: 4781 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 4782 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4783 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4784 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 4785 case ISD::SETOLE: 4786 case ISD::SETLE: 4787 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 4788 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4789 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4790 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 4791 } 4792 return Op; 4793} 4794 4795// FIXME: Split this code up when LegalizeDAGTypes lands. 4796SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 4797 SDLoc dl) const { 4798 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 4799 SDValue Src = Op.getOperand(0); 4800 if (Src.getValueType() == MVT::f32) 4801 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 4802 4803 SDValue Tmp; 4804 switch (Op.getSimpleValueType().SimpleTy) { 4805 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 4806 case MVT::i32: 4807 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ : 4808 (PPCSubTarget.hasFPCVT() ? PPCISD::FCTIWUZ : 4809 PPCISD::FCTIDZ), 4810 dl, MVT::f64, Src); 4811 break; 4812 case MVT::i64: 4813 assert((Op.getOpcode() == ISD::FP_TO_SINT || PPCSubTarget.hasFPCVT()) && 4814 "i64 FP_TO_UINT is supported only with FPCVT"); 4815 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 4816 PPCISD::FCTIDUZ, 4817 dl, MVT::f64, Src); 4818 break; 4819 } 4820 4821 // Convert the FP value to an int value through memory. 4822 bool i32Stack = Op.getValueType() == MVT::i32 && PPCSubTarget.hasSTFIWX() && 4823 (Op.getOpcode() == ISD::FP_TO_SINT || PPCSubTarget.hasFPCVT()); 4824 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 4825 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 4826 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(FI); 4827 4828 // Emit a store to the stack slot. 
4829 SDValue Chain; 4830 if (i32Stack) { 4831 MachineFunction &MF = DAG.getMachineFunction(); 4832 MachineMemOperand *MMO = 4833 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 4834 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 4835 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 4836 DAG.getVTList(MVT::Other), Ops, array_lengthof(Ops), 4837 MVT::i32, MMO); 4838 } else 4839 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, 4840 MPI, false, false, 0); 4841 4842 // Result is a load from the stack slot. If loading 4 bytes, make sure to 4843 // add in a bias. 4844 if (Op.getValueType() == MVT::i32 && !i32Stack) { 4845 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 4846 DAG.getConstant(4, FIPtr.getValueType())); 4847 MPI = MachinePointerInfo(); 4848 } 4849 4850 return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, MPI, 4851 false, false, false, 0); 4852} 4853 4854SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 4855 SelectionDAG &DAG) const { 4856 SDLoc dl(Op); 4857 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 4858 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 4859 return SDValue(); 4860 4861 assert((Op.getOpcode() == ISD::SINT_TO_FP || PPCSubTarget.hasFPCVT()) && 4862 "UINT_TO_FP is supported only with FPCVT"); 4863 4864 // If we have FCFIDS, then use it when converting to single-precision. 4865 // Otherwise, convert to double-precision and then round. 4866 unsigned FCFOp = (PPCSubTarget.hasFPCVT() && Op.getValueType() == MVT::f32) ? 4867 (Op.getOpcode() == ISD::UINT_TO_FP ? 4868 PPCISD::FCFIDUS : PPCISD::FCFIDS) : 4869 (Op.getOpcode() == ISD::UINT_TO_FP ? 4870 PPCISD::FCFIDU : PPCISD::FCFID); 4871 MVT FCFTy = (PPCSubTarget.hasFPCVT() && Op.getValueType() == MVT::f32) ? 4872 MVT::f32 : MVT::f64; 4873 4874 if (Op.getOperand(0).getValueType() == MVT::i64) { 4875 SDValue SINT = Op.getOperand(0); 4876 // When converting to single-precision, we actually need to convert 4877 // to double-precision first and then round to single-precision. 4878 // To avoid double-rounding effects during that operation, we have 4879 // to prepare the input operand. Bits that might be truncated when 4880 // converting to double-precision are replaced by a bit that won't 4881 // be lost at this stage, but is below the single-precision rounding 4882 // position. 4883 // 4884 // However, if -enable-unsafe-fp-math is in effect, accept double 4885 // rounding to avoid the extra overhead. 4886 if (Op.getValueType() == MVT::f32 && 4887 !PPCSubTarget.hasFPCVT() && 4888 !DAG.getTarget().Options.UnsafeFPMath) { 4889 4890 // Twiddle input to make sure the low 11 bits are zero. (If this 4891 // is the case, we are guaranteed the value will fit into the 53 bit 4892 // mantissa of an IEEE double-precision value without rounding.) 4893 // If any of those low 11 bits were not zero originally, make sure 4894 // bit 12 (value 2048) is set instead, so that the final rounding 4895 // to single-precision gets the correct result. 
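      // (Worked example, illustrative: for SINT = 0x1003 the low 11 bits
      // are nonzero, and the sequence below yields 0x1800 -- the low bits
      // are cleared and the sticky bit 11 (value 2048) is set; for
      // SINT = 0x1000 the low bits are already zero and the value passes
      // through unchanged.)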
4896 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 4897 SINT, DAG.getConstant(2047, MVT::i64)); 4898 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 4899 Round, DAG.getConstant(2047, MVT::i64)); 4900 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 4901 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 4902 Round, DAG.getConstant(-2048, MVT::i64)); 4903 4904 // However, we cannot use that value unconditionally: if the magnitude 4905 // of the input value is small, the bit-twiddling we did above might 4906 // end up visibly changing the output. Fortunately, in that case, we 4907 // don't need to twiddle bits since the original input will convert 4908 // exactly to double-precision floating-point already. Therefore, 4909 // construct a conditional to use the original value if the top 11 4910 // bits are all sign-bit copies, and use the rounded value computed 4911 // above otherwise. 4912 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 4913 SINT, DAG.getConstant(53, MVT::i32)); 4914 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 4915 Cond, DAG.getConstant(1, MVT::i64)); 4916 Cond = DAG.getSetCC(dl, MVT::i32, 4917 Cond, DAG.getConstant(1, MVT::i64), ISD::SETUGT); 4918 4919 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 4920 } 4921 4922 SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 4923 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 4924 4925 if (Op.getValueType() == MVT::f32 && !PPCSubTarget.hasFPCVT()) 4926 FP = DAG.getNode(ISD::FP_ROUND, dl, 4927 MVT::f32, FP, DAG.getIntPtrConstant(0)); 4928 return FP; 4929 } 4930 4931 assert(Op.getOperand(0).getValueType() == MVT::i32 && 4932 "Unhandled INT_TO_FP type in custom expander!"); 4933 // Since we only generate this in 64-bit mode, we can take advantage of 4934 // 64-bit registers. In particular, sign extend the input value into the 4935 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 4936 // then lfd it and fcfid it. 4937 MachineFunction &MF = DAG.getMachineFunction(); 4938 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 4939 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4940 4941 SDValue Ld; 4942 if (PPCSubTarget.hasLFIWAX() || PPCSubTarget.hasFPCVT()) { 4943 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 4944 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 4945 4946 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 4947 MachinePointerInfo::getFixedStack(FrameIdx), 4948 false, false, 0); 4949 4950 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 4951 "Expected an i32 store"); 4952 MachineMemOperand *MMO = 4953 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx), 4954 MachineMemOperand::MOLoad, 4, 4); 4955 SDValue Ops[] = { Store, FIdx }; 4956 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 4957 PPCISD::LFIWZX : PPCISD::LFIWAX, 4958 dl, DAG.getVTList(MVT::f64, MVT::Other), 4959 Ops, 2, MVT::i32, MMO); 4960 } else { 4961 assert(PPCSubTarget.isPPC64() && 4962 "i32->FP without LFIWAX supported only on PPC64"); 4963 4964 int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); 4965 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 4966 4967 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 4968 Op.getOperand(0)); 4969 4970 // STD the extended value into the stack slot. 4971 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Ext64, FIdx, 4972 MachinePointerInfo::getFixedStack(FrameIdx), 4973 false, false, 0); 4974 4975 // Load the value as a double. 
4976 Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx,
4977 MachinePointerInfo::getFixedStack(FrameIdx),
4978 false, false, false, 0);
4979 }
4980
4981 // FCFID it and return it.
4982 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
4983 if (Op.getValueType() == MVT::f32 && !PPCSubTarget.hasFPCVT())
4984 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0));
4985 return FP;
4986}
4987
4988SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
4989 SelectionDAG &DAG) const {
4990 SDLoc dl(Op);
4991 /*
4992 The rounding mode is in bits 30:31 of FPSCR, and has the following
4993 settings:
4994 00 Round to nearest
4995 01 Round to 0
4996 10 Round to +inf
4997 11 Round to -inf
4998
4999 FLT_ROUNDS, on the other hand, expects the following:
5000 -1 Undefined
5001 0 Round to 0
5002 1 Round to nearest
5003 2 Round to +inf
5004 3 Round to -inf
5005
5006 To perform the conversion, we do:
5007 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
5008 */
5009
5010 MachineFunction &MF = DAG.getMachineFunction();
5011 EVT VT = Op.getValueType();
5012 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
5013 SDValue MFFSreg, InFlag;
5014
5015 // Save the FP control word to a register.
5016 EVT NodeTys[] = {
5017 MVT::f64, // return register
5018 MVT::Glue // unused in this context
5019 };
5020 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0);
5021
5022 // Save the FP register to a stack slot.
5023 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
5024 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
5025 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
5026 StackSlot, MachinePointerInfo(), false, false, 0);
5027
5028 // Load the FP control word from the low 32 bits of the stack slot.
5029 SDValue Four = DAG.getConstant(4, PtrVT);
5030 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
5031 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
5032 false, false, false, 0);
5033
5034 // Transform as necessary.
5035 SDValue CWD1 =
5036 DAG.getNode(ISD::AND, dl, MVT::i32,
5037 CWD, DAG.getConstant(3, MVT::i32));
5038 SDValue CWD2 =
5039 DAG.getNode(ISD::SRL, dl, MVT::i32,
5040 DAG.getNode(ISD::AND, dl, MVT::i32,
5041 DAG.getNode(ISD::XOR, dl, MVT::i32,
5042 CWD, DAG.getConstant(3, MVT::i32)),
5043 DAG.getConstant(3, MVT::i32)),
5044 DAG.getConstant(1, MVT::i32));
5045
5046 SDValue RetVal =
5047 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
5048
5049 return DAG.getNode((VT.getSizeInBits() < 16 ?
5050 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
5051}
5052
5053SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
5054 EVT VT = Op.getValueType();
5055 unsigned BitWidth = VT.getSizeInBits();
5056 SDLoc dl(Op);
5057 assert(Op.getNumOperands() == 3 &&
5058 VT == Op.getOperand(1).getValueType() &&
5059 "Unexpected SHL!");
5060
5061 // Expand into a bunch of logical ops. Note that these ops
5062 // depend on the PPC behavior for oversized shift amounts.
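  // (Sketch, illustrative: OutLo = Lo << Amt, and OutHi = (Hi << Amt) |
  // (Lo >> (BitWidth - Amt)) | (Lo << (Amt - BitWidth)). At most one of the
  // last two terms contributes; the other sees an oversized shift amount,
  // which the PPC shift nodes define to produce zero.)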
5063 SDValue Lo = Op.getOperand(0); 5064 SDValue Hi = Op.getOperand(1); 5065 SDValue Amt = Op.getOperand(2); 5066 EVT AmtVT = Amt.getValueType(); 5067 5068 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 5069 DAG.getConstant(BitWidth, AmtVT), Amt); 5070 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 5071 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 5072 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 5073 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5074 DAG.getConstant(-BitWidth, AmtVT)); 5075 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 5076 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 5077 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 5078 SDValue OutOps[] = { OutLo, OutHi }; 5079 return DAG.getMergeValues(OutOps, 2, dl); 5080} 5081 5082SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 5083 EVT VT = Op.getValueType(); 5084 SDLoc dl(Op); 5085 unsigned BitWidth = VT.getSizeInBits(); 5086 assert(Op.getNumOperands() == 3 && 5087 VT == Op.getOperand(1).getValueType() && 5088 "Unexpected SRL!"); 5089 5090 // Expand into a bunch of logical ops. Note that these ops 5091 // depend on the PPC behavior for oversized shift amounts. 5092 SDValue Lo = Op.getOperand(0); 5093 SDValue Hi = Op.getOperand(1); 5094 SDValue Amt = Op.getOperand(2); 5095 EVT AmtVT = Amt.getValueType(); 5096 5097 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 5098 DAG.getConstant(BitWidth, AmtVT), Amt); 5099 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 5100 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 5101 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 5102 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5103 DAG.getConstant(-BitWidth, AmtVT)); 5104 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 5105 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 5106 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 5107 SDValue OutOps[] = { OutLo, OutHi }; 5108 return DAG.getMergeValues(OutOps, 2, dl); 5109} 5110 5111SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 5112 SDLoc dl(Op); 5113 EVT VT = Op.getValueType(); 5114 unsigned BitWidth = VT.getSizeInBits(); 5115 assert(Op.getNumOperands() == 3 && 5116 VT == Op.getOperand(1).getValueType() && 5117 "Unexpected SRA!"); 5118 5119 // Expand into a bunch of logical ops, followed by a select_cc. 5120 SDValue Lo = Op.getOperand(0); 5121 SDValue Hi = Op.getOperand(1); 5122 SDValue Amt = Op.getOperand(2); 5123 EVT AmtVT = Amt.getValueType(); 5124 5125 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 5126 DAG.getConstant(BitWidth, AmtVT), Amt); 5127 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 5128 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 5129 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 5130 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5131 DAG.getConstant(-BitWidth, AmtVT)); 5132 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 5133 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 5134 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT), 5135 Tmp4, Tmp6, ISD::SETLE); 5136 SDValue OutOps[] = { OutLo, OutHi }; 5137 return DAG.getMergeValues(OutOps, 2, dl); 5138} 5139 5140//===----------------------------------------------------------------------===// 5141// Vector related lowering. 5142// 5143 5144/// BuildSplatI - Build a canonical splati of Val with an element size of 5145/// SplatSize. 
Cast the result to VT. 5146static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 5147 SelectionDAG &DAG, SDLoc dl) { 5148 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 5149 5150 static const EVT VTys[] = { // canonical VT to use for each size. 5151 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 5152 }; 5153 5154 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 5155 5156 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 5157 if (Val == -1) 5158 SplatSize = 1; 5159 5160 EVT CanonicalVT = VTys[SplatSize-1]; 5161 5162 // Build a canonical splat for this value. 5163 SDValue Elt = DAG.getConstant(Val, MVT::i32); 5164 SmallVector<SDValue, 8> Ops; 5165 Ops.assign(CanonicalVT.getVectorNumElements(), Elt); 5166 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, 5167 &Ops[0], Ops.size()); 5168 return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res); 5169} 5170 5171/// BuildIntrinsicOp - Return a unary operator intrinsic node with the 5172/// specified intrinsic ID. 5173static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, 5174 SelectionDAG &DAG, SDLoc dl, 5175 EVT DestVT = MVT::Other) { 5176 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 5177 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5178 DAG.getConstant(IID, MVT::i32), Op); 5179} 5180 5181/// BuildIntrinsicOp - Return a binary operator intrinsic node with the 5182/// specified intrinsic ID. 5183static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 5184 SelectionDAG &DAG, SDLoc dl, 5185 EVT DestVT = MVT::Other) { 5186 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 5187 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5188 DAG.getConstant(IID, MVT::i32), LHS, RHS); 5189} 5190 5191/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 5192/// specified intrinsic ID. 5193static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 5194 SDValue Op2, SelectionDAG &DAG, 5195 SDLoc dl, EVT DestVT = MVT::Other) { 5196 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 5197 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5198 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); 5199} 5200 5201 5202/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 5203/// amount. The result has the specified value type. 5204static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, 5205 EVT VT, SelectionDAG &DAG, SDLoc dl) { 5206 // Force LHS/RHS to be the right type. 5207 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 5208 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 5209 5210 int Ops[16]; 5211 for (unsigned i = 0; i != 16; ++i) 5212 Ops[i] = i + Amt; 5213 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 5214 return DAG.getNode(ISD::BITCAST, dl, VT, T); 5215} 5216 5217// If this is a case we can't handle, return null and let the default 5218// expansion code take care of it. If we CAN select this case, and if it 5219// selects to a single instruction, return Op. Otherwise, if we can codegen 5220// this case more efficiently than a constant pool load, lower it to the 5221// sequence of ops that should be used. 5222SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 5223 SelectionDAG &DAG) const { 5224 SDLoc dl(Op); 5225 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 5226 assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 5227 5228 // Check if this is a splat of a constant value. 
5229 APInt APSplatBits, APSplatUndef;
5230 unsigned SplatBitSize;
5231 bool HasAnyUndefs;
5232 if (!BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
5233 HasAnyUndefs, 0, true) || SplatBitSize > 32)
5234 return SDValue();
5235
5236 unsigned SplatBits = APSplatBits.getZExtValue();
5237 unsigned SplatUndef = APSplatUndef.getZExtValue();
5238 unsigned SplatSize = SplatBitSize / 8;
5239
5240 // First, handle single instruction cases.
5241
5242 // All zeros?
5243 if (SplatBits == 0) {
5244 // Canonicalize all zero vectors to be v4i32.
5245 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
5246 SDValue Z = DAG.getConstant(0, MVT::i32);
5247 Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z);
5248 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
5249 }
5250 return Op;
5251 }
5252
5253 // If the sign-extended value is in the range [-16,15], use VSPLTI[bhw].
5254 int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
5255 (32-SplatBitSize));
5256 if (SextVal >= -16 && SextVal <= 15)
5257 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
5258
5259
5260 // Two instruction sequences.
5261
5262 // If this value is in the range [-32,30] and is even, use:
5263 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
5264 // If this value is in the range [17,31] and is odd, use:
5265 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
5266 // If this value is in the range [-31,-17] and is odd, use:
5267 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
5268 // Note the last two are three-instruction sequences.
5269 if (SextVal >= -32 && SextVal <= 31) {
5270 // To avoid having these optimizations undone by constant folding,
5271 // we convert to a pseudo that will be expanded later into one of
5272 // the above forms.
5273 SDValue Elt = DAG.getConstant(SextVal, MVT::i32);
5274 EVT VT = Op.getValueType();
5275 int Size = VT == MVT::v16i8 ? 1 : (VT == MVT::v8i16 ? 2 : 4);
5276 SDValue EltSize = DAG.getConstant(Size, MVT::i32);
5277 return DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
5278 }
5279
5280 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
5281 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
5282 // for fneg/fabs.
5283 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
5284 // Make -1 and vspltisw -1:
5285 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
5286
5287 // Make the VSLW intrinsic, computing 0x8000_0000.
5288 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
5289 OnesV, DAG, dl);
5290
5291 // xor by OnesV to invert it.
5292 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
5293 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
5294 }
5295
5296 // Check to see if this is a wide variety of vsplti*, binop self cases.
5297 static const signed char SplatCsts[] = {
5298 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
5299 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
5300 };
5301
5302 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
5303 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
5304 // cases which are ambiguous (e.g. formation of 0x8000_0000): 'vsplti -1' is listed first, so it is tried first.
5305 int i = SplatCsts[idx];
5306
5307 // Figure out what shift amount will be used by altivec if shifted by i in
5308 // this splat size.
5309 unsigned TypeShiftAmt = i & (SplatBitSize-1);
5310
5311 // vsplti + shl self.
5312 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) { 5313 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5314 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5315 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 5316 Intrinsic::ppc_altivec_vslw 5317 }; 5318 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5319 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5320 } 5321 5322 // vsplti + srl self. 5323 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 5324 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5325 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5326 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 5327 Intrinsic::ppc_altivec_vsrw 5328 }; 5329 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5330 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5331 } 5332 5333 // vsplti + sra self. 5334 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 5335 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5336 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5337 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 5338 Intrinsic::ppc_altivec_vsraw 5339 }; 5340 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5341 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5342 } 5343 5344 // vsplti + rol self. 5345 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 5346 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 5347 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5348 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5349 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 5350 Intrinsic::ppc_altivec_vrlw 5351 }; 5352 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5353 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5354 } 5355 5356 // t = vsplti c, result = vsldoi t, t, 1 5357 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 5358 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 5359 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl); 5360 } 5361 // t = vsplti c, result = vsldoi t, t, 2 5362 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 5363 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 5364 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl); 5365 } 5366 // t = vsplti c, result = vsldoi t, t, 3 5367 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 5368 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 5369 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl); 5370 } 5371 } 5372 5373 return SDValue(); 5374} 5375 5376/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 5377/// the specified operations to build the shuffle. 
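/// Each PFEntry packs: a cost in bits 31:30, the operation number in bits
/// 29:26, and 13-bit left/right operand entries in bits 25:13 and 12:0,
/// matching the decoding at the top of the function.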
5378static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 5379 SDValue RHS, SelectionDAG &DAG, 5380 SDLoc dl) { 5381 unsigned OpNum = (PFEntry >> 26) & 0x0F; 5382 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 5383 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 5384 5385 enum { 5386 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 5387 OP_VMRGHW, 5388 OP_VMRGLW, 5389 OP_VSPLTISW0, 5390 OP_VSPLTISW1, 5391 OP_VSPLTISW2, 5392 OP_VSPLTISW3, 5393 OP_VSLDOI4, 5394 OP_VSLDOI8, 5395 OP_VSLDOI12 5396 }; 5397 5398 if (OpNum == OP_COPY) { 5399 if (LHSID == (1*9+2)*9+3) return LHS; 5400 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 5401 return RHS; 5402 } 5403 5404 SDValue OpLHS, OpRHS; 5405 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 5406 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 5407 5408 int ShufIdxs[16]; 5409 switch (OpNum) { 5410 default: llvm_unreachable("Unknown i32 permute!"); 5411 case OP_VMRGHW: 5412 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 5413 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 5414 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 5415 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 5416 break; 5417 case OP_VMRGLW: 5418 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 5419 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 5420 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 5421 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 5422 break; 5423 case OP_VSPLTISW0: 5424 for (unsigned i = 0; i != 16; ++i) 5425 ShufIdxs[i] = (i&3)+0; 5426 break; 5427 case OP_VSPLTISW1: 5428 for (unsigned i = 0; i != 16; ++i) 5429 ShufIdxs[i] = (i&3)+4; 5430 break; 5431 case OP_VSPLTISW2: 5432 for (unsigned i = 0; i != 16; ++i) 5433 ShufIdxs[i] = (i&3)+8; 5434 break; 5435 case OP_VSPLTISW3: 5436 for (unsigned i = 0; i != 16; ++i) 5437 ShufIdxs[i] = (i&3)+12; 5438 break; 5439 case OP_VSLDOI4: 5440 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 5441 case OP_VSLDOI8: 5442 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 5443 case OP_VSLDOI12: 5444 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 5445 } 5446 EVT VT = OpLHS.getValueType(); 5447 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS); 5448 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS); 5449 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs); 5450 return DAG.getNode(ISD::BITCAST, dl, VT, T); 5451} 5452 5453/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 5454/// is a shuffle we can handle in a single instruction, return it. Otherwise, 5455/// return the code it can be lowered into. Worst case, it can always be 5456/// lowered into a vperm. 5457SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 5458 SelectionDAG &DAG) const { 5459 SDLoc dl(Op); 5460 SDValue V1 = Op.getOperand(0); 5461 SDValue V2 = Op.getOperand(1); 5462 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5463 EVT VT = Op.getValueType(); 5464 5465 // Cases that are handled by instructions that take permute immediates 5466 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 5467 // selected by the instruction selector. 
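  // (For example, a v4i32 splat mask such as <1,1,1,1> is matched by
  // isSplatShuffleMask below and returned unchanged, so it later selects to
  // a single vspltw.)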
5468 if (V2.getOpcode() == ISD::UNDEF) { 5469 if (PPC::isSplatShuffleMask(SVOp, 1) || 5470 PPC::isSplatShuffleMask(SVOp, 2) || 5471 PPC::isSplatShuffleMask(SVOp, 4) || 5472 PPC::isVPKUWUMShuffleMask(SVOp, true) || 5473 PPC::isVPKUHUMShuffleMask(SVOp, true) || 5474 PPC::isVSLDOIShuffleMask(SVOp, true) != -1 || 5475 PPC::isVMRGLShuffleMask(SVOp, 1, true) || 5476 PPC::isVMRGLShuffleMask(SVOp, 2, true) || 5477 PPC::isVMRGLShuffleMask(SVOp, 4, true) || 5478 PPC::isVMRGHShuffleMask(SVOp, 1, true) || 5479 PPC::isVMRGHShuffleMask(SVOp, 2, true) || 5480 PPC::isVMRGHShuffleMask(SVOp, 4, true)) { 5481 return Op; 5482 } 5483 } 5484 5485 // Altivec has a variety of "shuffle immediates" that take two vector inputs 5486 // and produce a fixed permutation. If any of these match, do not lower to 5487 // VPERM. 5488 if (PPC::isVPKUWUMShuffleMask(SVOp, false) || 5489 PPC::isVPKUHUMShuffleMask(SVOp, false) || 5490 PPC::isVSLDOIShuffleMask(SVOp, false) != -1 || 5491 PPC::isVMRGLShuffleMask(SVOp, 1, false) || 5492 PPC::isVMRGLShuffleMask(SVOp, 2, false) || 5493 PPC::isVMRGLShuffleMask(SVOp, 4, false) || 5494 PPC::isVMRGHShuffleMask(SVOp, 1, false) || 5495 PPC::isVMRGHShuffleMask(SVOp, 2, false) || 5496 PPC::isVMRGHShuffleMask(SVOp, 4, false)) 5497 return Op; 5498 5499 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 5500 // perfect shuffle table to emit an optimal matching sequence. 5501 ArrayRef<int> PermMask = SVOp->getMask(); 5502 5503 unsigned PFIndexes[4]; 5504 bool isFourElementShuffle = true; 5505 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 5506 unsigned EltNo = 8; // Start out undef. 5507 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 5508 if (PermMask[i*4+j] < 0) 5509 continue; // Undef, ignore it. 5510 5511 unsigned ByteSource = PermMask[i*4+j]; 5512 if ((ByteSource & 3) != j) { 5513 isFourElementShuffle = false; 5514 break; 5515 } 5516 5517 if (EltNo == 8) { 5518 EltNo = ByteSource/4; 5519 } else if (EltNo != ByteSource/4) { 5520 isFourElementShuffle = false; 5521 break; 5522 } 5523 } 5524 PFIndexes[i] = EltNo; 5525 } 5526 5527 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 5528 // perfect shuffle vector to determine if it is cost effective to do this as 5529 // discrete instructions, or whether we should use a vperm. 5530 if (isFourElementShuffle) { 5531 // Compute the index in the perfect shuffle table. 5532 unsigned PFTableIndex = 5533 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 5534 5535 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 5536 unsigned Cost = (PFEntry >> 30); 5537 5538 // Determining when to avoid vperm is tricky. Many things affect the cost 5539 // of vperm, particularly how many times the perm mask needs to be computed. 5540 // For example, if the perm mask can be hoisted out of a loop or is already 5541 // used (perhaps because there are multiple permutes with the same shuffle 5542 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 5543 // the loop requires an extra register. 5544 // 5545 // As a compromise, we only emit discrete instructions if the shuffle can be 5546 // generated in 3 or fewer operations. When we have loop information 5547 // available, if this block is within a loop, we should avoid using vperm 5548 // for 3-operation perms and use a constant pool load instead. 
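    // (Illustrative: an entry costed below 3 -- say a vmrghw feeding a
    // vsldoi -- is expanded directly; anything costlier falls through to the
    // vperm path below.)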
5549 if (Cost < 3) 5550 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 5551 } 5552 5553 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 5554 // vector that will get spilled to the constant pool. 5555 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 5556 5557 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 5558 // that it is in input element units, not in bytes. Convert now. 5559 EVT EltVT = V1.getValueType().getVectorElementType(); 5560 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 5561 5562 SmallVector<SDValue, 16> ResultMask; 5563 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 5564 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 5565 5566 for (unsigned j = 0; j != BytesPerElement; ++j) 5567 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, 5568 MVT::i32)); 5569 } 5570 5571 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, 5572 &ResultMask[0], ResultMask.size()); 5573 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask); 5574} 5575 5576/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 5577/// altivec comparison. If it is, return true and fill in Opc/isDot with 5578/// information about the intrinsic. 5579static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, 5580 bool &isDot) { 5581 unsigned IntrinsicID = 5582 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 5583 CompareOpc = -1; 5584 isDot = false; 5585 switch (IntrinsicID) { 5586 default: return false; 5587 // Comparison predicates. 5588 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 5589 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 5590 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 5591 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 5592 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 5593 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 5594 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 5595 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 5596 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 5597 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 5598 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 5599 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 5600 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 5601 5602 // Normal Comparisons. 
5603 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 5604 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 5605 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 5606 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 5607 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 5608 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 5609 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 5610 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 5611 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 5612 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 5613 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 5614 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 5615 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 5616 } 5617 return true; 5618} 5619 5620/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 5621/// lower, do it, otherwise return null. 5622SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 5623 SelectionDAG &DAG) const { 5624 // If this is a lowered altivec predicate compare, CompareOpc is set to the 5625 // opcode number of the comparison. 5626 SDLoc dl(Op); 5627 int CompareOpc; 5628 bool isDot; 5629 if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) 5630 return SDValue(); // Don't custom lower most intrinsics. 5631 5632 // If this is a non-dot comparison, make the VCMP node and we are done. 5633 if (!isDot) { 5634 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 5635 Op.getOperand(1), Op.getOperand(2), 5636 DAG.getConstant(CompareOpc, MVT::i32)); 5637 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 5638 } 5639 5640 // Create the PPCISD altivec 'dot' comparison node. 5641 SDValue Ops[] = { 5642 Op.getOperand(2), // LHS 5643 Op.getOperand(3), // RHS 5644 DAG.getConstant(CompareOpc, MVT::i32) 5645 }; 5646 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 5647 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3); 5648 5649 // Now that we have the comparison, emit a copy from the CR to a GPR. 5650 // This is flagged to the above dot comparison. 5651 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 5652 DAG.getRegister(PPC::CR6, MVT::i32), 5653 CompNode.getValue(1)); 5654 5655 // Unpack the result based on how the target uses it. 5656 unsigned BitNo; // Bit # of CR6. 5657 bool InvertBit; // Invert result? 5658 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 5659 default: // Can't happen, don't crash on invalid number though. 5660 case 0: // Return the value of the EQ bit of CR6. 5661 BitNo = 0; InvertBit = false; 5662 break; 5663 case 1: // Return the inverted value of the EQ bit of CR6. 5664 BitNo = 0; InvertBit = true; 5665 break; 5666 case 2: // Return the value of the LT bit of CR6. 5667 BitNo = 2; InvertBit = false; 5668 break; 5669 case 3: // Return the inverted value of the LT bit of CR6. 5670 BitNo = 2; InvertBit = true; 5671 break; 5672 } 5673 5674 // Shift the bit into the low position. 5675 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 5676 DAG.getConstant(8-(3-BitNo), MVT::i32)); 5677 // Isolate the bit. 5678 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 5679 DAG.getConstant(1, MVT::i32)); 5680 5681 // If we are supposed to, toggle the bit. 
5682 if (InvertBit) 5683 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 5684 DAG.getConstant(1, MVT::i32)); 5685 return Flags; 5686} 5687 5688SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 5689 SelectionDAG &DAG) const { 5690 SDLoc dl(Op); 5691 // Create a stack slot that is 16-byte aligned. 5692 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 5693 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 5694 EVT PtrVT = getPointerTy(); 5695 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 5696 5697 // Store the input value into Value#0 of the stack slot. 5698 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, 5699 Op.getOperand(0), FIdx, MachinePointerInfo(), 5700 false, false, 0); 5701 // Load it out. 5702 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(), 5703 false, false, false, 0); 5704} 5705 5706SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 5707 SDLoc dl(Op); 5708 if (Op.getValueType() == MVT::v4i32) { 5709 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5710 5711 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 5712 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 5713 5714 SDValue RHSSwap = // = vrlw RHS, 16 5715 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 5716 5717 // Shrinkify inputs to v8i16. 5718 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 5719 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 5720 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 5721 5722 // Low parts multiplied together, generating 32-bit results (we ignore the 5723 // top parts). 5724 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 5725 LHS, RHS, DAG, dl, MVT::v4i32); 5726 5727 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 5728 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 5729 // Shift the high parts up 16 bits. 5730 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 5731 Neg16, DAG, dl); 5732 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 5733 } else if (Op.getValueType() == MVT::v8i16) { 5734 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5735 5736 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 5737 5738 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 5739 LHS, RHS, Zero, DAG, dl); 5740 } else if (Op.getValueType() == MVT::v16i8) { 5741 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5742 5743 // Multiply the even 8-bit parts, producing 16-bit sums. 5744 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 5745 LHS, RHS, DAG, dl, MVT::v8i16); 5746 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 5747 5748 // Multiply the odd 8-bit parts, producing 16-bit sums. 5749 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 5750 LHS, RHS, DAG, dl, MVT::v8i16); 5751 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 5752 5753 // Merge the results together. 5754 int Ops[16]; 5755 for (unsigned i = 0; i != 8; ++i) { 5756 Ops[i*2 ] = 2*i+1; 5757 Ops[i*2+1] = 2*i+1+16; 5758 } 5759 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 5760 } else { 5761 llvm_unreachable("Unknown mul to lower!"); 5762 } 5763} 5764 5765/// LowerOperation - Provide custom lowering hooks for some operations. 
5766/// 5767SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 5768 switch (Op.getOpcode()) { 5769 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 5770 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 5771 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 5772 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 5773 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5774 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 5775 case ISD::SETCC: return LowerSETCC(Op, DAG); 5776 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 5777 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 5778 case ISD::VASTART: 5779 return LowerVASTART(Op, DAG, PPCSubTarget); 5780 5781 case ISD::VAARG: 5782 return LowerVAARG(Op, DAG, PPCSubTarget); 5783 5784 case ISD::VACOPY: 5785 return LowerVACOPY(Op, DAG, PPCSubTarget); 5786 5787 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget); 5788 case ISD::DYNAMIC_STACKALLOC: 5789 return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget); 5790 5791 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 5792 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 5793 5794 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 5795 case ISD::FP_TO_UINT: 5796 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 5797 SDLoc(Op)); 5798 case ISD::UINT_TO_FP: 5799 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 5800 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5801 5802 // Lower 64-bit shifts. 5803 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 5804 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 5805 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 5806 5807 // Vector-related lowering. 5808 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 5809 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5810 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 5811 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 5812 case ISD::MUL: return LowerMUL(Op, DAG); 5813 5814 // For counter-based loop handling. 5815 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 5816 5817 // Frame & Return address. 

void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue>&Results,
                                           SelectionDAG &DAG) const {
  const TargetMachine &TM = getTargetMachine();
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  case ISD::INTRINSIC_W_CHAIN: {
    if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
        Intrinsic::ppc_is_decremented_ctr_nonzero)
      break;

    assert(N->getValueType(0) == MVT::i1 &&
           "Unexpected result type for CTR decrement intrinsic");
    EVT SVT = getSetCCResultType(*DAG.getContext(), N->getValueType(0));
    SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
    SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
                                 N->getOperand(1));

    Results.push_back(NewInt);
    Results.push_back(NewInt.getValue(1));
    break;
  }
  case ISD::VAARG: {
    if (!TM.getSubtarget<PPCSubtarget>().isSVR4ABI()
        || TM.getSubtarget<PPCSubtarget>().isPPC64())
      return;

    EVT VT = N->getValueType(0);

    if (VT == MVT::i64) {
      SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, PPCSubTarget);

      Results.push_back(NewNode);
      Results.push_back(NewNode.getValue(1));
    }
    return;
  }
  case ISD::FP_ROUND_INREG: {
    assert(N->getValueType(0) == MVT::ppcf128);
    assert(N->getOperand(0).getValueType() == MVT::ppcf128);
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                             MVT::f64, N->getOperand(0),
                             DAG.getIntPtrConstant(0));
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                             MVT::f64, N->getOperand(0),
                             DAG.getIntPtrConstant(1));

    // Add the two halves of the long double in round-to-zero mode.
    SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);

    // We know the low half is about to be thrown away, so just use something
    // convenient.
    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128,
                                  FPreg, FPreg));
    return;
  }
  case ISD::FP_TO_SINT:
    // LowerFP_TO_INT() can only handle f32 and f64.
    if (N->getOperand(0).getValueType() == MVT::ppcf128)
      return;
    Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
    return;
  }
}


//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                                    bool is64bit, unsigned BinOpcode) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
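  // The expansion below follows the usual PowerPC load-reserve /
  // store-conditional idiom: l[wd]arx acquires a reservation on the
  // location, st[wd]cx. succeeds only if the reservation still holds, and
  // the bne- loops back to retry when another processor intervened. With
  // BinOpcode==0 the loop stores 'incr' unmodified, which is a swap.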
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptrA = MI->getOperand(1).getReg();
  unsigned ptrB = MI->getOperand(2).getReg();
  unsigned incr = MI->getOperand(3).getReg();
  DebugLoc dl = MI->getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  unsigned TmpReg = (!BinOpcode) ? incr :
    RegInfo.createVirtualRegister(
       is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
                 (const TargetRegisterClass *) &PPC::GPRCRegClass);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   add r0, dest, incr
  //   st[wd]cx. r0, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
    .addReg(ptrA).addReg(ptrB);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
    .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
  BuildMI(BB, dl, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  return BB;
}

MachineBasicBlock *
PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
                                            MachineBasicBlock *BB,
                                            bool is8bit,    // operation
                                            unsigned BinOpcode) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  // In 64 bit mode we have to use 64 bits for addresses, even though the
  // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
  // registers without caring whether they're 32 or 64, but here we're
  // doing actual arithmetic on the addresses.
  bool is64bit = PPCSubTarget.isPPC64();
  unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptrA = MI->getOperand(1).getReg();
  unsigned ptrB = MI->getOperand(2).getReg();
  unsigned incr = MI->getOperand(3).getReg();
  DebugLoc dl = MI->getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  const TargetRegisterClass *RC =
    is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
              (const TargetRegisterClass *) &PPC::GPRCRegClass;
  unsigned PtrReg = RegInfo.createVirtualRegister(RC);
  unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
  unsigned ShiftReg = RegInfo.createVirtualRegister(RC);
  unsigned Incr2Reg = RegInfo.createVirtualRegister(RC);
  unsigned MaskReg = RegInfo.createVirtualRegister(RC);
  unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
  unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
  unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
  unsigned Ptr1Reg;
  unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // The 4-byte load must be aligned, while a char or short may be
  // anywhere in the word.  Hence all this nasty bookkeeping code.
  //   add ptr1, ptrA, ptrB [copy if ptrA==0]
  //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
  //   xori shift, shift1, 24 [16]
  //   rlwinm ptr, ptr1, 0, 0, 29
  //   slw incr2, incr, shift
  //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
  //   slw mask, mask2, shift
  //  loopMBB:
  //   lwarx tmpDest, ptr
  //   add tmp, tmpDest, incr2
  //   andc tmp2, tmpDest, mask
  //   and tmp3, tmp, mask
  //   or tmp4, tmp3, tmp2
  //   stwcx. tmp4, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB
  //   srw dest, tmpDest, shift
  if (ptrA != ZeroReg) {
    Ptr1Reg = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
      .addReg(ptrA).addReg(ptrB);
  } else {
    Ptr1Reg = ptrB;
  }
  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
      .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
      .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
  if (is64bit)
    BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
      .addReg(Ptr1Reg).addImm(0).addImm(61);
  else
    BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
      .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
  BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg)
      .addReg(incr).addReg(ShiftReg);
  if (is8bit)
    BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
  else {
    BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
    BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg).addReg(Mask3Reg).addImm(65535);
  }
  BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
      .addReg(Mask2Reg).addReg(ShiftReg);
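
  // A worked example of the bookkeeping above (big-endian, is8bit case):
  // for a byte at address ptr1 = 0x1002, the rlwinm extracts
  // (ptr1 & 3) << 3 = 16, and the xori with 24 flips that to shift = 8,
  // because on a big-endian target the byte at the highest address within
  // the word occupies the least-significant bit positions. The mask is
  // then 0xFF << 8, and ptr = ptr1 & ~3 = 0x1000 is the aligned word.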

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
    .addReg(ZeroReg).addReg(PtrReg);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
      .addReg(Incr2Reg).addReg(TmpDestReg);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
    .addReg(TmpDestReg).addReg(MaskReg);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg)
    .addReg(TmpReg).addReg(MaskReg);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg)
    .addReg(Tmp3Reg).addReg(Tmp2Reg);
  BuildMI(BB, dl, TII->get(PPC::STWCX))
    .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg);
  BuildMI(BB, dl, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg)
    .addReg(ShiftReg);
  return BB;
}

llvm::MachineBasicBlock*
PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI->getDebugLoc();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = MBB;
  ++I;

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  unsigned DstReg = MI->getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(RC->hasType(MVT::i32) && "Invalid destination!");
  unsigned mainDstReg = MRI.createVirtualRegister(RC);
  unsigned restoreDstReg = MRI.createVirtualRegister(RC);

  MVT PVT = getPointerTy();
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  SjLjSetup mainMBB
  //  bl mainMBB
  //  v_restore = 1
  //  b sinkMBB
  //
  // mainMBB:
  //  buf[LabelOffset] = LR
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  llvm::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Note that the structure of the jmp_buf used here is not compatible
  // with that used by libc, and is not designed to be. Specifically, it
  // stores only those 'reserved' registers that LLVM does not otherwise
  // understand how to spill. Also, by convention, by the time this
  // intrinsic is called, Clang has already stored the frame address in the
  // first slot of the buffer and stack address in the third. Following the
  // X86 target code, we'll store the jump address in the second slot. We also
  // need to save the TOC pointer (R2) to handle jumps between shared
  // libraries, and that will be stored in the fourth slot. The thread
  // identifier (R13) is not affected.
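  //
  // Putting the offsets below together with that convention, the buffer
  // layout is (each slot being PVT.getStoreSize() bytes):
  //   slot 0: frame address  (stored by Clang)
  //   slot 1: jump address   (LabelOffset, stored in mainMBB below)
  //   slot 2: stack pointer  (stored by Clang; reloaded via SPOffset in
  //                           emitEHSjLjLongJmp)
  //   slot 3: TOC pointer    (TOCOffset)
  //   slot 4: base pointer   (BPOffset)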

  // thisMBB:
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  // Prepare the IP in a register.
  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
  unsigned BufReg = MI->getOperand(1).getReg();

  if (PPCSubTarget.isPPC64() && PPCSubTarget.isSVR4ABI()) {
    MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
            .addReg(PPC::X2)
            .addImm(TOCOffset)
            .addReg(BufReg);
    MIB.setMemRefs(MMOBegin, MMOEnd);
  }

  // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned BaseReg;
  if (MF->getFunction()->getAttributes().hasAttribute(
          AttributeSet::FunctionIndex, Attribute::Naked))
    BaseReg = PPCSubTarget.isPPC64() ? PPC::X1 : PPC::R1;
  else
    BaseReg = PPCSubTarget.isPPC64() ? PPC::BP8 : PPC::BP;

  MIB = BuildMI(*thisMBB, MI, DL,
                TII->get(PPCSubTarget.isPPC64() ? PPC::STD : PPC::STW))
          .addReg(BaseReg)
          .addImm(BPOffset)
          .addReg(BufReg);
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
  const PPCRegisterInfo *TRI =
    static_cast<const PPCRegisterInfo*>(getTargetMachine().getRegisterInfo());
  MIB.addRegMask(TRI->getNoPreservedMask());

  BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);

  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
          .addMBB(mainMBB);
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);

  thisMBB->addSuccessor(mainMBB, /* weight */ 0);
  thisMBB->addSuccessor(sinkMBB, /* weight */ 1);

  // mainMBB:
  //  mainDstReg = 0
  MIB = BuildMI(mainMBB, DL,
    TII->get(PPCSubTarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);

  // Store IP
  if (PPCSubTarget.isPPC64()) {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
            .addReg(LabelReg)
            .addImm(LabelOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
            .addReg(LabelReg)
            .addImm(LabelOffset)
            .addReg(BufReg);
  }

  MIB.setMemRefs(MMOBegin, MMOEnd);

  BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(PPC::PHI), DstReg)
    .addReg(mainDstReg).addMBB(mainMBB)
    .addReg(restoreDstReg).addMBB(thisMBB);

  MI->eraseFromParent();
  return sinkMBB;
}

MachineBasicBlock *
PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI->getDebugLoc();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  MVT PVT = getPointerTy();
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
    (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  unsigned Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
  unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
  unsigned BP  = (PVT == MVT::i64) ? PPC::X30 : PPC::R30;

  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset    = 2 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  unsigned BufReg = MI->getOperand(0).getReg();

  // Reload FP (the jumped-to function may not have had a
  // frame pointer, and if so, then its r31 will be restored
  // as necessary).
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
            .addImm(0)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
            .addImm(0)
            .addReg(BufReg);
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Reload IP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
            .addImm(LabelOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
            .addImm(LabelOffset)
            .addReg(BufReg);
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Reload SP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
            .addImm(SPOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
            .addImm(SPOffset)
            .addReg(BufReg);
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Reload BP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
            .addImm(BPOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
            .addImm(BPOffset)
            .addReg(BufReg);
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Reload TOC
  if (PVT == MVT::i64 && PPCSubTarget.isSVR4ABI()) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
            .addImm(TOCOffset)
            .addReg(BufReg);

    MIB.setMemRefs(MMOBegin, MMOEnd);
  }

  // Jump
  BuildMI(*MBB, MI, DL,
          TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
  BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));

  MI->eraseFromParent();
  return MBB;
}

MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB) const {
  if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 ||
      MI->getOpcode() == PPC::EH_SjLj_SetJmp64) {
    return emitEHSjLjSetJmp(MI, BB);
  } else if (MI->getOpcode() == PPC::EH_SjLj_LongJmp32 ||
             MI->getOpcode() == PPC::EH_SjLj_LongJmp64) {
    return emitEHSjLjLongJmp(MI, BB);
  }

  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  // To "insert" these instructions we actually have to insert their
  // control-flow patterns.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  MachineFunction *F = BB->getParent();

  if (PPCSubTarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 ||
                                 MI->getOpcode() == PPC::SELECT_CC_I8)) {
    SmallVector<MachineOperand, 2> Cond;
    Cond.push_back(MI->getOperand(4));
    Cond.push_back(MI->getOperand(1));

    DebugLoc dl = MI->getDebugLoc();
    const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
    TII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(),
                      Cond, MI->getOperand(2).getReg(),
                      MI->getOperand(3).getReg());
  } else if (MI->getOpcode() == PPC::SELECT_CC_I4 ||
             MI->getOpcode() == PPC::SELECT_CC_I8 ||
             MI->getOpcode() == PPC::SELECT_CC_F4 ||
             MI->getOpcode() == PPC::SELECT_CC_F8 ||
             MI->getOpcode() == PPC::SELECT_CC_VRRC) {

    // The incoming instruction knows the destination vreg to set, the
    // condition code register to branch on, the true/false values to
    // select between, and a branch opcode to use.

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC copy1MBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    unsigned SelectPred = MI->getOperand(4).getImm();
    DebugLoc dl = MI->getDebugLoc();
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    llvm::next(MachineBasicBlock::iterator(MI)),
                    BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    // Next, add the true and fallthrough blocks as its successors.
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(*BB, BB->begin(), dl,
            TII->get(PPC::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
  }
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
    BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
    BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8);

  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
    BB = EmitAtomicBinary(MI, BB, false, PPC::AND);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
    BB = EmitAtomicBinary(MI, BB, true, PPC::AND8);

  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
    BB = EmitAtomicBinary(MI, BB, false, PPC::OR);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
    BB = EmitAtomicBinary(MI, BB, true, PPC::OR8);

  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
    BB = EmitAtomicBinary(MI, BB, false, PPC::XOR);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
    BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8);

  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ANDC);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ANDC);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
    BB = EmitAtomicBinary(MI, BB, false, PPC::ANDC);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
    BB = EmitAtomicBinary(MI, BB, true, PPC::ANDC8);

  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
    BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
    BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8);
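  // The SUBF cases above work because EmitAtomicBinary builds the binop as
  // BuildMI(..., BinOpcode, Tmp).addReg(incr).addReg(dest), and since
  // subf d, a, b computes b - a, this yields Tmp = dest - incr, which is
  // exactly what a fetch-and-sub must store back. (EmitPartwordAtomicBinary
  // does the same with the shifted operands.)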

  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32)
    BB = EmitAtomicBinary(MI, BB, false, 0);
  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64)
    BB = EmitAtomicBinary(MI, BB, true, 0);

  else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
           MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) {
    bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;

    unsigned dest   = MI->getOperand(0).getReg();
    unsigned ptrA   = MI->getOperand(1).getReg();
    unsigned ptrB   = MI->getOperand(2).getReg();
    unsigned oldval = MI->getOperand(3).getReg();
    unsigned newval = MI->getOperand(4).getReg();
    DebugLoc dl     = MI->getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    llvm::next(MachineBasicBlock::iterator(MI)),
                    BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    //  thisMBB:
    //   ...
    //   fallthrough --> loop1MBB
    BB->addSuccessor(loop1MBB);

    // loop1MBB:
    //   l[wd]arx dest, ptr
    //   cmp[wd] dest, oldval
    //   bne- midMBB
    // loop2MBB:
    //   st[wd]cx. newval, ptr
    //   bne- loop1MBB
    //   b exitMBB
    // midMBB:
    //   st[wd]cx. dest, ptr
    // exitMBB:
    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
      .addReg(ptrA).addReg(ptrB);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
      .addReg(oldval).addReg(dest);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
      .addReg(newval).addReg(ptrA).addReg(ptrB);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
      .addReg(dest).addReg(ptrA).addReg(ptrB);
    BB->addSuccessor(exitMBB);

    //  exitMBB:
    //   ...
    BB = exitMBB;
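    // Note on midMBB: when the comparison fails we still execute a
    // st[wd]cx. of the value just loaded. That store either rewrites the
    // same value or simply fails; either way it clears the reservation
    // taken by the l[wd]arx, so no stale reservation outlives the cmpxchg.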
  } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
             MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
    // We must use 64-bit registers for addresses when targeting 64-bit,
    // since we're actually doing arithmetic on them.  Other registers
    // can be 32-bit.
    bool is64bit = PPCSubTarget.isPPC64();
    bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;

    unsigned dest   = MI->getOperand(0).getReg();
    unsigned ptrA   = MI->getOperand(1).getReg();
    unsigned ptrB   = MI->getOperand(2).getReg();
    unsigned oldval = MI->getOperand(3).getReg();
    unsigned newval = MI->getOperand(4).getReg();
    DebugLoc dl     = MI->getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    llvm::next(MachineBasicBlock::iterator(MI)),
                    BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    const TargetRegisterClass *RC =
      is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
                (const TargetRegisterClass *) &PPC::GPRCRegClass;
    unsigned PtrReg = RegInfo.createVirtualRegister(RC);
    unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
    unsigned ShiftReg = RegInfo.createVirtualRegister(RC);
    unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC);
    unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC);
    unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC);
    unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC);
    unsigned MaskReg = RegInfo.createVirtualRegister(RC);
    unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
    unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
    unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
    unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
    unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
    unsigned Ptr1Reg;
    unsigned TmpReg = RegInfo.createVirtualRegister(RC);
    unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
    //  thisMBB:
    //   ...
    //   fallthrough --> loop1MBB
    BB->addSuccessor(loop1MBB);

    // The 4-byte load must be aligned, while a char or short may be
    // anywhere in the word.  Hence all this nasty bookkeeping code.
    //   add ptr1, ptrA, ptrB [copy if ptrA==0]
    //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
    //   xori shift, shift1, 24 [16]
    //   rlwinm ptr, ptr1, 0, 0, 29
    //   slw newval2, newval, shift
    //   slw oldval2, oldval, shift
    //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
    //   slw mask, mask2, shift
    //   and newval3, newval2, mask
    //   and oldval3, oldval2, mask
    // loop1MBB:
    //   lwarx tmpDest, ptr
    //   and tmp, tmpDest, mask
    //   cmpw tmp, oldval3
    //   bne- midMBB
    // loop2MBB:
    //   andc tmp2, tmpDest, mask
    //   or tmp4, tmp2, newval3
    //   stwcx. tmp4, ptr
    //   bne- loop1MBB
    //   b exitMBB
    // midMBB:
    //   stwcx. tmpDest, ptr
    // exitMBB:
    //   srw dest, tmpDest, shift
    if (ptrA != ZeroReg) {
      Ptr1Reg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
        .addReg(ptrA).addReg(ptrB);
    } else {
      Ptr1Reg = ptrB;
    }
    BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
        .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
        .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
    if (is64bit)
      BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
        .addReg(Ptr1Reg).addImm(0).addImm(61);
    else
      BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
        .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
    BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
        .addReg(newval).addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
        .addReg(oldval).addReg(ShiftReg);
    if (is8bit)
      BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
    else {
      BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
      BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
        .addReg(Mask3Reg).addImm(65535);
    }
    BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
        .addReg(Mask2Reg).addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
        .addReg(NewVal2Reg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
        .addReg(OldVal2Reg).addReg(MaskReg);

    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
        .addReg(ZeroReg).addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
        .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
        .addReg(TmpReg).addReg(OldVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
        .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
        .addReg(Tmp2Reg).addReg(NewVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
        .addReg(ZeroReg).addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
        .addReg(ZeroReg).addReg(PtrReg);
    BB->addSuccessor(exitMBB);

    //  exitMBB:
    //   ...
    BB = exitMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpReg)
        .addReg(ShiftReg);
  } else if (MI->getOpcode() == PPC::FADDrtz) {
    // This pseudo performs an FADD with rounding mode temporarily forced
    // to round-to-zero.  We emit this via custom inserter since the FPSCR
    // is not modeled at the SelectionDAG level.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src1 = MI->getOperand(1).getReg();
    unsigned Src2 = MI->getOperand(2).getReg();
    DebugLoc dl   = MI->getDebugLoc();

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);

    // Set rounding mode to round-to-zero.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);

    // Perform addition.
    BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);

    // Restore FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF)).addImm(1).addReg(MFFSReg);
  } else {
    llvm_unreachable("Unexpected instr type to insert");
  }

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue PPCTargetLowering::DAGCombineFastRecip(SDValue Op,
                                               DAGCombinerInfo &DCI) const {
  if (DCI.isAfterLegalizeVectorOps())
    return SDValue();

  EVT VT = Op.getValueType();

  if ((VT == MVT::f32 && PPCSubTarget.hasFRES()) ||
      (VT == MVT::f64 && PPCSubTarget.hasFRE()) ||
      (VT == MVT::v4f32 && PPCSubTarget.hasAltivec())) {

    // Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
    // For the reciprocal, we need to find the zero of the function:
    //   F(X) = A X - 1 [which has a zero at X = 1/A]
    //     =>
    //   X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form
    //     does not require additional intermediate precision]

    // Convergence is quadratic, so we essentially double the number of digits
    // correct after every iteration. The minimum architected relative
    // accuracy is 2^-5. When hasRecipPrec(), this is 2^-14. IEEE float has
    // 23 digits and double has 52 digits.
    int Iterations = PPCSubTarget.hasRecipPrec() ? 1 : 3;
    if (VT.getScalarType() == MVT::f64)
      ++Iterations;

    SelectionDAG &DAG = DCI.DAG;
    SDLoc dl(Op);

    SDValue FPOne =
      DAG.getConstantFP(1.0, VT.getScalarType());
    if (VT.isVector()) {
      assert(VT.getVectorNumElements() == 4 &&
             "Unknown vector type");
      FPOne = DAG.getNode(ISD::BUILD_VECTOR, dl, VT,
                          FPOne, FPOne, FPOne, FPOne);
    }

    SDValue Est = DAG.getNode(PPCISD::FRE, dl, VT, Op);
    DCI.AddToWorklist(Est.getNode());

    // Newton iterations: Est = Est + Est (1 - Arg * Est)
    for (int i = 0; i < Iterations; ++i) {
      SDValue NewEst = DAG.getNode(ISD::FMUL, dl, VT, Op, Est);
      DCI.AddToWorklist(NewEst.getNode());

      NewEst = DAG.getNode(ISD::FSUB, dl, VT, FPOne, NewEst);
      DCI.AddToWorklist(NewEst.getNode());

      NewEst = DAG.getNode(ISD::FMUL, dl, VT, Est, NewEst);
      DCI.AddToWorklist(NewEst.getNode());

      Est = DAG.getNode(ISD::FADD, dl, VT, Est, NewEst);
      DCI.AddToWorklist(Est.getNode());
    }

    return Est;
  }

  return SDValue();
}

SDValue PPCTargetLowering::DAGCombineFastRecipFSQRT(SDValue Op,
                                                    DAGCombinerInfo &DCI) const {
  if (DCI.isAfterLegalizeVectorOps())
    return SDValue();

  EVT VT = Op.getValueType();

  if ((VT == MVT::f32 && PPCSubTarget.hasFRSQRTES()) ||
      (VT == MVT::f64 && PPCSubTarget.hasFRSQRTE()) ||
      (VT == MVT::v4f32 && PPCSubTarget.hasAltivec())) {

    // Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
    // For the reciprocal sqrt, we need to find the zero of the function:
    //   F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)]
    //     =>
    //   X_{i+1} = X_i (1.5 - A X_i^2 / 2)
    // As a result, we precompute A/2 prior to the iteration loop.

    // Convergence is quadratic, so we essentially double the number of digits
    // correct after every iteration. The minimum architected relative
    // accuracy is 2^-5. When hasRecipPrec(), this is 2^-14. IEEE float has
    // 23 digits and double has 52 digits.
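    // Working those iteration counts out from the numbers above: squaring
    // the relative error each step, 2^-5 becomes 2^-10, 2^-20, 2^-40 after
    // three iterations (enough for f32's 24-bit significand) and 2^-80
    // after the fourth added for f64. With hasRecipPrec(), 2^-14 reaches
    // 2^-28 in one step and 2^-56 in the two steps used for f64.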
    int Iterations = PPCSubTarget.hasRecipPrec() ? 1 : 3;
    if (VT.getScalarType() == MVT::f64)
      ++Iterations;

    SelectionDAG &DAG = DCI.DAG;
    SDLoc dl(Op);

    SDValue FPThreeHalves =
      DAG.getConstantFP(1.5, VT.getScalarType());
    if (VT.isVector()) {
      assert(VT.getVectorNumElements() == 4 &&
             "Unknown vector type");
      FPThreeHalves = DAG.getNode(ISD::BUILD_VECTOR, dl, VT,
                                  FPThreeHalves, FPThreeHalves,
                                  FPThreeHalves, FPThreeHalves);
    }

    SDValue Est = DAG.getNode(PPCISD::FRSQRTE, dl, VT, Op);
    DCI.AddToWorklist(Est.getNode());

    // We now need 0.5*Arg which we can write as (1.5*Arg - Arg) so that
    // this entire sequence requires only one FP constant.
    SDValue HalfArg = DAG.getNode(ISD::FMUL, dl, VT, FPThreeHalves, Op);
    DCI.AddToWorklist(HalfArg.getNode());

    HalfArg = DAG.getNode(ISD::FSUB, dl, VT, HalfArg, Op);
    DCI.AddToWorklist(HalfArg.getNode());

    // Newton iterations: Est = Est * (1.5 - HalfArg * Est * Est)
    for (int i = 0; i < Iterations; ++i) {
      SDValue NewEst = DAG.getNode(ISD::FMUL, dl, VT, Est, Est);
      DCI.AddToWorklist(NewEst.getNode());

      NewEst = DAG.getNode(ISD::FMUL, dl, VT, HalfArg, NewEst);
      DCI.AddToWorklist(NewEst.getNode());

      NewEst = DAG.getNode(ISD::FSUB, dl, VT, FPThreeHalves, NewEst);
      DCI.AddToWorklist(NewEst.getNode());

      Est = DAG.getNode(ISD::FMUL, dl, VT, Est, NewEst);
      DCI.AddToWorklist(Est.getNode());
    }

    return Est;
  }

  return SDValue();
}

// Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
// not enforce equality of the chain operands.
static bool isConsecutiveLS(LSBaseSDNode *LS, LSBaseSDNode *Base,
                            unsigned Bytes, int Dist,
                            SelectionDAG &DAG) {
  EVT VT = LS->getMemoryVT();
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue Loc = LS->getBasePtr();
  SDValue BaseLoc = Base->getBasePtr();
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
  }

  // Handle X+C
  if (DAG.isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
      cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
    return true;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const GlobalValue *GV1 = NULL;
  const GlobalValue *GV2 = NULL;
  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}
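
// For example, isConsecutiveLS(LS, Base, 16, 1, DAG) asks whether LS
// accesses the 16 bytes immediately after Base's 16-byte access; a negative
// Dist would likewise test for the slot immediately before it.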

// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking
// through token factors and other loads (but nothing else). As a result, a
// true result indicates that it is safe to create a new consecutive load
// adjacent to the load provided.
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
  SDValue Chain = LD->getChain();
  EVT VT = LD->getMemoryVT();

  SmallSet<SDNode *, 16> LoadRoots;
  SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
  SmallSet<SDNode *, 16> Visited;

  // First, search up the chain, branching to follow all token-factor operands.
  // If we find a consecutive load, then we're done, otherwise, record all
  // nodes just above the top-level loads and token factors.
  while (!Queue.empty()) {
    SDNode *ChainNext = Queue.pop_back_val();
    if (!Visited.insert(ChainNext))
      continue;

    if (LoadSDNode *ChainLD = dyn_cast<LoadSDNode>(ChainNext)) {
      if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
        return true;

      if (!Visited.count(ChainLD->getChain().getNode()))
        Queue.push_back(ChainLD->getChain().getNode());
    } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
      for (SDNode::op_iterator O = ChainNext->op_begin(),
           OE = ChainNext->op_end(); O != OE; ++O)
        if (!Visited.count(O->getNode()))
          Queue.push_back(O->getNode());
    } else
      LoadRoots.insert(ChainNext);
  }

  // Second, search down the chain, starting from the top-level nodes recorded
  // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look
  // through all loads (just the chain uses) and token factors to find a
  // consecutive load.
  Visited.clear();
  Queue.clear();

  for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
       IE = LoadRoots.end(); I != IE; ++I) {
    Queue.push_back(*I);

    while (!Queue.empty()) {
      SDNode *LoadRoot = Queue.pop_back_val();
      if (!Visited.insert(LoadRoot))
        continue;

      if (LoadSDNode *ChainLD = dyn_cast<LoadSDNode>(LoadRoot))
        if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
          return true;

      for (SDNode::use_iterator UI = LoadRoot->use_begin(),
           UE = LoadRoot->use_end(); UI != UE; ++UI)
        if (((isa<LoadSDNode>(*UI) &&
            cast<LoadSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
            UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
          Queue.push_back(*UI);
    }
  }

  return false;
}

SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  const TargetMachine &TM = getTargetMachine();
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case PPCISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue())   // 0 << V -> 0.
        return N->getOperand(0);
    }
    break;
  case PPCISD::SRL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue())   // 0 >>u V -> 0.
        return N->getOperand(0);
    }
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue() ||   //  0 >>s V -> 0.
          C->isAllOnesValue())  // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;
  case ISD::FDIV: {
    assert(TM.Options.UnsafeFPMath &&
           "Reciprocal estimates require UnsafeFPMath");

    if (N->getOperand(1).getOpcode() == ISD::FSQRT) {
      SDValue RV =
        DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0), DCI);
      if (RV.getNode() != 0) {
        DCI.AddToWorklist(RV.getNode());
        return DAG.getNode(ISD::FMUL, dl, N->getValueType(0),
                           N->getOperand(0), RV);
      }
    } else if (N->getOperand(1).getOpcode() == ISD::FP_EXTEND &&
               N->getOperand(1).getOperand(0).getOpcode() == ISD::FSQRT) {
      SDValue RV =
        DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0).getOperand(0),
                                 DCI);
      if (RV.getNode() != 0) {
        DCI.AddToWorklist(RV.getNode());
        RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N->getOperand(1)),
                         N->getValueType(0), RV);
        DCI.AddToWorklist(RV.getNode());
        return DAG.getNode(ISD::FMUL, dl, N->getValueType(0),
                           N->getOperand(0), RV);
      }
    } else if (N->getOperand(1).getOpcode() == ISD::FP_ROUND &&
               N->getOperand(1).getOperand(0).getOpcode() == ISD::FSQRT) {
      SDValue RV =
        DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0).getOperand(0),
                                 DCI);
      if (RV.getNode() != 0) {
        DCI.AddToWorklist(RV.getNode());
        RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N->getOperand(1)),
                         N->getValueType(0), RV,
                         N->getOperand(1).getOperand(1));
        DCI.AddToWorklist(RV.getNode());
        return DAG.getNode(ISD::FMUL, dl, N->getValueType(0),
                           N->getOperand(0), RV);
      }
    }

    SDValue RV = DAGCombineFastRecip(N->getOperand(1), DCI);
    if (RV.getNode() != 0) {
      DCI.AddToWorklist(RV.getNode());
      return DAG.getNode(ISD::FMUL, dl, N->getValueType(0),
                         N->getOperand(0), RV);
    }

    }
    break;
  case ISD::FSQRT: {
    assert(TM.Options.UnsafeFPMath &&
           "Reciprocal estimates require UnsafeFPMath");

    // Compute this as 1/(1/sqrt(X)), which is the reciprocal of the
    // reciprocal sqrt.
    SDValue RV = DAGCombineFastRecipFSQRT(N->getOperand(0), DCI);
    if (RV.getNode() != 0) {
      DCI.AddToWorklist(RV.getNode());
      RV = DAGCombineFastRecip(RV, DCI);
      if (RV.getNode() != 0) {
        // Unfortunately, RV is now NaN if the input was exactly 0. Select out
        // this case and force the answer to 0.

        EVT VT = RV.getValueType();

        SDValue Zero = DAG.getConstantFP(0.0, VT.getScalarType());
        if (VT.isVector()) {
          assert(VT.getVectorNumElements() == 4 && "Unknown vector type");
          Zero = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Zero, Zero, Zero, Zero);
        }

        SDValue ZeroCmp =
          DAG.getSetCC(dl, getSetCCResultType(*DAG.getContext(), VT),
                       N->getOperand(0), Zero, ISD::SETEQ);
        DCI.AddToWorklist(ZeroCmp.getNode());
        DCI.AddToWorklist(RV.getNode());

        RV = DAG.getNode(VT.isVector() ? ISD::VSELECT : ISD::SELECT, dl, VT,
                         ZeroCmp, Zero, RV);
        return RV;
      }
    }

    }
    break;
  case ISD::SINT_TO_FP:
    if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
      if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
        // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
        // We allow the src/dst to be either f32/f64, but the intermediate
        // type must be i64.
        if (N->getOperand(0).getValueType() == MVT::i64 &&
            N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) {
          SDValue Val = N->getOperand(0).getOperand(0);
          if (Val.getValueType() == MVT::f32) {
            Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
            DCI.AddToWorklist(Val.getNode());
          }

          Val = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Val);
          DCI.AddToWorklist(Val.getNode());
          Val = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Val);
          DCI.AddToWorklist(Val.getNode());
          if (N->getValueType(0) == MVT::f32) {
            Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val,
                              DAG.getIntPtrConstant(0));
            DCI.AddToWorklist(Val.getNode());
          }
          return Val;
        } else if (N->getOperand(0).getValueType() == MVT::i32) {
          // If the intermediate type is i32, we can avoid the load/store here
          // too.
        }
      }
    }
    break;
  case ISD::STORE:
    // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
    if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
        !cast<StoreSDNode>(N)->isTruncatingStore() &&
        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
        N->getOperand(1).getValueType() == MVT::i32 &&
        N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
      SDValue Val = N->getOperand(1).getOperand(0);
      if (Val.getValueType() == MVT::f32) {
        Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
        DCI.AddToWorklist(Val.getNode());
      }
      Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
      DCI.AddToWorklist(Val.getNode());

      SDValue Ops[] = {
        N->getOperand(0), Val, N->getOperand(2),
        DAG.getValueType(N->getOperand(1).getValueType())
      };

      Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
              DAG.getVTList(MVT::Other), Ops, array_lengthof(Ops),
              cast<StoreSDNode>(N)->getMemoryVT(),
              cast<StoreSDNode>(N)->getMemOperand());
      DCI.AddToWorklist(Val.getNode());
      return Val;
    }
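    // This combine works because fctiwz leaves its i32 result in the low
    // word of a floating-point register and stfiwx stores exactly that low
    // word, so the conversion and store never have to round-trip the value
    // through a GPR or a stack slot.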

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (cast<StoreSDNode>(N)->isUnindexed() &&
        N->getOperand(1).getOpcode() == ISD::BSWAP &&
        N->getOperand(1).getNode()->hasOneUse() &&
        (N->getOperand(1).getValueType() == MVT::i32 ||
         N->getOperand(1).getValueType() == MVT::i16 ||
         (TM.getSubtarget<PPCSubtarget>().hasLDBRX() &&
          TM.getSubtarget<PPCSubtarget>().isPPC64() &&
          N->getOperand(1).getValueType() == MVT::i64))) {
      SDValue BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);

      SDValue Ops[] = {
        N->getOperand(0), BSwapOp, N->getOperand(2),
        DAG.getValueType(N->getOperand(1).getValueType())
      };
      return
        DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
                                Ops, array_lengthof(Ops),
                                cast<StoreSDNode>(N)->getMemoryVT(),
                                cast<StoreSDNode>(N)->getMemOperand());
    }
    break;
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT VT = LD->getValueType(0);
    Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
    unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty);
    if (ISD::isNON_EXTLoad(N) && VT.isVector() &&
        TM.getSubtarget<PPCSubtarget>().hasAltivec() &&
        (VT == MVT::v16i8 || VT == MVT::v8i16 ||
         VT == MVT::v4i32 || VT == MVT::v4f32) &&
        LD->getAlignment() < ABIAlignment) {
      // This is a type-legal unaligned Altivec load.
      SDValue Chain = LD->getChain();
      SDValue Ptr = LD->getBasePtr();

      // This implements the loading of unaligned vectors as described in
      // the venerable Apple Velocity Engine overview. Specifically:
      // https://developer.apple.com/hardwaredrivers/ve/alignment.html
      // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
      //
      // The general idea is to expand a sequence of one or more unaligned
      // loads into an alignment-based permutation-control instruction (lvsl),
      // a series of regular vector loads (which always truncate their
      // input address to an aligned address), and a series of permutations.
      // The results of these permutations are the requested loaded values.
      // The trick is that the last "extra" load is not taken from the address
      // you might suspect (sizeof(vector) bytes after the last requested
      // load), but rather sizeof(vector) - 1 bytes after the last
      // requested vector. The point of this is to avoid a page fault if the
      // base address happened to be aligned. This works because if the base
      // address is aligned, then adding less than a full vector length will
      // cause the last vector in the sequence to be (re)loaded. Otherwise,
      // the next vector will be fetched as you might suspect was necessary.
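      //
      // As a sketch, a single unaligned 16-byte load from rA expands to
      // roughly the following (illustrative only; register names invented):
      //   lvsl  v3, 0, rA       ; permute control from rA's low 4 bits
      //   lvx   v1, 0, rA       ; aligned load covering the first bytes
      //   li    r4, 15          ; sizeof(vector) - 1, per the note above
      //   lvx   v2, r4, rA      ; aligned load covering the last bytes
      //   vperm v0, v1, v2, v3  ; select the 16 requested bytes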

      // We might be able to reuse the permutation generation from
      // a different base address offset from this one by an aligned amount.
      // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
      // optimization later.
      SDValue PermCntl = BuildIntrinsicOp(Intrinsic::ppc_altivec_lvsl, Ptr,
                                          DAG, dl, MVT::v16i8);

      // Refine the alignment of the original load (a "new" load created here
      // which was identical to the first except for the alignment would be
      // merged with the existing node regardless).
      MachineFunction &MF = DAG.getMachineFunction();
      MachineMemOperand *MMO =
        MF.getMachineMemOperand(LD->getPointerInfo(),
                                LD->getMemOperand()->getFlags(),
                                LD->getMemoryVT().getStoreSize(),
                                ABIAlignment);
      LD->refineAlignment(MMO);
      SDValue BaseLoad = SDValue(LD, 0);

      // Note that the value of IncOffset (which is provided to the next
      // load's pointer info offset value, and thus used to calculate the
      // alignment), and the value of IncValue (which is actually used to
      // increment the pointer value) are different! This is because we
      // require the next load to appear to be aligned, even though it
      // is actually offset from the base pointer by a lesser amount.
      int IncOffset = VT.getSizeInBits() / 8;
      int IncValue = IncOffset;

      // Walk (both up and down) the chain looking for another load at the real
      // (aligned) offset (the alignment of the other load does not matter in
      // this case). If found, then do not use the offset reduction trick, as
      // that will prevent the loads from being later combined (as they would
      // otherwise be duplicates).
      if (!findConsecutiveLoad(LD, DAG))
        --IncValue;

      SDValue Increment = DAG.getConstant(IncValue, getPointerTy());
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);

      SDValue ExtraLoad =
        DAG.getLoad(VT, dl, Chain, Ptr,
                    LD->getPointerInfo().getWithOffset(IncOffset),
                    LD->isVolatile(), LD->isNonTemporal(),
                    LD->isInvariant(), ABIAlignment);

      SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                               BaseLoad.getValue(1), ExtraLoad.getValue(1));

      if (BaseLoad.getValueType() != MVT::v4i32)
        BaseLoad = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, BaseLoad);

      if (ExtraLoad.getValueType() != MVT::v4i32)
        ExtraLoad = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ExtraLoad);

      SDValue Perm = BuildIntrinsicOp(Intrinsic::ppc_altivec_vperm,
                                      BaseLoad, ExtraLoad, PermCntl, DAG, dl);

      if (VT != MVT::v4i32)
        Perm = DAG.getNode(ISD::BITCAST, dl, VT, Perm);

      // Now we need to be really careful about how we update the users of the
      // original load. We cannot just call DCI.CombineTo (or
      // DAG.ReplaceAllUsesWith for that matter), because the load still has
      // uses created here (the permutation for example) that need to stay.
      SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
      while (UI != UE) {
        SDUse &Use = UI.getUse();
        SDNode *User = *UI;
        // Note: BaseLoad is checked here because it might not be N, but a
        // bitcast of N.
        if (User == Perm.getNode() || User == BaseLoad.getNode() ||
            User == TF.getNode() || Use.getResNo() > 1) {
          ++UI;
          continue;
        }

        SDValue To = Use.getResNo() ? TF : Perm;
        ++UI;

        SmallVector<SDValue, 8> Ops;
        for (SDNode::op_iterator O = User->op_begin(),
             OE = User->op_end(); O != OE; ++O) {
          if (*O == Use)
            Ops.push_back(To);
          else
            Ops.push_back(*O);
        }

        DAG.UpdateNodeOperands(User, Ops.data(), Ops.size());
      }

      return SDValue(N, 0);
    }
    }
    break;
  case ISD::INTRINSIC_WO_CHAIN:
    if (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue() ==
          Intrinsic::ppc_altivec_lvsl &&
        N->getOperand(1)->getOpcode() == ISD::ADD) {
      SDValue Add = N->getOperand(1);

      if (DAG.MaskedValueIsZero(Add->getOperand(1),
            APInt::getAllOnesValue(4 /* 16 byte alignment */).zext(
              Add.getValueType().getScalarType().getSizeInBits()))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
             UE = BasePtr->use_end(); UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
              cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
                Intrinsic::ppc_altivec_lvsl) {
            // We've found another LVSL, and this address is an aligned
            // multiple of that one. The results will be the same, so use the
            // one we've just found instead.
            return SDValue(*UI, 0);
          }
        }
      }
    }

    break;
  case ISD::BSWAP:
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
    if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
        N->getOperand(0).hasOneUse() &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
         (TM.getSubtarget<PPCSubtarget>().hasLDBRX() &&
          TM.getSubtarget<PPCSubtarget>().isPPC64() &&
          N->getValueType(0) == MVT::i64))) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);
      // Create the byte-swapping load.
      SDValue Ops[] = {
        LD->getChain(),                       // Chain
        LD->getBasePtr(),                     // Ptr
        DAG.getValueType(N->getValueType(0))  // VT
      };
      SDValue BSLoad =
        DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
                                DAG.getVTList(N->getValueType(0) == MVT::i64 ?
                                              MVT::i64 : MVT::i32, MVT::Other),
                                Ops, 3, LD->getMemoryVT(), LD->getMemOperand());

      // If this is an i16 load, insert the truncate.
      SDValue ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);

      // First, combine the bswap away. This makes the value produced by the
      // load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away; we give it a bogus result value but a
      // real chain result. The result value is dead because the bswap is
      // dead.
      DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }

    break;
  case PPCISD::VCMP: {
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6
    // and a normal output).
    //
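    // Illustrative sketch (node names hypothetical): given two nodes computed
    // from the same inputs,
    //   t1 = PPCISD::VCMP  a, b, cc     ; value result only
    //   t2 = PPCISD::VCMPo a, b, cc     ; value result plus a CR6 flag
    // uses of t1 can be redirected to t2's value result, making t1 dead.
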
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo's that match.
      SDNode *VCMPoNode = 0;

      SDNode *LHSN = N->getOperand(0).getNode();
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if (UI->getOpcode() == PPCISD::VCMPo &&
            UI->getOperand(1) == N->getOperand(1) &&
            UI->getOperand(2) == N->getOperand(2) &&
            UI->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }

      // If there is no VCMPo node, or if its flag value is unused, don't
      // transform this.
      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value. If it has a
      // chain, this transformation is more complex. Note that multiple things
      // could use the value result, which we should ignore.
      SDNode *FlagUser = 0;
      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
           FlagUser == 0; ++UI) {
        assert(UI != VCMPoNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFOCRF instruction, we know this is safe.
      // Otherwise we give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFOCRF)
        return SDValue(VCMPoNode, 0);
    }
    break;
  }
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFOCRF: instead, branch directly on CR6.
    // This lowering is done pre-legalize, because the legalizer lowers the
    // predicate compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);

    // Sometimes the promoted value of the intrinsic is ANDed with some
    // non-zero value. If so, look through the AND to get to the intrinsic.
    if (LHS.getOpcode() == ISD::AND &&
        LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue()
          == Intrinsic::ppc_is_decremented_ctr_nonzero &&
        isa<ConstantSDNode>(LHS.getOperand(1)) &&
        !cast<ConstantSDNode>(LHS.getOperand(1))->getConstantIntValue()->
          isZero())
      LHS = LHS.getOperand(0);

    if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
          Intrinsic::ppc_is_decremented_ctr_nonzero &&
        isa<ConstantSDNode>(RHS)) {
      assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
             "Counter decrement comparison is not EQ or NE");

      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      bool isBDNZ = (CC == ISD::SETEQ && Val) ||
                    (CC == ISD::SETNE && !Val);

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
      assert(LHS.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
                         N->getOperand(0), N->getOperand(4));
    }

    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we
      // know that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)  // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      SDValue Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, MVT::i32)
      };
      EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
      SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
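      // (The selector below is the first argument of the vcmp*_p predicate
      // intrinsics: even values test a CR6 bit directly, odd values test its
      // inverse, as the cases that follow spell out.)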
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  }

  return SDValue();
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      KnownZero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
      KnownZero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}


/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string). Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register. This is
      // suboptimal.
      return C_Memory;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
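// For example (hypothetical IR), in
//   call i32 asm "add $0, $1, $2", "=r,r,r"(i32 %a, i32 %b)
// each 'r' constraint is classified above as C_RegisterClass, while the 'Z'
// in a constraint string such as "=Z" would be classified as C_Memory.
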
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass*>
PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &PPC::F4RCRegClass);
      if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &PPC::F8RCRegClass);
      break;
    case 'v':
      return std::make_pair(0U, &PPC::VRRCRegClass);
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  }

  std::pair<unsigned, const TargetRegisterClass*> R =
    TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit
  // registers (which we call X[0-9]+). If a 64-bit value has been requested,
  // and a 32-bit GPR has been selected, then 'upgrade' it to the 64-bit
  // parent register.
  // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
  // the AsmName field from *RegisterInfo.td, then this would not be
  // necessary.
  if (R.first && VT == MVT::i64 && PPCSubTarget.isPPC64() &&
      PPC::GPRCRegClass.contains(R.first)) {
    const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                            PPC::sub_32, &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);
  }

  return R;
}
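// For instance (registers here are illustrative), an explicit constraint
// such as "{r5}" on an i64 operand under PPC64 comes back from the generic
// lookup as the 32-bit R5; the code above rewrites it to its 64-bit parent
// X5 so the operand lives in a full-width register.
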
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  // Only support length 1 constraints.
  if (Constraint.length() > 1) return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return;  // Must be an immediate to match.
    unsigned Value = CST->getZExtValue();
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if ((short)Value == (int)Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if ((short)Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if ((Value >> 16) == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if ((int)Value > 0 && isPowerOf2_32(Value))
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit
               // constant.
      if ((short)-Value == (int)-Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
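// Usage sketch (hypothetical): in
//   asm("addi %0, %1, %2" : "=r"(dst) : "r"(src), "I"(100))
// the "I" operand matches above only because 100 fits in a signed 16-bit
// immediate; an out-of-range value produces no Result and is left to the
// generic constraint handling.
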
// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              Type *Ty) const {
  // FIXME: PPC does not allow r+i addressing modes for vectors!

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r addressing.
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = PPCSubTarget.isPPC64();
  bool isDarwinABI = PPCSubTarget.isDarwinABI();

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
      DAG.getConstant(PPCFrameLowering::getReturnSaveOffset(isPPC64,
                                                            isDarwinABI),
                      isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                   FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                     RetAddrFI, MachinePointerInfo(), false, false, false, 0);
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction()->getAttributes().hasAttribute(
        AttributeSet::FunctionIndex, Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo(), false, false,
                            false, 0);
  return FrameAddr;
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}
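// For example, @llvm.returnaddress(i32 1) lowers to a one-frame walk through
// LowerFRAMEADDR followed by a load at the ABI's return-address save offset
// within that frame.
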
/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, that means it's safe because the
/// destination alignment can satisfy any constraint. Similarly, if SrcAlign
/// is zero, there is no need to check it against the alignment requirement,
/// probably because the source does not need to be loaded. If 'IsMemset' is
/// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
/// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
/// source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign,
                                           unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  // Use the widest GPR-sized type the subtarget supports.
  return PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
}

bool PPCTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                      bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.

  if (!VT.isSimple())
    return false;

  if (VT.getSimpleVT().isVector())
    return false;

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || PPCSubTarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}
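// Note (illustrative): given the memory-op type preference above, the
// generic memcpy lowering would expand a small 32-byte copy on PPC64 into
// four i64 load/store pairs rather than eight i32 pairs.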