//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86ISelLowering.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "Utils/X86ShuffleDecode.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <bitset>
#include <cctype>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

/// Generate a DAG to grab 128 bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 instruction or a
/// simple subregister reference. Idx is an index in the 128 bits we
/// want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, DebugLoc dl) {
  EVT VT = Vec.getValueType();
  assert(VT.is256BitVector() && "Unexpected vector size!");
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/128;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant 128 bits. Generate an EXTRACT_SUBVECTOR
  // that we can match to VEXTRACTF128.
  unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits();

  // This is the index of the first element of the 128-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
                               * ElemsPerChunk);
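  // For example, with a v8i32 source (ElVT = i32, ElemsPerChunk = 4), an
  // IdxVal of 5 normalizes to 4, so the EXTRACT_SUBVECTOR below grabs
  // elements 4..7, i.e. the upper 128-bit lane.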
  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec,
                               VecIdx);

  return Result;
}

/// Generate a DAG to put 128 bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128 instruction or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec,
                                  unsigned IdxVal, SelectionDAG &DAG,
                                  DebugLoc dl) {
  // Inserting UNDEF is a no-op; the result is just Result.
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;

  EVT VT = Vec.getValueType();
  assert(VT.is128BitVector() && "Unexpected vector size!");

  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant 128 bits.
  unsigned ElemsPerChunk = 128/ElVT.getSizeInBits();

  // This is the index of the first element of the 128-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec,
                     VecIdx);
}

/// Concat two 128-bit vectors into a 256-bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   DebugLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}
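// For example, Concat128BitVectors of two v4i32 halves into a v8i32 inserts
// V1 at element 0 (the lower lane) and V2 at element NumElems/2 = 4 (the
// upper lane), which matches a VINSERTF128 pair.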

static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  bool is64Bit = Subtarget->is64Bit();

  if (Subtarget->isTargetEnvMacho()) {
    if (is64Bit)
      return new X86_64MachoTargetObjectFile();
    return new TargetLoweringObjectFileMachO();
  }

  if (Subtarget->isTargetLinux())
    return new X86LinuxTargetObjectFile();
  if (Subtarget->isTargetELF())
    return new TargetLoweringObjectFileELF();
  if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
    return new TargetLoweringObjectFileCOFF();
  llvm_unreachable("unknown subtarget type");
}

X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  RegInfo = TM.getRegisterInfo();
  TD = getTargetData();

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird: it always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, use the ILP scheduler since we have so many registers; for
  // 32-bit code, use the register-pressure-specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  // Bypass i32 division with i8 division on Atom when compiling at -O2 or
  // higher.
  if (Subtarget->hasSlowDivide() && TM.getOptLevel() >= CodeGenOpt::Default)
    addBypassSlowDivType(Type::getInt32Ty(getGlobalContext()),
                         Type::getInt8Ty(getGlobalContext()));

  if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) {
    // Set up the Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, 0);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, 0);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, 0);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, 0);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // The MS runtime is weird: it exports _setjmp, but plain longjmp
    // (no underscore)!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
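  // (After a UCOMISS/UCOMISD, "ordered equal" is ZF set with PF clear, so it
  // takes a SETE plus a SETNP rather than a single flag test; SETUNE is the
  // complementary case.)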

  // Promote all UINT_TO_FP to larger SINT_TO_FPs, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FPs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // The f32 and f64 cases are Legal, the f80 case is not.
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode f32 and f64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // The f32 and f64 cases are Legal, the f80 case is not.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
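  // For example, an f32-to-u16 conversion is performed as an f32-to-i32
  // FP_TO_SINT whose result is truncated; this is exact because every u16
  // value is representable in the non-negative range of i32.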

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }

  // TODO: when we have SSE, these could be more efficient by using
  // movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
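  // For example, an i32 x/y and x%y over the same operands CSE into a single
  // two-result node, which selects to one IDIV producing the quotient in EAX
  // and the remainder in EDX.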
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Promote the i8 variants and force them up to i32, which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  }

  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86).
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // On X86 and X86-64, atomic operations are lowered to locked instructions.
  // Locked instructions, in turn, have implicit fence semantics (all memory
  // operations are flushed before issuing the locked instruction, and they
  // are not buffered), so we can fold away the common pattern of
  // fence-atomic-fence.
  setShouldFoldAtomicFences(true);

  // Expand certain atomics.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (!Subtarget->is64Bit()) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i64, Custom);
  }

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom);
  }

  // FIXME: use subtarget debug flags.
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
                       MVT::i64 : MVT::i32, Custom);
  else if (TM.Options.EnableSegmentedStacks)
    setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
                       MVT::i64 : MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
                       MVT::i64 : MVT::i32, Expand);
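  // (Custom DYNAMIC_STACKALLOC lets the Windows lowering emit a stack probe,
  //  such as the chkstk helper, and lets the segmented-stack lowering emit
  //  its own allocation path.)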

  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f32, Expand);
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f32, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
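  // (Expanded FPOW nodes become runtime calls to powf/pow/powl via the
  //  RTLIB::POW_* entries.)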

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (int VT = MVT::FIRST_VECTOR_VALUETYPE;
       VT <= MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SUB, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::MUL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FMA, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FFLOOR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SETCC, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG2, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG10, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FEXP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FEXP2, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::TRUNCATE, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::VSELECT, (MVT::SimpleValueType)VT, Expand);
    for (int InnerVT = MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction((MVT::SimpleValueType)VT,
                          (MVT::SimpleValueType)InnerVT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
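    // (MUL on v2i64 is Custom above because SSE2 has no 64-bit element
    //  multiply; it is synthesized from PMULUDQ plus shifts and adds.)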

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors.
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal);
  }

  if (Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
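    // (These rounding operations map onto the SSE4.1 ROUNDSS/ROUNDSD and
    //  ROUNDPS/ROUNDPD instructions, whose immediate operand selects floor,
    //  ceil, truncate, or the current rounding mode.)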

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width. f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    if (Subtarget->hasAVX2()) {
      setOperationAction(ISD::SRL, MVT::v2i64, Legal);
      setOperationAction(ISD::SRL, MVT::v4i32, Legal);

      setOperationAction(ISD::SHL, MVT::v2i64, Legal);
      setOperationAction(ISD::SHL, MVT::v4i32, Legal);

      setOperationAction(ISD::SRA, MVT::v4i32, Legal);
    } else {
      setOperationAction(ISD::SRL, MVT::v2i64, Custom);
      setOperationAction(ISD::SRL, MVT::v4i32, Custom);

      setOperationAction(ISD::SHL, MVT::v2i64, Custom);
      setOperationAction(ISD::SHL, MVT::v4i32, Custom);

      setOperationAction(ISD::SRA, MVT::v4i32, Custom);
    }
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasAVX()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::FABS, MVT::v8f32, Custom);
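    // (FNEG and FABS stay Custom even with AVX: they are lowered as
    //  VXORPS/VANDPS against 256-bit sign-mask constants.)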

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
    setOperationAction(ISD::FABS, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4i64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v8i32, Legal);
    setOperationAction(ISD::VSELECT, MVT::v8f32, Legal);

    if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
      setOperationAction(ISD::FMA, MVT::v8f32, Custom);
      setOperationAction(ISD::FMA, MVT::v4f64, Custom);
      setOperationAction(ISD::FMA, MVT::v4f32, Custom);
      setOperationAction(ISD::FMA, MVT::v2f64, Custom);
      setOperationAction(ISD::FMA, MVT::f32, Custom);
      setOperationAction(ISD::FMA, MVT::f64, Custom);
    }

    if (Subtarget->hasAVX2()) {
      setOperationAction(ISD::ADD, MVT::v4i64, Legal);
      setOperationAction(ISD::ADD, MVT::v8i32, Legal);
      setOperationAction(ISD::ADD, MVT::v16i16, Legal);
      setOperationAction(ISD::ADD, MVT::v32i8, Legal);

      setOperationAction(ISD::SUB, MVT::v4i64, Legal);
      setOperationAction(ISD::SUB, MVT::v8i32, Legal);
      setOperationAction(ISD::SUB, MVT::v16i16, Legal);
      setOperationAction(ISD::SUB, MVT::v32i8, Legal);

      setOperationAction(ISD::MUL, MVT::v4i64, Custom);
      setOperationAction(ISD::MUL, MVT::v8i32, Legal);
      setOperationAction(ISD::MUL, MVT::v16i16, Legal);
      // Don't lower v32i8 because there is no 128-bit byte mul.

      setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);

      setOperationAction(ISD::SRL, MVT::v4i64, Legal);
      setOperationAction(ISD::SRL, MVT::v8i32, Legal);

      setOperationAction(ISD::SHL, MVT::v4i64, Legal);
      setOperationAction(ISD::SHL, MVT::v8i32, Legal);

      setOperationAction(ISD::SRA, MVT::v8i32, Legal);
    } else {
      setOperationAction(ISD::ADD, MVT::v4i64, Custom);
      setOperationAction(ISD::ADD, MVT::v8i32, Custom);
      setOperationAction(ISD::ADD, MVT::v16i16, Custom);
      setOperationAction(ISD::ADD, MVT::v32i8, Custom);

      setOperationAction(ISD::SUB, MVT::v4i64, Custom);
      setOperationAction(ISD::SUB, MVT::v8i32, Custom);
      setOperationAction(ISD::SUB, MVT::v16i16, Custom);
      setOperationAction(ISD::SUB, MVT::v32i8, Custom);

      setOperationAction(ISD::MUL, MVT::v4i64, Custom);
      setOperationAction(ISD::MUL, MVT::v8i32, Custom);
      setOperationAction(ISD::MUL, MVT::v16i16, Custom);
      // Don't lower v32i8 because there is no 128-bit byte mul.

      setOperationAction(ISD::SRL, MVT::v4i64, Custom);
      setOperationAction(ISD::SRL, MVT::v8i32, Custom);

      setOperationAction(ISD::SHL, MVT::v4i64, Custom);
      setOperationAction(ISD::SHL, MVT::v8i32, Custom);

      setOperationAction(ISD::SRA, MVT::v8i32, Custom);
    }

    // Custom lower several nodes for 256-bit types.
    for (int i = MVT::FIRST_VECTOR_VALUETYPE;
         i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Extract subvector is special because the value type
      // (result) is 128-bit but the source is 256-bit wide.
      if (VT.is128BitVector())
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      // Do not attempt to custom lower other non-256-bit vectors.
      if (!VT.is256BitVector())
        continue;

      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    }

    // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
    for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-256-bit vectors.
      if (!VT.is256BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i64);
    }
  }

  // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
  // of this type with custom code.
  for (int VT = MVT::FIRST_VECTOR_VALUETYPE;
       VT != MVT::LAST_VECTOR_VALUETYPE; VT++) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT,
                       Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
  // handle type legalization for these operations here.
  //
  // FIXME: We really should do custom legalization for addition and
  // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
  // than generic legalization for 64-bit multiplication-with-overflow, though.
  for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
    // Add/Sub/Mul with overflow operations are custom lowered.
    MVT VT = IntVTs[i];
    setOperationAction(ISD::SADDO, VT, Custom);
    setOperationAction(ISD::UADDO, VT, Custom);
    setOperationAction(ISD::SSUBO, VT, Custom);
    setOperationAction(ISD::USUBO, VT, Custom);
    setOperationAction(ISD::SMULO, VT, Custom);
    setOperationAction(ISD::UMULO, VT, Custom);
  }

  // There are no 8-bit 3-address imul/mul instructions.
  setOperationAction(ISD::SMULO, MVT::i8, Expand);
  setOperationAction(ISD::UMULO, MVT::i8, Expand);

  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit mode.
    setLibcallName(RTLIB::SHL_I128, 0);
    setLibcallName(RTLIB::SRL_I128, 0);
    setLibcallName(RTLIB::SRA_I128, 0);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::VSELECT);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::FP_TO_SINT);
  if (Subtarget->is64Bit())
    setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::XOR);

  computeRegisterProperties();

  // On Darwin, -Os means optimize for size without hurting performance, so
  // do not reduce the limits.
  maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
  maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
  maxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
  maxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  maxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
  maxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  setPrefLoopAlignment(4); // 2^4 bytes.
  benefitFromCodePlacementOpt = true;

  // Predictable cmovs don't hurt on Atom because it's in-order.
  predictableSelectIsExpensive = !Subtarget->isAtom();

  setPrefFunctionAlignment(4); // 2^4 bytes.
}

EVT X86TargetLowering::getSetCCResultType(EVT VT) const {
  if (!VT.isVector()) return MVT::i8;
  return VT.changeVectorElementTypeToInteger();
}
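// For example, getSetCCResultType above yields i8 for a scalar compare
// (matching SETcc's 8-bit result), and v4i32 for a setcc on v4f32
// (element-wise all-ones/all-zeros masks).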

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
  if (Subtarget->is64Bit()) {
    // Max of 8 and alignment of type.
    unsigned TyAlign = TD->getABITypeAlignment(Ty);
    if (TyAlign > 8)
      return TyAlign;
    return 8;
  }

  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}
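// For example, on 32-bit x86 with SSE1 a byval struct containing a
// <4 x float> member is aligned to 16 bytes, while a struct of plain ints
// stays at the default 4-byte boundary.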
1368 return MVT::f64;
1369 }
1370 }
1371 if (Subtarget->is64Bit() && Size >= 8)
1372 return MVT::i64;
1373 return MVT::i32;
1374}
1375
1376/// getJumpTableEncoding - Return the entry encoding for a jump table in the
1377/// current function. The returned value is a member of the
1378/// MachineJumpTableInfo::JTEntryKind enum.
1379unsigned X86TargetLowering::getJumpTableEncoding() const {
1380 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
1381 // symbol.
1382 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1383 Subtarget->isPICStyleGOT())
1384 return MachineJumpTableInfo::EK_Custom32;
1385
1386 // Otherwise, use the normal jump table encoding heuristics.
1387 return TargetLowering::getJumpTableEncoding();
1388}
1389
1390const MCExpr *
1391X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1392 const MachineBasicBlock *MBB,
1393 unsigned uid, MCContext &Ctx) const {
1394 assert(getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1395 Subtarget->isPICStyleGOT());
1396 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
1397 // entries.
1398 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1399 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1400}
1401
1402/// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
1403/// jumptable.
1404SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1405 SelectionDAG &DAG) const {
1406 if (!Subtarget->is64Bit())
1407 // This doesn't have DebugLoc associated with it, but is not really the
1408 // same as a Register.
1409 return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy());
1410 return Table;
1411}
1412
1413/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
1414/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
1415/// MCExpr.
1416const MCExpr *X86TargetLowering::
1417getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1418 MCContext &Ctx) const {
1419 // X86-64 uses RIP relative addressing based on the jump table label.
1420 if (Subtarget->isPICStyleRIPRel())
1421 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1422
1423 // Otherwise, the reference is relative to the PIC base.
1424 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1425}
1426
1427// FIXME: Why is this routine here? Move to RegInfo!
1428std::pair<const TargetRegisterClass*, uint8_t>
1429X86TargetLowering::findRepresentativeClass(EVT VT) const{
1430 const TargetRegisterClass *RRC = 0;
1431 uint8_t Cost = 1;
1432 switch (VT.getSimpleVT().SimpleTy) {
1433 default:
1434 return TargetLowering::findRepresentativeClass(VT);
1435 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1436 RRC = Subtarget->is64Bit() ?
1437 (const TargetRegisterClass*)&X86::GR64RegClass : 1438 (const TargetRegisterClass*)&X86::GR32RegClass; 1439 break; 1440 case MVT::x86mmx: 1441 RRC = &X86::VR64RegClass; 1442 break; 1443 case MVT::f32: case MVT::f64: 1444 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: 1445 case MVT::v4f32: case MVT::v2f64: 1446 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32: 1447 case MVT::v4f64: 1448 RRC = &X86::VR128RegClass; 1449 break; 1450 } 1451 return std::make_pair(RRC, Cost); 1452} 1453 1454bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace, 1455 unsigned &Offset) const { 1456 if (!Subtarget->isTargetLinux()) 1457 return false; 1458 1459 if (Subtarget->is64Bit()) { 1460 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs: 1461 Offset = 0x28; 1462 if (getTargetMachine().getCodeModel() == CodeModel::Kernel) 1463 AddressSpace = 256; 1464 else 1465 AddressSpace = 257; 1466 } else { 1467 // %gs:0x14 on i386 1468 Offset = 0x14; 1469 AddressSpace = 256; 1470 } 1471 return true; 1472} 1473 1474 1475//===----------------------------------------------------------------------===// 1476// Return Value Calling Convention Implementation 1477//===----------------------------------------------------------------------===// 1478 1479#include "X86GenCallingConv.inc" 1480 1481bool 1482X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, 1483 MachineFunction &MF, bool isVarArg, 1484 const SmallVectorImpl<ISD::OutputArg> &Outs, 1485 LLVMContext &Context) const { 1486 SmallVector<CCValAssign, 16> RVLocs; 1487 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1488 RVLocs, Context); 1489 return CCInfo.CheckReturn(Outs, RetCC_X86); 1490} 1491 1492SDValue 1493X86TargetLowering::LowerReturn(SDValue Chain, 1494 CallingConv::ID CallConv, bool isVarArg, 1495 const SmallVectorImpl<ISD::OutputArg> &Outs, 1496 const SmallVectorImpl<SDValue> &OutVals, 1497 DebugLoc dl, SelectionDAG &DAG) const { 1498 MachineFunction &MF = DAG.getMachineFunction(); 1499 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1500 1501 SmallVector<CCValAssign, 16> RVLocs; 1502 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1503 RVLocs, *DAG.getContext()); 1504 CCInfo.AnalyzeReturn(Outs, RetCC_X86); 1505 1506 // Add the regs to the liveout set for the function. 1507 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 1508 for (unsigned i = 0; i != RVLocs.size(); ++i) 1509 if (RVLocs[i].isRegLoc() && !MRI.isLiveOut(RVLocs[i].getLocReg())) 1510 MRI.addLiveOut(RVLocs[i].getLocReg()); 1511 1512 SDValue Flag; 1513 1514 SmallVector<SDValue, 6> RetOps; 1515 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 1516 // Operand #1 = Bytes To Pop 1517 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), 1518 MVT::i16)); 1519 1520 // Copy the result values into the output registers. 
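// For example (illustrative), an i32 value returned in EAX becomes a
// CopyToReg(EAX) in the loop below, with the resulting glue threaded into
// the RET_FLAG node built at the end of this function.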
1521 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1522 CCValAssign &VA = RVLocs[i]; 1523 assert(VA.isRegLoc() && "Can only return in registers!"); 1524 SDValue ValToCopy = OutVals[i]; 1525 EVT ValVT = ValToCopy.getValueType(); 1526 1527 // Promote values to the appropriate types 1528 if (VA.getLocInfo() == CCValAssign::SExt) 1529 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy); 1530 else if (VA.getLocInfo() == CCValAssign::ZExt) 1531 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy); 1532 else if (VA.getLocInfo() == CCValAssign::AExt) 1533 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy); 1534 else if (VA.getLocInfo() == CCValAssign::BCvt) 1535 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy); 1536 1537 // If this is x86-64, and we disabled SSE, we can't return FP values, 1538 // or SSE or MMX vectors. 1539 if ((ValVT == MVT::f32 || ValVT == MVT::f64 || 1540 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) && 1541 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) { 1542 report_fatal_error("SSE register return with SSE disabled"); 1543 } 1544 // Likewise we can't return F64 values with SSE1 only. gcc does so, but 1545 // llvm-gcc has never done it right and no one has noticed, so this 1546 // should be OK for now. 1547 if (ValVT == MVT::f64 && 1548 (Subtarget->is64Bit() && !Subtarget->hasSSE2())) 1549 report_fatal_error("SSE2 register return with SSE2 disabled"); 1550 1551 // Returns in ST0/ST1 are handled specially: these are pushed as operands to 1552 // the RET instruction and handled by the FP Stackifier. 1553 if (VA.getLocReg() == X86::ST0 || 1554 VA.getLocReg() == X86::ST1) { 1555 // If this is a copy from an xmm register to ST(0), use an FPExtend to 1556 // change the value to the FP stack register class. 1557 if (isScalarFPTypeInSSEReg(VA.getValVT())) 1558 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy); 1559 RetOps.push_back(ValToCopy); 1560 // Don't emit a copytoreg. 1561 continue; 1562 } 1563 1564 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64 1565 // which is returned in RAX / RDX. 1566 if (Subtarget->is64Bit()) { 1567 if (ValVT == MVT::x86mmx) { 1568 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { 1569 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy); 1570 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 1571 ValToCopy); 1572 // If we don't have SSE2 available, convert to v4f32 so the generated 1573 // register is legal. 1574 if (!Subtarget->hasSSE2()) 1575 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy); 1576 } 1577 } 1578 } 1579 1580 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag); 1581 Flag = Chain.getValue(1); 1582 } 1583 1584 // The x86-64 ABI for returning structs by value requires that we copy 1585 // the sret argument into %rax for the return. We saved the argument into 1586 // a virtual register in the entry block, so now we copy the value out 1587 // and into %rax. 
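// Illustrative IR for this case (hypothetical function name):
//   define void @f(%struct.S* sret %agg.result) { ... }
// On x86-64 the sret pointer arrives in RDI; LowerFormalArguments stashed it
// in SRetReturnReg, and here it is copied back out into RAX.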
1588 if (Subtarget->is64Bit() && 1589 DAG.getMachineFunction().getFunction()->hasStructRetAttr()) { 1590 MachineFunction &MF = DAG.getMachineFunction(); 1591 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1592 unsigned Reg = FuncInfo->getSRetReturnReg(); 1593 assert(Reg && 1594 "SRetReturnReg should have been set in LowerFormalArguments()."); 1595 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); 1596 1597 Chain = DAG.getCopyToReg(Chain, dl, X86::RAX, Val, Flag); 1598 Flag = Chain.getValue(1); 1599 1600 // RAX now acts like a return value. 1601 MRI.addLiveOut(X86::RAX); 1602 } 1603 1604 RetOps[0] = Chain; // Update chain. 1605 1606 // Add the flag if we have it. 1607 if (Flag.getNode()) 1608 RetOps.push_back(Flag); 1609 1610 return DAG.getNode(X86ISD::RET_FLAG, dl, 1611 MVT::Other, &RetOps[0], RetOps.size()); 1612} 1613 1614bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { 1615 if (N->getNumValues() != 1) 1616 return false; 1617 if (!N->hasNUsesOfValue(1, 0)) 1618 return false; 1619 1620 SDValue TCChain = Chain; 1621 SDNode *Copy = *N->use_begin(); 1622 if (Copy->getOpcode() == ISD::CopyToReg) { 1623 // If the copy has a glue operand, we conservatively assume it isn't safe to 1624 // perform a tail call. 1625 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 1626 return false; 1627 TCChain = Copy->getOperand(0); 1628 } else if (Copy->getOpcode() != ISD::FP_EXTEND) 1629 return false; 1630 1631 bool HasRet = false; 1632 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1633 UI != UE; ++UI) { 1634 if (UI->getOpcode() != X86ISD::RET_FLAG) 1635 return false; 1636 HasRet = true; 1637 } 1638 1639 if (!HasRet) 1640 return false; 1641 1642 Chain = TCChain; 1643 return true; 1644} 1645 1646EVT 1647X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT, 1648 ISD::NodeType ExtendKind) const { 1649 MVT ReturnMVT; 1650 // TODO: Is this also valid on 32-bit? 1651 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND) 1652 ReturnMVT = MVT::i8; 1653 else 1654 ReturnMVT = MVT::i32; 1655 1656 EVT MinVT = getRegisterType(Context, ReturnMVT); 1657 return VT.bitsLT(MinVT) ? MinVT : VT; 1658} 1659 1660/// LowerCallResult - Lower the result values of a call into the 1661/// appropriate copies out of appropriate physical registers. 1662/// 1663SDValue 1664X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1665 CallingConv::ID CallConv, bool isVarArg, 1666 const SmallVectorImpl<ISD::InputArg> &Ins, 1667 DebugLoc dl, SelectionDAG &DAG, 1668 SmallVectorImpl<SDValue> &InVals) const { 1669 1670 // Assign locations to each value returned by this call. 1671 SmallVector<CCValAssign, 16> RVLocs; 1672 bool Is64Bit = Subtarget->is64Bit(); 1673 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1674 getTargetMachine(), RVLocs, *DAG.getContext()); 1675 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 1676 1677 // Copy all of the result registers out of their specified physreg. 
1678 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1679 CCValAssign &VA = RVLocs[i];
1680 EVT CopyVT = VA.getValVT();
1681
1682 // If this is x86-64, and we disabled SSE, we can't return FP values
1683 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
1684 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
1685 report_fatal_error("SSE register return with SSE disabled");
1686 }
1687
1688 SDValue Val;
1689
1690 // If this is a call to a function that returns an fp value on the floating
1691 // point stack, we must guarantee the value is popped from the stack, so
1692 // a CopyFromReg is not good enough - the copy instruction may be eliminated
1693 // if the return value is not used. We use the FpPOP_RETVAL instruction
1694 // instead.
1695 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) {
1696 // If we prefer to use the value in xmm registers, copy it out as f80 and
1697 // use a truncate to move it from fp stack reg to xmm reg.
1698 if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80;
1699 SDValue Ops[] = { Chain, InFlag };
1700 Chain = SDValue(DAG.getMachineNode(X86::FpPOP_RETVAL, dl, CopyVT,
1701 MVT::Other, MVT::Glue, Ops, 2), 1);
1702 Val = Chain.getValue(0);
1703
1704 // Round the f80 to the right size, which also moves it to the appropriate
1705 // xmm register.
1706 if (CopyVT != VA.getValVT())
1707 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
1708 // This truncation won't change the value.
1709 DAG.getIntPtrConstant(1));
1710 } else {
1711 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
1712 CopyVT, InFlag).getValue(1);
1713 Val = Chain.getValue(0);
1714 }
1715 InFlag = Chain.getValue(2);
1716 InVals.push_back(Val);
1717 }
1718
1719 return Chain;
1720}
1721
1722
1723//===----------------------------------------------------------------------===//
1724// C & StdCall & Fast Calling Convention implementation
1725//===----------------------------------------------------------------------===//
1726// The StdCall calling convention seems to be standard for many Windows API
1727// routines. It differs from the C calling convention just a little: the
1728// callee should clean up the stack, not the caller. Symbols should also be
1729// decorated in some fancy way :) It doesn't support any vector arguments.
1730// For info on the fast calling convention see Fast Calling Convention (tail call)
1731// implementation LowerX86_32FastCCCallTo.
1732
1733/// CallIsStructReturn - Determines whether a call uses struct return
1734/// semantics.
1735enum StructReturnType {
1736 NotStructReturn,
1737 RegStructReturn,
1738 StackStructReturn
1739};
1740static StructReturnType
1741callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
1742 if (Outs.empty())
1743 return NotStructReturn;
1744
1745 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
1746 if (!Flags.isSRet())
1747 return NotStructReturn;
1748 if (Flags.isInReg())
1749 return RegStructReturn;
1750 return StackStructReturn;
1751}
1752
1753/// ArgsAreStructReturn - Determines whether a function uses struct
1754/// return semantics.
1755static StructReturnType
1756argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
1757 if (Ins.empty())
1758 return NotStructReturn;
1759
1760 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
1761 if (!Flags.isSRet())
1762 return NotStructReturn;
1763 if (Flags.isInReg())
1764 return RegStructReturn;
1765 return StackStructReturn;
1766}
1767
1768/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
1769/// by "Src" to address "Dst" with size and alignment information specified by
1770/// the specific parameter attribute. The copy will be passed as a byval
1771/// function parameter.
1772static SDValue
1773CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
1774 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
1775 DebugLoc dl) {
1776 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
1777
1778 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
1779 /*isVolatile*/false, /*AlwaysInline=*/true,
1780 MachinePointerInfo(), MachinePointerInfo());
1781}
1782
1783/// IsTailCallConvention - Return true if the calling convention is one that
1784/// supports tail call optimization.
1785static bool IsTailCallConvention(CallingConv::ID CC) {
1786 return (CC == CallingConv::Fast || CC == CallingConv::GHC);
1787}
1788
1789bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
1790 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
1791 return false;
1792
1793 CallSite CS(CI);
1794 CallingConv::ID CalleeCC = CS.getCallingConv();
1795 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
1796 return false;
1797
1798 return true;
1799}
1800
1801/// FuncIsMadeTailCallSafe - Return true if the function is being made into
1802/// a tailcall target by changing its ABI.
1803static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
1804 bool GuaranteedTailCallOpt) {
1805 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
1806}
1807
1808SDValue
1809X86TargetLowering::LowerMemArgument(SDValue Chain,
1810 CallingConv::ID CallConv,
1811 const SmallVectorImpl<ISD::InputArg> &Ins,
1812 DebugLoc dl, SelectionDAG &DAG,
1813 const CCValAssign &VA,
1814 MachineFrameInfo *MFI,
1815 unsigned i) const {
1816 // Create the nodes corresponding to a load from this parameter slot.
1817 ISD::ArgFlagsTy Flags = Ins[i].Flags;
1818 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv,
1819 getTargetMachine().Options.GuaranteedTailCallOpt);
1820 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
1821 EVT ValVT;
1822
1823 // If value is passed by pointer we have address passed instead of the value
1824 // itself.
1825 if (VA.getLocInfo() == CCValAssign::Indirect)
1826 ValVT = VA.getLocVT();
1827 else
1828 ValVT = VA.getValVT();
1829
1830 // FIXME: For now, all byval parameter objects are marked mutable. This can be
1831 // changed with more analysis.
1832 // In case of tail call optimization, mark all arguments mutable, since they
1833 // could be overwritten by the lowering of arguments in case of a tail call.
1834 if (Flags.isByVal()) {
1835 unsigned Bytes = Flags.getByValSize();
1836 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
1837 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable); 1838 return DAG.getFrameIndex(FI, getPointerTy()); 1839 } else { 1840 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8, 1841 VA.getLocMemOffset(), isImmutable); 1842 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 1843 return DAG.getLoad(ValVT, dl, Chain, FIN, 1844 MachinePointerInfo::getFixedStack(FI), 1845 false, false, false, 0); 1846 } 1847} 1848 1849SDValue 1850X86TargetLowering::LowerFormalArguments(SDValue Chain, 1851 CallingConv::ID CallConv, 1852 bool isVarArg, 1853 const SmallVectorImpl<ISD::InputArg> &Ins, 1854 DebugLoc dl, 1855 SelectionDAG &DAG, 1856 SmallVectorImpl<SDValue> &InVals) 1857 const { 1858 MachineFunction &MF = DAG.getMachineFunction(); 1859 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1860 1861 const Function* Fn = MF.getFunction(); 1862 if (Fn->hasExternalLinkage() && 1863 Subtarget->isTargetCygMing() && 1864 Fn->getName() == "main") 1865 FuncInfo->setForceFramePointer(true); 1866 1867 MachineFrameInfo *MFI = MF.getFrameInfo(); 1868 bool Is64Bit = Subtarget->is64Bit(); 1869 bool IsWindows = Subtarget->isTargetWindows(); 1870 bool IsWin64 = Subtarget->isTargetWin64(); 1871 1872 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 1873 "Var args not supported with calling convention fastcc or ghc"); 1874 1875 // Assign locations to all of the incoming arguments. 1876 SmallVector<CCValAssign, 16> ArgLocs; 1877 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1878 ArgLocs, *DAG.getContext()); 1879 1880 // Allocate shadow area for Win64 1881 if (IsWin64) { 1882 CCInfo.AllocateStack(32, 8); 1883 } 1884 1885 CCInfo.AnalyzeFormalArguments(Ins, CC_X86); 1886 1887 unsigned LastVal = ~0U; 1888 SDValue ArgValue; 1889 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1890 CCValAssign &VA = ArgLocs[i]; 1891 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 1892 // places. 1893 assert(VA.getValNo() != LastVal && 1894 "Don't support value assigned to multiple locs yet"); 1895 (void)LastVal; 1896 LastVal = VA.getValNo(); 1897 1898 if (VA.isRegLoc()) { 1899 EVT RegVT = VA.getLocVT(); 1900 const TargetRegisterClass *RC; 1901 if (RegVT == MVT::i32) 1902 RC = &X86::GR32RegClass; 1903 else if (Is64Bit && RegVT == MVT::i64) 1904 RC = &X86::GR64RegClass; 1905 else if (RegVT == MVT::f32) 1906 RC = &X86::FR32RegClass; 1907 else if (RegVT == MVT::f64) 1908 RC = &X86::FR64RegClass; 1909 else if (RegVT.is256BitVector()) 1910 RC = &X86::VR256RegClass; 1911 else if (RegVT.is128BitVector()) 1912 RC = &X86::VR128RegClass; 1913 else if (RegVT == MVT::x86mmx) 1914 RC = &X86::VR64RegClass; 1915 else 1916 llvm_unreachable("Unknown argument type!"); 1917 1918 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1919 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 1920 1921 // If this is an 8 or 16-bit value, it is really passed promoted to 32 1922 // bits. Insert an assert[sz]ext to capture this, then truncate to the 1923 // right size. 
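// For example (illustrative): an i8 argument passed zero-extended in EAX is
// lowered as CopyFromReg(EAX, i32) -> AssertZext i8 -> TRUNCATE to i8.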
1924 if (VA.getLocInfo() == CCValAssign::SExt) 1925 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 1926 DAG.getValueType(VA.getValVT())); 1927 else if (VA.getLocInfo() == CCValAssign::ZExt) 1928 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 1929 DAG.getValueType(VA.getValVT())); 1930 else if (VA.getLocInfo() == CCValAssign::BCvt) 1931 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 1932 1933 if (VA.isExtInLoc()) { 1934 // Handle MMX values passed in XMM regs. 1935 if (RegVT.isVector()) { 1936 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), 1937 ArgValue); 1938 } else 1939 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 1940 } 1941 } else { 1942 assert(VA.isMemLoc()); 1943 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i); 1944 } 1945 1946 // If value is passed via pointer - do a load. 1947 if (VA.getLocInfo() == CCValAssign::Indirect) 1948 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, 1949 MachinePointerInfo(), false, false, false, 0); 1950 1951 InVals.push_back(ArgValue); 1952 } 1953 1954 // The x86-64 ABI for returning structs by value requires that we copy 1955 // the sret argument into %rax for the return. Save the argument into 1956 // a virtual register so that we can access it from the return points. 1957 if (Is64Bit && MF.getFunction()->hasStructRetAttr()) { 1958 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1959 unsigned Reg = FuncInfo->getSRetReturnReg(); 1960 if (!Reg) { 1961 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64)); 1962 FuncInfo->setSRetReturnReg(Reg); 1963 } 1964 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); 1965 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); 1966 } 1967 1968 unsigned StackSize = CCInfo.getNextStackOffset(); 1969 // Align stack specially for tail calls. 1970 if (FuncIsMadeTailCallSafe(CallConv, 1971 MF.getTarget().Options.GuaranteedTailCallOpt)) 1972 StackSize = GetAlignedArgumentStackSize(StackSize, DAG); 1973 1974 // If the function takes variable number of arguments, make a frame index for 1975 // the start of the first vararg value... for expansion of llvm.va_start. 1976 if (isVarArg) { 1977 if (Is64Bit || (CallConv != CallingConv::X86_FastCall && 1978 CallConv != CallingConv::X86_ThisCall)) { 1979 FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true)); 1980 } 1981 if (Is64Bit) { 1982 unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0; 1983 1984 // FIXME: We should really autogenerate these arrays 1985 static const uint16_t GPR64ArgRegsWin64[] = { 1986 X86::RCX, X86::RDX, X86::R8, X86::R9 1987 }; 1988 static const uint16_t GPR64ArgRegs64Bit[] = { 1989 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 1990 }; 1991 static const uint16_t XMMArgRegs64Bit[] = { 1992 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1993 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1994 }; 1995 const uint16_t *GPR64ArgRegs; 1996 unsigned NumXMMRegs = 0; 1997 1998 if (IsWin64) { 1999 // The XMM registers which might contain var arg parameters are shadowed 2000 // in their paired GPR. So we only need to save the GPR to their home 2001 // slots. 
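// (Assuming the standard Win64 layout: the caller always allocates a 32-byte
// home area, so RCX/RDX/R8/R9 can be spilled to [RSP+8..RSP+32] relative to
// the callee's entry RSP, just above the return address.)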
2002 TotalNumIntRegs = 4;
2003 GPR64ArgRegs = GPR64ArgRegsWin64;
2004 } else {
2005 TotalNumIntRegs = 6; TotalNumXMMRegs = 8;
2006 GPR64ArgRegs = GPR64ArgRegs64Bit;
2007
2008 NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit,
2009 TotalNumXMMRegs);
2010 }
2011 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs,
2012 TotalNumIntRegs);
2013
2014 bool NoImplicitFloatOps = Fn->getFnAttributes().hasNoImplicitFloatAttr();
2015 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2016 "SSE register cannot be used when SSE is disabled!");
2017 assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat &&
2018 NoImplicitFloatOps) &&
2019 "SSE register cannot be used when SSE is disabled!");
2020 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2021 !Subtarget->hasSSE1())
2022 // Kernel mode asks for SSE to be disabled, so don't push them
2023 // on the stack.
2024 TotalNumXMMRegs = 0;
2025
2026 if (IsWin64) {
2027 const TargetFrameLowering &TFI = *getTargetMachine().getFrameLowering();
2028 // Get to the caller-allocated home save location. Add 8 to account
2029 // for the return address.
2030 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2031 FuncInfo->setRegSaveFrameIndex(
2032 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2033 // Fixup to set vararg frame on shadow area (4 x i64).
2034 if (NumIntRegs < 4)
2035 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2036 } else {
2037 // For X86-64, if there are vararg parameters that are passed via
2038 // registers, then we must store them to their spots on the stack so
2039 // they may be loaded by dereferencing the result of va_next.
2040 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2041 FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16);
2042 FuncInfo->setRegSaveFrameIndex(
2043 MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16,
2044 false));
2045 }
2046
2047 // Store the integer parameter registers.
2048 SmallVector<SDValue, 8> MemOps;
2049 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2050 getPointerTy());
2051 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2052 for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) {
2053 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2054 DAG.getIntPtrConstant(Offset));
2055 unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs],
2056 &X86::GR64RegClass);
2057 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
2058 SDValue Store =
2059 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2060 MachinePointerInfo::getFixedStack(
2061 FuncInfo->getRegSaveFrameIndex(), Offset),
2062 false, false, 0);
2063 MemOps.push_back(Store);
2064 Offset += 8;
2065 }
2066
2067 if (TotalNumXMMRegs != 0 && NumXMMRegs != TotalNumXMMRegs) {
2068 // Now store the XMM (fp + vector) parameter registers.
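// Note: X86ISD::VASTART_SAVE_XMM_REGS is expanded later by a custom inserter
// into a sequence that tests AL (set by the caller per the AMD64 ABI; see
// LowerCall below) and skips these stores when no vector arguments were
// passed.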
2069 SmallVector<SDValue, 11> SaveXMMOps; 2070 SaveXMMOps.push_back(Chain); 2071 2072 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass); 2073 SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8); 2074 SaveXMMOps.push_back(ALVal); 2075 2076 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2077 FuncInfo->getRegSaveFrameIndex())); 2078 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2079 FuncInfo->getVarArgsFPOffset())); 2080 2081 for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { 2082 unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs], 2083 &X86::VR128RegClass); 2084 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32); 2085 SaveXMMOps.push_back(Val); 2086 } 2087 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl, 2088 MVT::Other, 2089 &SaveXMMOps[0], SaveXMMOps.size())); 2090 } 2091 2092 if (!MemOps.empty()) 2093 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2094 &MemOps[0], MemOps.size()); 2095 } 2096 } 2097 2098 // Some CCs need callee pop. 2099 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, 2100 MF.getTarget().Options.GuaranteedTailCallOpt)) { 2101 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything. 2102 } else { 2103 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing. 2104 // If this is an sret function, the return should pop the hidden pointer. 2105 if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows && 2106 argsAreStructReturn(Ins) == StackStructReturn) 2107 FuncInfo->setBytesToPopOnReturn(4); 2108 } 2109 2110 if (!Is64Bit) { 2111 // RegSaveFrameIndex is X86-64 only. 2112 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA); 2113 if (CallConv == CallingConv::X86_FastCall || 2114 CallConv == CallingConv::X86_ThisCall) 2115 // fastcc functions can't have varargs. 2116 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA); 2117 } 2118 2119 FuncInfo->setArgumentStackSize(StackSize); 2120 2121 return Chain; 2122} 2123 2124SDValue 2125X86TargetLowering::LowerMemOpCallTo(SDValue Chain, 2126 SDValue StackPtr, SDValue Arg, 2127 DebugLoc dl, SelectionDAG &DAG, 2128 const CCValAssign &VA, 2129 ISD::ArgFlagsTy Flags) const { 2130 unsigned LocMemOffset = VA.getLocMemOffset(); 2131 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 2132 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 2133 if (Flags.isByVal()) 2134 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); 2135 2136 return DAG.getStore(Chain, dl, Arg, PtrOff, 2137 MachinePointerInfo::getStack(LocMemOffset), 2138 false, false, 0); 2139} 2140 2141/// EmitTailCallLoadRetAddr - Emit a load of return address if tail call 2142/// optimization is performed and it is required. 2143SDValue 2144X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, 2145 SDValue &OutRetAddr, SDValue Chain, 2146 bool IsTailCall, bool Is64Bit, 2147 int FPDiff, DebugLoc dl) const { 2148 // Adjust the Return address stack slot. 2149 EVT VT = getPointerTy(); 2150 OutRetAddr = getReturnAddressFrameIndex(DAG); 2151 2152 // Load the "old" Return address. 2153 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(), 2154 false, false, false, 0); 2155 return SDValue(OutRetAddr.getNode(), 1); 2156} 2157 2158/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call 2159/// optimization is performed and it is required (FPDiff!=0). 
2160static SDValue
2161EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2162 SDValue Chain, SDValue RetAddrFrIdx,
2163 bool Is64Bit, int FPDiff, DebugLoc dl) {
2164 // Store the return address to the appropriate stack slot.
2165 if (!FPDiff) return Chain;
2166 // Calculate the new stack slot for the return address.
2167 int SlotSize = Is64Bit ? 8 : 4;
2168 int NewReturnAddrFI =
2169 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false);
2170 EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2171 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
2172 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2173 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2174 false, false, 0);
2175 return Chain;
2176}
2177
2178SDValue
2179X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2180 SmallVectorImpl<SDValue> &InVals) const {
2181 SelectionDAG &DAG = CLI.DAG;
2182 DebugLoc &dl = CLI.DL;
2183 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2184 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2185 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2186 SDValue Chain = CLI.Chain;
2187 SDValue Callee = CLI.Callee;
2188 CallingConv::ID CallConv = CLI.CallConv;
2189 bool &isTailCall = CLI.IsTailCall;
2190 bool isVarArg = CLI.IsVarArg;
2191
2192 MachineFunction &MF = DAG.getMachineFunction();
2193 bool Is64Bit = Subtarget->is64Bit();
2194 bool IsWin64 = Subtarget->isTargetWin64();
2195 bool IsWindows = Subtarget->isTargetWindows();
2196 StructReturnType SR = callIsStructReturn(Outs);
2197 bool IsSibcall = false;
2198
2199 if (MF.getTarget().Options.DisableTailCalls)
2200 isTailCall = false;
2201
2202 if (isTailCall) {
2203 // Check if it's really possible to do a tail call.
2204 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2205 isVarArg, SR != NotStructReturn,
2206 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2207 Outs, OutVals, Ins, DAG);
2208
2209 // Sibcalls are automatically detected tailcalls which do not require
2210 // ABI changes.
2211 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2212 IsSibcall = true;
2213
2214 if (isTailCall)
2215 ++NumTailCalls;
2216 }
2217
2218 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2219 "Var args not supported with calling convention fastcc or ghc");
2220
2221 // Analyze operands of the call, assigning locations to each operand.
2222 SmallVector<CCValAssign, 16> ArgLocs;
2223 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
2224 ArgLocs, *DAG.getContext());
2225
2226 // Allocate shadow area for Win64
2227 if (IsWin64) {
2228 CCInfo.AllocateStack(32, 8);
2229 }
2230
2231 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2232
2233 // Get a count of how many bytes are to be pushed on the stack.
2234 unsigned NumBytes = CCInfo.getNextStackOffset();
2235 if (IsSibcall)
2236 // This is a sibcall. The memory operands are already in place in the
2237 // caller's own incoming argument space, so nothing needs to be pushed.
2238 NumBytes = 0;
2239 else if (getTargetMachine().Options.GuaranteedTailCallOpt &&
2240 IsTailCallConvention(CallConv))
2241 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2242
2243 int FPDiff = 0;
2244 if (isTailCall && !IsSibcall) {
2245 // Lower arguments at fp - stackoffset + fpdiff.
2246 unsigned NumBytesCallerPushed =
2247 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
2248 FPDiff = NumBytesCallerPushed - NumBytes;
2249
2250 // Set the delta by which the return-address stack slot moves. Only update
2251 // it if this call moves the slot further (a smaller FPDiff) than before.
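// Example (illustrative): if the caller pops 16 bytes of arguments on return
// but this tail call needs 32 bytes, FPDiff = 16 - 32 = -16, i.e. the return
// address must move 16 bytes further down the stack.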
2252 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
2253 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
2254 }
2255
2256 if (!IsSibcall)
2257 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
2258
2259 SDValue RetAddrFrIdx;
2260 // Load return address for tail calls.
2261 if (isTailCall && FPDiff)
2262 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2263 Is64Bit, FPDiff, dl);
2264
2265 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2266 SmallVector<SDValue, 8> MemOpChains;
2267 SDValue StackPtr;
2268
2269 // Walk the register/memloc assignments, inserting copies/loads. In the case
2270 // of tail call optimization, arguments are handled later.
2271 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2272 CCValAssign &VA = ArgLocs[i];
2273 EVT RegVT = VA.getLocVT();
2274 SDValue Arg = OutVals[i];
2275 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2276 bool isByVal = Flags.isByVal();
2277
2278 // Promote the value if needed.
2279 switch (VA.getLocInfo()) {
2280 default: llvm_unreachable("Unknown loc info!");
2281 case CCValAssign::Full: break;
2282 case CCValAssign::SExt:
2283 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2284 break;
2285 case CCValAssign::ZExt:
2286 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2287 break;
2288 case CCValAssign::AExt:
2289 if (RegVT.is128BitVector()) {
2290 // Special case: passing MMX values in XMM registers.
2291 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2292 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2293 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2294 } else
2295 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2296 break;
2297 case CCValAssign::BCvt:
2298 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2299 break;
2300 case CCValAssign::Indirect: {
2301 // Store the argument.
2302 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2303 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2304 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2305 MachinePointerInfo::getFixedStack(FI),
2306 false, false, 0);
2307 Arg = SpillSlot;
2308 break;
2309 }
2310 }
2311
2312 if (VA.isRegLoc()) {
2313 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2314 if (isVarArg && IsWin64) {
2315 // Win64 ABI requires argument XMM reg to be copied to the corresponding
2316 // shadow reg if callee is a varargs function.
2317 unsigned ShadowReg = 0;
2318 switch (VA.getLocReg()) {
2319 case X86::XMM0: ShadowReg = X86::RCX; break;
2320 case X86::XMM1: ShadowReg = X86::RDX; break;
2321 case X86::XMM2: ShadowReg = X86::R8; break;
2322 case X86::XMM3: ShadowReg = X86::R9; break;
2323 }
2324 if (ShadowReg)
2325 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2326 }
2327 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2328 assert(VA.isMemLoc());
2329 if (StackPtr.getNode() == 0)
2330 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy());
2331 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2332 dl, DAG, VA, Flags));
2333 }
2334 }
2335
2336 if (!MemOpChains.empty())
2337 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2338 &MemOpChains[0], MemOpChains.size());
2339
2340 if (Subtarget->isPICStyleGOT()) {
2341 // ELF / PIC requires GOT in the EBX register before function calls via PLT
2342 // GOT pointer.
2343 if (!isTailCall) {
2344 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2345 DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy())));
2346 } else {
2347 // If we are tail calling and generating PIC/GOT style code load the
2348 // address of the callee into ECX. The value in ecx is used as target of
2349 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2350 // for tail calls on PIC/GOT architectures. Normally we would just put the
2351 // address of GOT into ebx and then call target@PLT. But for tail calls
2352 // ebx would be restored (since ebx is callee saved) before jumping to the
2353 // target@PLT.
2354
2355 // Note: The actual moving to ECX is done further down.
2356 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2357 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2358 !G->getGlobal()->hasProtectedVisibility())
2359 Callee = LowerGlobalAddress(Callee, DAG);
2360 else if (isa<ExternalSymbolSDNode>(Callee))
2361 Callee = LowerExternalSymbol(Callee, DAG);
2362 }
2363 }
2364
2365 if (Is64Bit && isVarArg && !IsWin64) {
2366 // From the AMD64 ABI document:
2367 // For calls that may call functions that use varargs or stdargs
2368 // (prototype-less calls or calls to functions containing ellipsis (...) in
2369 // the declaration) %al is used as a hidden argument to specify the number
2370 // of SSE registers used. The contents of %al do not need to match exactly
2371 // the number of registers, but must be an upper bound on the number of SSE
2372 // registers used and is in the range 0 - 8 inclusive.
2373
2374 // Count the number of XMM registers allocated.
2375 static const uint16_t XMMArgRegs[] = {
2376 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2377 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2378 };
2379 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
2380 assert((Subtarget->hasSSE1() || !NumXMMRegs)
2381 && "SSE registers cannot be used when SSE is disabled");
2382
2383 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
2384 DAG.getConstant(NumXMMRegs, MVT::i8)));
2385 }
2386
2387 // For tail calls lower the arguments to the 'real' stack slot.
2388 if (isTailCall) {
2389 // Force all the incoming stack arguments to be loaded from the stack
2390 // before any new outgoing arguments are stored to the stack, because the
2391 // outgoing stack slots may alias the incoming argument stack slots, and
2392 // the alias isn't otherwise explicit. This is slightly more conservative
2393 // than necessary, because it means that each store effectively depends
2394 // on every argument instead of just those arguments it would clobber.
2395 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
2396
2397 SmallVector<SDValue, 8> MemOpChains2;
2398 SDValue FIN;
2399 int FI = 0;
2400 if (getTargetMachine().Options.GuaranteedTailCallOpt) {
2401 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2402 CCValAssign &VA = ArgLocs[i];
2403 if (VA.isRegLoc())
2404 continue;
2405 assert(VA.isMemLoc());
2406 SDValue Arg = OutVals[i];
2407 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2408 // Create frame index.
2409 int32_t Offset = VA.getLocMemOffset()+FPDiff;
2410 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
2411 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
2412 FIN = DAG.getFrameIndex(FI, getPointerTy());
2413
2414 if (Flags.isByVal()) {
2415 // Copy relative to framepointer.
2416 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
2417 if (StackPtr.getNode() == 0)
2418 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr,
2419 getPointerTy());
2420 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
2421
2422 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
2423 ArgChain,
2424 Flags, DAG, dl));
2425 } else {
2426 // Store relative to framepointer.
2427 MemOpChains2.push_back(
2428 DAG.getStore(ArgChain, dl, Arg, FIN,
2429 MachinePointerInfo::getFixedStack(FI),
2430 false, false, 0));
2431 }
2432 }
2433 }
2434
2435 if (!MemOpChains2.empty())
2436 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2437 &MemOpChains2[0], MemOpChains2.size());
2438
2439 // Store the return address to the appropriate stack slot.
2440 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit,
2441 FPDiff, dl);
2442 }
2443
2444 // Build a sequence of copy-to-reg nodes chained together with token chain
2445 // and flag operands which copy the outgoing args into registers.
2446 SDValue InFlag;
2447 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2448 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
2449 RegsToPass[i].second, InFlag);
2450 InFlag = Chain.getValue(1);
2451 }
2452
2453 if (getTargetMachine().getCodeModel() == CodeModel::Large) {
2454 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
2455 // In the 64-bit large code model, we have to make all calls
2456 // through a register, since the call instruction's 32-bit
2457 // pc-relative offset may not be large enough to hold the whole
2458 // address.
2459 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2460 // If the callee is a GlobalAddress node (quite common, every direct call
2461 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
2462 // it.
2463
2464 // We should use extra load for direct calls to dllimported functions in
2465 // non-JIT mode.
2466 const GlobalValue *GV = G->getGlobal();
2467 if (!GV->hasDLLImportLinkage()) {
2468 unsigned char OpFlags = 0;
2469 bool ExtraLoad = false;
2470 unsigned WrapperKind = ISD::DELETED_NODE;
2471
2472 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
2473 // external symbols must go through the PLT in PIC mode. If the symbol
2474 // has hidden or protected visibility, or if it is static or local, then
2475 // we don't need to use the PLT - we can directly call it.
2476 if (Subtarget->isTargetELF() &&
2477 getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
2478 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
2479 OpFlags = X86II::MO_PLT;
2480 } else if (Subtarget->isPICStyleStubAny() &&
2481 (GV->isDeclaration() || GV->isWeakForLinker()) &&
2482 (!Subtarget->getTargetTriple().isMacOSX() ||
2483 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
2484 // PC-relative references to external symbols should go through $stub,
2485 // unless we're building with the leopard linker or later, which
2486 // automatically synthesizes these stubs.
2487 OpFlags = X86II::MO_DARWIN_STUB;
2488 } else if (Subtarget->isPICStyleRIPRel() &&
2489 isa<Function>(GV) &&
2490 cast<Function>(GV)->getFnAttributes().hasNonLazyBindAttr()) {
2491 // If the function is marked as non-lazy, generate an indirect call
2492 // which loads from the GOT directly. This avoids runtime overhead
2493 // at the cost of eager binding (and one extra byte of encoding).
2494 OpFlags = X86II::MO_GOTPCREL; 2495 WrapperKind = X86ISD::WrapperRIP; 2496 ExtraLoad = true; 2497 } 2498 2499 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 2500 G->getOffset(), OpFlags); 2501 2502 // Add a wrapper if needed. 2503 if (WrapperKind != ISD::DELETED_NODE) 2504 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee); 2505 // Add extra indirection if needed. 2506 if (ExtraLoad) 2507 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee, 2508 MachinePointerInfo::getGOT(), 2509 false, false, false, 0); 2510 } 2511 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 2512 unsigned char OpFlags = 0; 2513 2514 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to 2515 // external symbols should go through the PLT. 2516 if (Subtarget->isTargetELF() && 2517 getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2518 OpFlags = X86II::MO_PLT; 2519 } else if (Subtarget->isPICStyleStubAny() && 2520 (!Subtarget->getTargetTriple().isMacOSX() || 2521 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) { 2522 // PC-relative references to external symbols should go through $stub, 2523 // unless we're building with the leopard linker or later, which 2524 // automatically synthesizes these stubs. 2525 OpFlags = X86II::MO_DARWIN_STUB; 2526 } 2527 2528 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(), 2529 OpFlags); 2530 } 2531 2532 // Returns a chain & a flag for retval copy to use. 2533 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2534 SmallVector<SDValue, 8> Ops; 2535 2536 if (!IsSibcall && isTailCall) { 2537 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2538 DAG.getIntPtrConstant(0, true), InFlag); 2539 InFlag = Chain.getValue(1); 2540 } 2541 2542 Ops.push_back(Chain); 2543 Ops.push_back(Callee); 2544 2545 if (isTailCall) 2546 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 2547 2548 // Add argument registers to the end of the list so that they are known live 2549 // into the call. 2550 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2551 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2552 RegsToPass[i].second.getValueType())); 2553 2554 // Add a register mask operand representing the call-preserved registers. 2555 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 2556 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); 2557 assert(Mask && "Missing call preserved mask for calling convention"); 2558 Ops.push_back(DAG.getRegisterMask(Mask)); 2559 2560 if (InFlag.getNode()) 2561 Ops.push_back(InFlag); 2562 2563 if (isTailCall) { 2564 // We used to do: 2565 //// If this is the first return lowered for this function, add the regs 2566 //// to the liveout set for the function. 2567 // This isn't right, although it's probably harmless on x86; liveouts 2568 // should be computed from returns not tail calls. Consider a void 2569 // function making a tail call to a function returning int. 2570 return DAG.getNode(X86ISD::TC_RETURN, dl, 2571 NodeTys, &Ops[0], Ops.size()); 2572 } 2573 2574 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size()); 2575 InFlag = Chain.getValue(1); 2576 2577 // Create the CALLSEQ_END node. 
2578 unsigned NumBytesForCalleeToPush;
2579 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2580 getTargetMachine().Options.GuaranteedTailCallOpt))
2581 NumBytesForCalleeToPush = NumBytes; // Callee pops everything
2582 else if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows &&
2583 SR == StackStructReturn)
2584 // If this is a call to a struct-return function, the callee
2585 // pops the hidden struct pointer, so we have to push it back.
2586 // This is common for Darwin/X86, Linux & Mingw32 targets.
2587 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
2588 NumBytesForCalleeToPush = 4;
2589 else
2590 NumBytesForCalleeToPush = 0; // Callee pops nothing.
2591
2592 // Returns a flag for retval copy to use.
2593 if (!IsSibcall) {
2594 Chain = DAG.getCALLSEQ_END(Chain,
2595 DAG.getIntPtrConstant(NumBytes, true),
2596 DAG.getIntPtrConstant(NumBytesForCalleeToPush,
2597 true),
2598 InFlag);
2599 InFlag = Chain.getValue(1);
2600 }
2601
2602 // Handle result values, copying them out of physregs into vregs that we
2603 // return.
2604 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
2605 Ins, dl, DAG, InVals);
2606}
2607
2608
2609//===----------------------------------------------------------------------===//
2610// Fast Calling Convention (tail call) implementation
2611//===----------------------------------------------------------------------===//
2612
2613// Like stdcall, the callee cleans up the arguments, except that ECX is
2614// reserved for storing the tail-called function address. Only 2 registers are
2615// free for argument passing (inreg). Tail call optimization is performed
2616// provided:
2617// * tailcallopt is enabled
2618// * caller/callee are fastcc
2619// On X86_64 architecture with GOT-style position independent code only local
2620// (within module) calls are supported at the moment.
2621// To keep the stack aligned according to the platform ABI, the function
2622// GetAlignedArgumentStackSize ensures that the argument delta is always a
2623// multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld for example)
2624// If a tail called function callee has more arguments than the caller the
2625// caller needs to make sure that there is room to move the RETADDR to. This is
2626// achieved by reserving an area the size of the argument delta right after the
2627// original RETADDR, but before the saved framepointer or the spilled registers
2628// e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
2629// stack layout:
2630// arg1
2631// arg2
2632// RETADDR
2633// [ new RETADDR
2634// move area ]
2635// (possible EBP)
2636// ESI
2637// EDI
2638// local1 ..
2639
2640/// GetAlignedArgumentStackSize - Round the stack size up to e.g. 16n + 12 so
2641/// that with the RETADDR slot added it meets a 16-byte alignment requirement.
2642unsigned
2643X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
2644 SelectionDAG& DAG) const {
2645 MachineFunction &MF = DAG.getMachineFunction();
2646 const TargetMachine &TM = MF.getTarget();
2647 const TargetFrameLowering &TFI = *TM.getFrameLowering();
2648 unsigned StackAlignment = TFI.getStackAlignment();
2649 uint64_t AlignMask = StackAlignment - 1;
2650 int64_t Offset = StackSize;
2651 uint64_t SlotSize = TD->getPointerSize();
2652 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
2653 // The low bits are no larger than StackAlignment - SlotSize (e.g. 12), so just add the difference.
2654 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
2655 } else {
2656 // Mask out the lower bits, then add the stack alignment once plus the 12 bytes.
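// Example (illustrative, 32-bit): StackAlignment = 16, SlotSize = 4. For
// Offset = 14 the low bits (14) exceed 12, so this branch computes
// (14 & ~15) + 16 + 12 = 28 = 16 + 12, as required.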
2657 Offset = ((~AlignMask) & Offset) + StackAlignment + 2658 (StackAlignment-SlotSize); 2659 } 2660 return Offset; 2661} 2662 2663/// MatchingStackOffset - Return true if the given stack call argument is 2664/// already available in the same position (relatively) of the caller's 2665/// incoming argument stack. 2666static 2667bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 2668 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 2669 const X86InstrInfo *TII) { 2670 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 2671 int FI = INT_MAX; 2672 if (Arg.getOpcode() == ISD::CopyFromReg) { 2673 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 2674 if (!TargetRegisterInfo::isVirtualRegister(VR)) 2675 return false; 2676 MachineInstr *Def = MRI->getVRegDef(VR); 2677 if (!Def) 2678 return false; 2679 if (!Flags.isByVal()) { 2680 if (!TII->isLoadFromStackSlot(Def, FI)) 2681 return false; 2682 } else { 2683 unsigned Opcode = Def->getOpcode(); 2684 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) && 2685 Def->getOperand(1).isFI()) { 2686 FI = Def->getOperand(1).getIndex(); 2687 Bytes = Flags.getByValSize(); 2688 } else 2689 return false; 2690 } 2691 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 2692 if (Flags.isByVal()) 2693 // ByVal argument is passed in as a pointer but it's now being 2694 // dereferenced. e.g. 2695 // define @foo(%struct.X* %A) { 2696 // tail call @bar(%struct.X* byval %A) 2697 // } 2698 return false; 2699 SDValue Ptr = Ld->getBasePtr(); 2700 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 2701 if (!FINode) 2702 return false; 2703 FI = FINode->getIndex(); 2704 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) { 2705 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg); 2706 FI = FINode->getIndex(); 2707 Bytes = Flags.getByValSize(); 2708 } else 2709 return false; 2710 2711 assert(FI != INT_MAX); 2712 if (!MFI->isFixedObjectIndex(FI)) 2713 return false; 2714 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 2715} 2716 2717/// IsEligibleForTailCallOptimization - Check whether the call is eligible 2718/// for tail call optimization. Targets which want to do tail call 2719/// optimization should implement this function. 2720bool 2721X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2722 CallingConv::ID CalleeCC, 2723 bool isVarArg, 2724 bool isCalleeStructRet, 2725 bool isCallerStructRet, 2726 Type *RetTy, 2727 const SmallVectorImpl<ISD::OutputArg> &Outs, 2728 const SmallVectorImpl<SDValue> &OutVals, 2729 const SmallVectorImpl<ISD::InputArg> &Ins, 2730 SelectionDAG& DAG) const { 2731 if (!IsTailCallConvention(CalleeCC) && 2732 CalleeCC != CallingConv::C) 2733 return false; 2734 2735 // If -tailcallopt is specified, make fastcc functions tail-callable. 2736 const MachineFunction &MF = DAG.getMachineFunction(); 2737 const Function *CallerF = DAG.getMachineFunction().getFunction(); 2738 2739 // If the function return type is x86_fp80 and the callee return type is not, 2740 // then the FP_EXTEND of the call result is not a nop. It's not safe to 2741 // perform a tailcall optimization here. 
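// Example (illustrative):
//   float g(); long double f() { return g(); }
// Here the result of g() needs a real FP_EXTEND from f32 to x86_fp80, so
// f() must not be lowered as a tail call to g().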
2742 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty()) 2743 return false; 2744 2745 CallingConv::ID CallerCC = CallerF->getCallingConv(); 2746 bool CCMatch = CallerCC == CalleeCC; 2747 2748 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 2749 if (IsTailCallConvention(CalleeCC) && CCMatch) 2750 return true; 2751 return false; 2752 } 2753 2754 // Look for obvious safe cases to perform tail call optimization that do not 2755 // require ABI changes. This is what gcc calls sibcall. 2756 2757 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to 2758 // emit a special epilogue. 2759 if (RegInfo->needsStackRealignment(MF)) 2760 return false; 2761 2762 // Also avoid sibcall optimization if either caller or callee uses struct 2763 // return semantics. 2764 if (isCalleeStructRet || isCallerStructRet) 2765 return false; 2766 2767 // An stdcall caller is expected to clean up its arguments; the callee 2768 // isn't going to do that. 2769 if (!CCMatch && CallerCC==CallingConv::X86_StdCall) 2770 return false; 2771 2772 // Do not sibcall optimize vararg calls unless all arguments are passed via 2773 // registers. 2774 if (isVarArg && !Outs.empty()) { 2775 2776 // Optimizing for varargs on Win64 is unlikely to be safe without 2777 // additional testing. 2778 if (Subtarget->isTargetWin64()) 2779 return false; 2780 2781 SmallVector<CCValAssign, 16> ArgLocs; 2782 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2783 getTargetMachine(), ArgLocs, *DAG.getContext()); 2784 2785 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2786 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) 2787 if (!ArgLocs[i].isRegLoc()) 2788 return false; 2789 } 2790 2791 // If the call result is in ST0 / ST1, it needs to be popped off the x87 2792 // stack. Therefore, if it's not used by the call it is not safe to optimize 2793 // this into a sibcall. 2794 bool Unused = false; 2795 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 2796 if (!Ins[i].Used) { 2797 Unused = true; 2798 break; 2799 } 2800 } 2801 if (Unused) { 2802 SmallVector<CCValAssign, 16> RVLocs; 2803 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), 2804 getTargetMachine(), RVLocs, *DAG.getContext()); 2805 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 2806 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 2807 CCValAssign &VA = RVLocs[i]; 2808 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) 2809 return false; 2810 } 2811 } 2812 2813 // If the calling conventions do not match, then we'd better make sure the 2814 // results are returned in the same way as what the caller expects. 
  if (!CCMatch) {
    SmallVector<CCValAssign, 16> RVLocs1;
    CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs1, *DAG.getContext());
    CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);

    SmallVector<CCValAssign, 16> RVLocs2;
    CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs2, *DAG.getContext());
    CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);

    if (RVLocs1.size() != RVLocs2.size())
      return false;
    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
        return false;
      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
        return false;
      if (RVLocs1[i].isRegLoc()) {
        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
          return false;
      } else {
        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
          return false;
      }
    }
  }

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
                   getTargetMachine(), ArgLocs, *DAG.getContext());

    // Allocate shadow area for Win64.
    if (Subtarget->isTargetWin64()) {
      CCInfo.AllocateStack(32, 8);
    }

    CCInfo.AnalyzeCallOperands(Outs, CC_X86);
    if (CCInfo.getNextStackOffset()) {
      MachineFunction &MF = DAG.getMachineFunction();
      if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
        return false;

      // Check if the arguments are already laid out in the same way as
      // the caller's fixed stack objects.
      MachineFrameInfo *MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const X86InstrInfo *TII =
        ((const X86TargetMachine&)getTargetMachine()).getInstrInfo();
      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        SDValue Arg = OutVals[i];
        ISD::ArgFlagsTy Flags = Outs[i].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (!VA.isRegLoc()) {
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
                                   MFI, MRI, TII))
            return false;
        }
      }
    }

    // If the tailcall address may be in a register, then make sure it's
    // possible to register allocate for it. In 32-bit, the call address can
    // only target EAX, EDX, or ECX since the tail call must be scheduled after
    // callee-saved registers are restored. These happen to be the same
    // registers used to pass 'inreg' arguments so watch out for those.
    if (!Subtarget->is64Bit() &&
        !isa<GlobalAddressSDNode>(Callee) &&
        !isa<ExternalSymbolSDNode>(Callee)) {
      unsigned NumInRegs = 0;
      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        if (!VA.isRegLoc())
          continue;
        unsigned Reg = VA.getLocReg();
        switch (Reg) {
        default: break;
        case X86::EAX: case X86::EDX: case X86::ECX:
          if (++NumInRegs == 3)
            return false;
          break;
        }
      }
    }
  }

  return true;
}

FastISel *
X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {
  return X86::createFastISel(funcInfo, libInfo);
}


//===----------------------------------------------------------------------===//
//                           Other Lowering Hooks
//===----------------------------------------------------------------------===//

static bool MayFoldLoad(SDValue Op) {
  return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
}

static bool MayFoldIntoStore(SDValue Op) {
  return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
}

static bool isTargetShuffle(unsigned Opcode) {
  switch (Opcode) {
  default: return false;
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::SHUFP:
  case X86ISD::PALIGN:
  case X86ISD::MOVLHPS:
  case X86ISD::MOVLHPD:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLPS:
  case X86ISD::MOVLPD:
  case X86ISD::MOVSHDUP:
  case X86ISD::MOVSLDUP:
  case X86ISD::MOVDDUP:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case X86ISD::UNPCKL:
  case X86ISD::UNPCKH:
  case X86ISD::VPERMILP:
  case X86ISD::VPERM2X128:
  case X86ISD::VPERMI:
    return true;
  }
}

static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
                                    SDValue V1, SelectionDAG &DAG) {
  switch (Opc) {
  default: llvm_unreachable("Unknown x86 shuffle node");
  case X86ISD::MOVSHDUP:
  case X86ISD::MOVSLDUP:
  case X86ISD::MOVDDUP:
    return DAG.getNode(Opc, dl, VT, V1);
  }
}

static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
                                    SDValue V1, unsigned TargetMask,
                                    SelectionDAG &DAG) {
  switch (Opc) {
  default: llvm_unreachable("Unknown x86 shuffle node");
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::VPERMILP:
  case X86ISD::VPERMI:
    return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
  }
}

static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
                                    SDValue V1, SDValue V2, unsigned TargetMask,
                                    SelectionDAG &DAG) {
  switch (Opc) {
  default: llvm_unreachable("Unknown x86 shuffle node");
  case X86ISD::PALIGN:
  case X86ISD::SHUFP:
  case X86ISD::VPERM2X128:
    return DAG.getNode(Opc, dl, VT, V1, V2,
                       DAG.getConstant(TargetMask, MVT::i8));
  }
}

static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
                                    SDValue V1, SDValue V2, SelectionDAG &DAG) {
  switch (Opc) {
  default: llvm_unreachable("Unknown x86 shuffle node");
  case X86ISD::MOVLHPS:
  case X86ISD::MOVLHPD:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLPS:
  case X86ISD::MOVLPD:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case X86ISD::UNPCKL:
  case X86ISD::UNPCKH:
    return DAG.getNode(Opc, dl, VT, V1, V2);
  }
}

SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  int ReturnAddrIndex = FuncInfo->getRAIndex();

  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    uint64_t SlotSize = TD->getPointerSize();
    ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize,
                                                           false);
    FuncInfo->setRAIndex(ReturnAddrIndex);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
}


bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                       bool hasSymbolicDisplacement) {
  // The offset should fit into a 32-bit immediate field.
  if (!isInt<32>(Offset))
    return false;

  // If we don't have a symbolic displacement - we don't have any extra
  // restrictions.
  if (!hasSymbolicDisplacement)
    return true;

  // FIXME: Some tweaks might be needed for medium code model.
  if (M != CodeModel::Small && M != CodeModel::Kernel)
    return false;

  // For the small code model, we assume that the last object is 16MB before
  // the end of the 31-bit boundary. We may also accept pretty large negative
  // constants, knowing that all objects are in the positive half of the
  // address space.
  if (M == CodeModel::Small && Offset < 16*1024*1024)
    return true;

  // For the kernel code model, we know that all objects reside in the
  // negative half of the 32-bit address space. We must not accept negative
  // offsets, since they may be just out of range, but we may accept pretty
  // large positive ones.
  if (M == CodeModel::Kernel && Offset > 0)
    return true;

  return false;
}

/// isCalleePop - Determines whether the callee is required to pop its
/// own arguments. Callee pop is necessary to support tail calls.
bool X86::isCalleePop(CallingConv::ID CallingConv,
                      bool is64Bit, bool IsVarArg, bool TailCallOpt) {
  if (IsVarArg)
    return false;

  switch (CallingConv) {
  default:
    return false;
  case CallingConv::X86_StdCall:
    return !is64Bit;
  case CallingConv::X86_FastCall:
    return !is64Bit;
  case CallingConv::X86_ThisCall:
    return !is64Bit;
  case CallingConv::Fast:
    return TailCallOpt;
  case CallingConv::GHC:
    return TailCallOpt;
  }
}

/// TranslateX86CC - Do a one-to-one translation of an ISD::CondCode to the
/// X86-specific condition code, returning the condition code and the LHS/RHS
/// of the comparison to make.
static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
                               SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
  if (!isFP) {
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
        // X > -1   -> X == 0, jump !sign.
        RHS = DAG.getConstant(0, RHS.getValueType());
        return X86::COND_NS;
      }
      if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
        // X < 0   -> X == 0, jump on sign.
        return X86::COND_S;
      }
      if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
        // X < 1   -> X <= 0
        RHS = DAG.getConstant(0, RHS.getValueType());
        return X86::COND_LE;
      }
    }

    switch (SetCCOpcode) {
    default: llvm_unreachable("Invalid integer condition!");
    case ISD::SETEQ:  return X86::COND_E;
    case ISD::SETGT:  return X86::COND_G;
    case ISD::SETGE:  return X86::COND_GE;
    case ISD::SETLT:  return X86::COND_L;
    case ISD::SETLE:  return X86::COND_LE;
    case ISD::SETNE:  return X86::COND_NE;
    case ISD::SETULT: return X86::COND_B;
    case ISD::SETUGT: return X86::COND_A;
    case ISD::SETULE: return X86::COND_BE;
    case ISD::SETUGE: return X86::COND_AE;
    }
  }

  // First determine if it is required or is profitable to flip the operands.

  // If LHS is a foldable load, but RHS is not, flip the condition.
  if (ISD::isNON_EXTLoad(LHS.getNode()) &&
      !ISD::isNON_EXTLoad(RHS.getNode())) {
    SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
    std::swap(LHS, RHS);
  }

  switch (SetCCOpcode) {
  default: break;
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETUGT:
  case ISD::SETUGE:
    std::swap(LHS, RHS);
    break;
  }

  // On a floating point condition, the flags are set as follows:
  //  ZF  PF  CF   op
  //   0 | 0 | 0 | X > Y
  //   0 | 0 | 1 | X < Y
  //   1 | 0 | 0 | X == Y
  //   1 | 1 | 1 | unordered
  switch (SetCCOpcode) {
  default: llvm_unreachable("Condcode should be pre-legalized away");
  case ISD::SETUEQ:
  case ISD::SETEQ:   return X86::COND_E;
  case ISD::SETOLT:              // flipped
  case ISD::SETOGT:
  case ISD::SETGT:   return X86::COND_A;
  case ISD::SETOLE:              // flipped
  case ISD::SETOGE:
  case ISD::SETGE:   return X86::COND_AE;
  case ISD::SETUGT:              // flipped
  case ISD::SETULT:
  case ISD::SETLT:   return X86::COND_B;
  case ISD::SETUGE:              // flipped
  case ISD::SETULE:
  case ISD::SETLE:   return X86::COND_BE;
  case ISD::SETONE:
  case ISD::SETNE:   return X86::COND_NE;
  case ISD::SETUO:   return X86::COND_P;
  case ISD::SETO:    return X86::COND_NP;
  case ISD::SETOEQ:
  case ISD::SETUNE:  return X86::COND_INVALID;
  }
}

/// hasFPCMov - Is there a floating point cmov for the specific X86 condition
/// code? The current x86 ISA includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86::COND_B:
  case X86::COND_BE:
  case X86::COND_E:
  case X86::COND_P:
  case X86::COND_A:
  case X86::COND_AE:
  case X86::COND_NE:
  case X86::COND_NP:
    return true;
  }
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
    if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
      return true;
  }
  return false;
}

/// isUndefOrInRange - Return true if Val is undef or if its value falls within
/// the specified range [Low, Hi).
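/// For example, isUndefOrInRange(-1, 0, 4) and isUndefOrInRange(2, 0, 4) both
/// return true, while isUndefOrInRange(4, 0, 4) returns false.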
static bool isUndefOrInRange(int Val, int Low, int Hi) {
  return (Val < 0) || (Val >= Low && Val < Hi);
}

/// isUndefOrEqual - Val is either less than zero (undef) or equal to the
/// specified value.
static bool isUndefOrEqual(int Val, int CmpVal) {
  return (Val < 0 || Val == CmpVal);
}

/// isSequentialOrUndefInRange - Return true if every element in Mask,
/// beginning at position Pos and ending at Pos+Size, falls within the
/// specified sequential range [Low, Low+Size) or is undef.
static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
                                       unsigned Pos, unsigned Size, int Low) {
  for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
    if (!isUndefOrEqual(Mask[i], Low))
      return false;
  return true;
}

/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference
/// the second operand.
static bool isPSHUFDMask(ArrayRef<int> Mask, EVT VT) {
  if (VT == MVT::v4f32 || VT == MVT::v4i32)
    return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4);
  if (VT == MVT::v2f64 || VT == MVT::v2i64)
    return (Mask[0] < 2 && Mask[1] < 2);
  return false;
}

/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements
/// that is suitable for input to PSHUFHW.
static bool isPSHUFHWMask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) {
  if (VT != MVT::v8i16 && (!HasAVX2 || VT != MVT::v16i16))
    return false;

  // Lower quadword copied in order or undef.
  if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
    return false;

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i)
    if (!isUndefOrInRange(Mask[i], 4, 8))
      return false;

  if (VT == MVT::v16i16) {
    // Lower quadword copied in order or undef.
    if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
      return false;

    // Upper quadword shuffled.
    for (unsigned i = 12; i != 16; ++i)
      if (!isUndefOrInRange(Mask[i], 12, 16))
        return false;
  }

  return true;
}

/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements
/// that is suitable for input to PSHUFLW.
static bool isPSHUFLWMask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) {
  if (VT != MVT::v8i16 && (!HasAVX2 || VT != MVT::v16i16))
    return false;

  // Upper quadword copied in order.
  if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
    return false;

  // Lower quadword shuffled.
  for (unsigned i = 0; i != 4; ++i)
    if (!isUndefOrInRange(Mask[i], 0, 4))
      return false;

  if (VT == MVT::v16i16) {
    // Upper quadword copied in order.
    if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
      return false;

    // Lower quadword shuffled.
    for (unsigned i = 8; i != 12; ++i)
      if (!isUndefOrInRange(Mask[i], 8, 12))
        return false;
  }

  return true;
}

/// isPALIGNRMask - Return true if the node specifies a shuffle of elements
/// that is suitable for input to PALIGNR.
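/// For example, for v8i16 the mask <1, 2, 3, 4, 5, 6, 7, 8> matches a PALIGNR
/// by two bytes: elements are taken consecutively starting at element 1 of
/// the first source and continuing into the second.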
static bool isPALIGNRMask(ArrayRef<int> Mask, EVT VT,
                          const X86Subtarget *Subtarget) {
  if ((VT.getSizeInBits() == 128 && !Subtarget->hasSSSE3()) ||
      (VT.getSizeInBits() == 256 && !Subtarget->hasAVX2()))
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned NumLaneElts = NumElts/NumLanes;

  // Do not handle 64-bit element shuffles with palignr.
  if (NumLaneElts == 2)
    return false;

  for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
    unsigned i;
    for (i = 0; i != NumLaneElts; ++i) {
      if (Mask[i+l] >= 0)
        break;
    }

    // Lane is all undef, go to next lane.
    if (i == NumLaneElts)
      continue;

    int Start = Mask[i+l];

    // Make sure it's in this lane in one of the sources.
    if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
        !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
      return false;

    // If not lane 0, then we must match lane 0.
    if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
      return false;

    // Correct second source to be contiguous with first source.
    if (Start >= (int)NumElts)
      Start -= NumElts - NumLaneElts;

    // Make sure we're shifting in the right direction.
    if (Start <= (int)(i+l))
      return false;

    Start -= i;

    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != NumLaneElts; ++i) {
      int Idx = Mask[i+l];

      // Make sure it's in this lane.
      if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
          !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
        return false;

      // If not lane 0, then we must match lane 0.
      if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
        return false;

      if (Idx >= (int)NumElts)
        Idx -= NumElts - NumLaneElts;

      if (!isUndefOrEqual(Idx, Start+i))
        return false;
    }
  }

  return true;
}

/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
/// the two vector operands have swapped position.
static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
                                     unsigned NumElems) {
  for (unsigned i = 0; i != NumElems; ++i) {
    int idx = Mask[i];
    if (idx < 0)
      continue;
    else if (idx < (int)NumElems)
      Mask[i] = idx + NumElems;
    else
      Mask[i] = idx - NumElems;
  }
}

/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to 128/256-bit
/// SHUFPS and SHUFPD. If Commuted is true, it checks whether the sources are
/// the reverse of what x86 shuffles want.
static bool isSHUFPMask(ArrayRef<int> Mask, EVT VT, bool HasAVX,
                        bool Commuted = false) {
  if (!HasAVX && VT.getSizeInBits() == 256)
    return false;

  unsigned NumElems = VT.getVectorNumElements();
  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned NumLaneElems = NumElems/NumLanes;

  if (NumLaneElems != 2 && NumLaneElems != 4)
    return false;

  // VSHUFPSY divides the resulting vector into 4 chunks.
  // The sources are also split into 4 chunks, and each destination
  // chunk must come from a different source chunk.
  //
  //  SRC1 =>   X7    X6    X5    X4    X3    X2    X1    X0
  //  SRC2 =>   Y7    Y6    Y5    Y4    Y3    Y2    Y1    Y0
  //
  //  DST  =>  Y7..Y4,   Y7..Y4,   X7..X4,   X7..X4,
  //           Y3..Y0,   Y3..Y0,   X3..X0,   X3..X0
  //
  // VSHUFPDY divides the resulting vector into 4 chunks.
  // The sources are also split into 4 chunks, and each destination
  // chunk must come from a different source chunk.
  //
  //  SRC1 =>      X3       X2       X1       X0
  //  SRC2 =>      Y3       Y2       Y1       Y0
  //
  //  DST  =>  Y3..Y2,  X3..X2,  Y1..Y0,  X1..X0
  //
  unsigned HalfLaneElems = NumLaneElems/2;
  for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
    for (unsigned i = 0; i != NumLaneElems; ++i) {
      int Idx = Mask[i+l];
      unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
      if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
        return false;
      // For VSHUFPSY, the mask of the second half must be the same as the
      // first but with the appropriate offsets. This works in the same way as
      // VPERMILPS works with masks.
      if (NumElems != 8 || l == 0 || Mask[i] < 0)
        continue;
      if (!isUndefOrEqual(Idx, Mask[i]+l))
        return false;
    }
  }

  return true;
}

/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
static bool isMOVHLPSMask(ArrayRef<int> Mask, EVT VT) {
  if (!VT.is128BitVector())
    return false;

  unsigned NumElems = VT.getVectorNumElements();

  if (NumElems != 4)
    return false;

  // Expect elt 0 == 6, elt 1 == 7, elt 2 == 2, elt 3 == 3, i.e. <6, 7, 2, 3>.
  return isUndefOrEqual(Mask[0], 6) &&
         isUndefOrEqual(Mask[1], 7) &&
         isUndefOrEqual(Mask[2], 2) &&
         isUndefOrEqual(Mask[3], 3);
}

/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
/// <2, 3, 2, 3>
static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, EVT VT) {
  if (!VT.is128BitVector())
    return false;

  unsigned NumElems = VT.getVectorNumElements();

  if (NumElems != 4)
    return false;

  return isUndefOrEqual(Mask[0], 2) &&
         isUndefOrEqual(Mask[1], 3) &&
         isUndefOrEqual(Mask[2], 2) &&
         isUndefOrEqual(Mask[3], 3);
}

/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
static bool isMOVLPMask(ArrayRef<int> Mask, EVT VT) {
  if (!VT.is128BitVector())
    return false;

  unsigned NumElems = VT.getVectorNumElements();

  if (NumElems != 2 && NumElems != 4)
    return false;

  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
    if (!isUndefOrEqual(Mask[i], i + NumElems))
      return false;

  for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
    if (!isUndefOrEqual(Mask[i], i))
      return false;

  return true;
}

/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLHPS.
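/// A matching v4 mask is <0, 1, 4, 5>: the low halves of both sources.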
static bool isMOVLHPSMask(ArrayRef<int> Mask, EVT VT) {
  if (!VT.is128BitVector())
    return false;

  unsigned NumElems = VT.getVectorNumElements();

  if (NumElems != 2 && NumElems != 4)
    return false;

  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
    if (!isUndefOrEqual(Mask[i], i))
      return false;

  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
    if (!isUndefOrEqual(Mask[i + e], i + NumElems))
      return false;

  return true;
}

//
// Some special combinations that can be optimized.
//
static
SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
                               SelectionDAG &DAG) {
  EVT VT = SVOp->getValueType(0);
  DebugLoc dl = SVOp->getDebugLoc();

  if (VT != MVT::v8i32 && VT != MVT::v8f32)
    return SDValue();

  ArrayRef<int> Mask = SVOp->getMask();

  // These are the special masks that may be optimized.
  static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
  static const int MaskToOptimizeOdd[]  = {1, 9, 3, 11, 5, 13, 7, 15};
  bool MatchEvenMask = true;
  bool MatchOddMask  = true;
  for (int i = 0; i < 8; ++i) {
    if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
      MatchEvenMask = false;
    if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
      MatchOddMask = false;
  }

  if (!MatchEvenMask && !MatchOddMask)
    return SDValue();

  SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);

  SDValue Op0 = SVOp->getOperand(0);
  SDValue Op1 = SVOp->getOperand(1);

  if (MatchEvenMask) {
    // Shift the second operand right by 32 bits.
    static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6};
    Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
  } else {
    // Shift the first operand left by 32 bits.
    static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1};
    Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
  }
  static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
  return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
}

/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
static bool isUNPCKLMask(ArrayRef<int> Mask, EVT VT,
                         bool HasAVX2, bool V2IsSplat = false) {
  unsigned NumElts = VT.getVectorNumElements();

  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unsupported vector type for unpckl");

  if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 &&
      (!HasAVX2 || (NumElts != 16 && NumElts != 32)))
    return false;

  // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
  // independently on 128-bit lanes.
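  // e.g., for v8i32 with AVX2, the expected UNPCKL mask is
  // <0, 8, 1, 9, 4, 12, 5, 13>, interleaving the low half of each 128-bit
  // lane of the two sources.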
  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned NumLaneElts = NumElts/NumLanes;

  for (unsigned l = 0; l != NumLanes; ++l) {
    for (unsigned i = l*NumLaneElts, j = l*NumLaneElts;
         i != (l+1)*NumLaneElts;
         i += 2, ++j) {
      int BitI  = Mask[i];
      int BitI1 = Mask[i+1];
      if (!isUndefOrEqual(BitI, j))
        return false;
      if (V2IsSplat) {
        if (!isUndefOrEqual(BitI1, NumElts))
          return false;
      } else {
        if (!isUndefOrEqual(BitI1, j + NumElts))
          return false;
      }
    }
  }

  return true;
}

/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
static bool isUNPCKHMask(ArrayRef<int> Mask, EVT VT,
                         bool HasAVX2, bool V2IsSplat = false) {
  unsigned NumElts = VT.getVectorNumElements();

  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unsupported vector type for unpckh");

  if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 &&
      (!HasAVX2 || (NumElts != 16 && NumElts != 32)))
    return false;

  // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
  // independently on 128-bit lanes.
  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned NumLaneElts = NumElts/NumLanes;

  for (unsigned l = 0; l != NumLanes; ++l) {
    for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2;
         i != (l+1)*NumLaneElts; i += 2, ++j) {
      int BitI  = Mask[i];
      int BitI1 = Mask[i+1];
      if (!isUndefOrEqual(BitI, j))
        return false;
      if (V2IsSplat) {
        // A splatted V2 is normalized so every reference to it is to its
        // first element.
        if (!isUndefOrEqual(BitI1, NumElts))
          return false;
      } else {
        if (!isUndefOrEqual(BitI1, j+NumElts))
          return false;
      }
    }
  }
  return true;
}

/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
/// <0, 0, 1, 1>
static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, EVT VT,
                                  bool HasAVX2) {
  unsigned NumElts = VT.getVectorNumElements();

  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unsupported vector type for unpckl");

  if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 &&
      (!HasAVX2 || (NumElts != 16 && NumElts != 32)))
    return false;

  // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern.
  // FIXME: Need a better way to get rid of this, there's no latency difference
  // between UNPCKLPD and MOVDDUP, the latter should always be checked first
  // and the former later. We should also remove the "_undef" special mask.
  if (NumElts == 4 && VT.getSizeInBits() == 256)
    return false;

  // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
  // independently on 128-bit lanes.
  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned NumLaneElts = NumElts/NumLanes;

  for (unsigned l = 0; l != NumLanes; ++l) {
    for (unsigned i = l*NumLaneElts, j = l*NumLaneElts;
         i != (l+1)*NumLaneElts;
         i += 2, ++j) {
      int BitI  = Mask[i];
      int BitI1 = Mask[i+1];

      if (!isUndefOrEqual(BitI, j))
        return false;
      if (!isUndefOrEqual(BitI1, j))
        return false;
    }
  }

  return true;
}

/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
/// <2, 2, 3, 3>
static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) {
  unsigned NumElts = VT.getVectorNumElements();

  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unsupported vector type for unpckh");

  if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 &&
      (!HasAVX2 || (NumElts != 16 && NumElts != 32)))
    return false;

  // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
  // independently on 128-bit lanes.
  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned NumLaneElts = NumElts/NumLanes;

  for (unsigned l = 0; l != NumLanes; ++l) {
    for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2;
         i != (l+1)*NumLaneElts; i += 2, ++j) {
      int BitI  = Mask[i];
      int BitI1 = Mask[i+1];
      if (!isUndefOrEqual(BitI, j))
        return false;
      if (!isUndefOrEqual(BitI1, j))
        return false;
    }
  }
  return true;
}

/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
  if (VT.getVectorElementType().getSizeInBits() < 32)
    return false;
  if (!VT.is128BitVector())
    return false;

  unsigned NumElts = VT.getVectorNumElements();

  if (!isUndefOrEqual(Mask[0], NumElts))
    return false;

  for (unsigned i = 1; i != NumElts; ++i)
    if (!isUndefOrEqual(Mask[i], i))
      return false;

  return true;
}

/// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
/// as permutations between 128-bit chunks or halves. As an example, in this
/// shuffle:
///   vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
/// the first half comes from the second half of V1 and the second half from
/// the second half of V2.
static bool isVPERM2X128Mask(ArrayRef<int> Mask, EVT VT, bool HasAVX) {
  if (!HasAVX || !VT.is256BitVector())
    return false;

  // The shuffle result is divided into half A and half B. In total the two
  // sources have 4 halves, namely: C, D, E, F. The final values of A and
  // B must come from C, D, E or F.
  unsigned HalfSize = VT.getVectorNumElements()/2;
  bool MatchA = false, MatchB = false;

  // Check if A comes from one of C, D, E, F.
  for (unsigned Half = 0; Half != 4; ++Half) {
    if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
      MatchA = true;
      break;
    }
  }

  // Check if B comes from one of C, D, E, F.
  for (unsigned Half = 0; Half != 4; ++Half) {
    if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
      MatchB = true;
      break;
    }
  }

  return MatchA && MatchB;
}

/// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with VPERM2F128/VPERM2I128 instructions.
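/// For example, the v8i32 mask <4, 5, 6, 7, 12, 13, 14, 15> selects the high
/// half of V1 and the high half of V2, which encodes as the immediate 0x31.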
static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
  EVT VT = SVOp->getValueType(0);

  unsigned HalfSize = VT.getVectorNumElements()/2;

  unsigned FstHalf = 0, SndHalf = 0;
  for (unsigned i = 0; i < HalfSize; ++i) {
    if (SVOp->getMaskElt(i) > 0) {
      FstHalf = SVOp->getMaskElt(i)/HalfSize;
      break;
    }
  }
  for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
    if (SVOp->getMaskElt(i) > 0) {
      SndHalf = SVOp->getMaskElt(i)/HalfSize;
      break;
    }
  }

  return (FstHalf | (SndHalf << 4));
}

/// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
/// Note that VPERMIL mask matching differs depending on whether the underlying
/// type is 32-bit or 64-bit. In VPERMILPS the high half of the mask should
/// point to the same elements as the low half, but in the higher half of the
/// source. In VPERMILPD the two lanes could be shuffled independently of each
/// other with the same restriction that lanes can't be crossed. Also handles
/// PSHUFDY.
static bool isVPERMILPMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) {
  if (!HasAVX)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  // Only match 256-bit with 32/64-bit types.
  if (VT.getSizeInBits() != 256 || (NumElts != 4 && NumElts != 8))
    return false;

  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned LaneSize = NumElts/NumLanes;
  for (unsigned l = 0; l != NumElts; l += LaneSize) {
    for (unsigned i = 0; i != LaneSize; ++i) {
      if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
        return false;
      if (NumElts != 8 || l == 0)
        continue;
      // VPERMILPS handling
      if (Mask[i] < 0)
        continue;
      if (!isUndefOrEqual(Mask[i+l], Mask[i]+l))
        return false;
    }
  }

  return true;
}

/// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of a
/// MOVL mask. X86 MOVSS/MOVSD require the lowest element to be the lowest
/// element of vector 2 and the other elements to come from vector 1 in order,
/// so a commuted mask takes element 0 from vector 1 and the rest from
/// vector 2.
static bool isCommutedMOVLMask(ArrayRef<int> Mask, EVT VT,
                               bool V2IsSplat = false, bool V2IsUndef = false) {
  if (!VT.is128BitVector())
    return false;

  unsigned NumOps = VT.getVectorNumElements();
  if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
    return false;

  if (!isUndefOrEqual(Mask[0], 0))
    return false;

  for (unsigned i = 1; i != NumOps; ++i)
    if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
          (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
          (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
      return false;

  return true;
}

/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
/// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
static bool isMOVSHDUPMask(ArrayRef<int> Mask, EVT VT,
                           const X86Subtarget *Subtarget) {
  if (!Subtarget->hasSSE3())
    return false;

  unsigned NumElems = VT.getVectorNumElements();

  if ((VT.getSizeInBits() == 128 && NumElems != 4) ||
      (VT.getSizeInBits() == 256 && NumElems != 8))
    return false;

  // "i+1" is the value the indexed mask element must have.
  for (unsigned i = 0; i != NumElems; i += 2)
    if (!isUndefOrEqual(Mask[i], i+1) ||
        !isUndefOrEqual(Mask[i+1], i+1))
      return false;

  return true;
}

/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
/// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
static bool isMOVSLDUPMask(ArrayRef<int> Mask, EVT VT,
                           const X86Subtarget *Subtarget) {
  if (!Subtarget->hasSSE3())
    return false;

  unsigned NumElems = VT.getVectorNumElements();

  if ((VT.getSizeInBits() == 128 && NumElems != 4) ||
      (VT.getSizeInBits() == 256 && NumElems != 8))
    return false;

  // "i" is the value the indexed mask element must have.
  for (unsigned i = 0; i != NumElems; i += 2)
    if (!isUndefOrEqual(Mask[i], i) ||
        !isUndefOrEqual(Mask[i+1], i))
      return false;

  return true;
}

/// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to the 256-bit
/// version of MOVDDUP.
static bool isMOVDDUPYMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) {
  if (!HasAVX || !VT.is256BitVector())
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (NumElts != 4)
    return false;

  for (unsigned i = 0; i != NumElts/2; ++i)
    if (!isUndefOrEqual(Mask[i], 0))
      return false;
  for (unsigned i = NumElts/2; i != NumElts; ++i)
    if (!isUndefOrEqual(Mask[i], NumElts/2))
      return false;
  return true;
}

/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to the 128-bit
/// version of MOVDDUP.
static bool isMOVDDUPMask(ArrayRef<int> Mask, EVT VT) {
  if (!VT.is128BitVector())
    return false;

  unsigned e = VT.getVectorNumElements() / 2;
  for (unsigned i = 0; i != e; ++i)
    if (!isUndefOrEqual(Mask[i], i))
      return false;
  for (unsigned i = 0; i != e; ++i)
    if (!isUndefOrEqual(Mask[e+i], i))
      return false;
  return true;
}

/// isVEXTRACTF128Index - Return true if the specified
/// EXTRACT_SUBVECTOR operand specifies a vector extract that is
/// suitable for input to VEXTRACTF128.
bool X86::isVEXTRACTF128Index(SDNode *N) {
  if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
    return false;

  // The index should be aligned on a 128-bit boundary.
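  // e.g., an extract from a v8i32 at index 4 is fine (4 * 32 == 128), while
  // one at index 2 is not (2 * 32 == 64).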
  uint64_t Index =
    cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();

  unsigned VL = N->getValueType(0).getVectorNumElements();
  unsigned VBits = N->getValueType(0).getSizeInBits();
  unsigned ElSize = VBits / VL;
  bool Result = (Index * ElSize) % 128 == 0;

  return Result;
}

/// isVINSERTF128Index - Return true if the specified INSERT_SUBVECTOR
/// operand specifies a subvector insert that is suitable for input to
/// VINSERTF128.
bool X86::isVINSERTF128Index(SDNode *N) {
  if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
    return false;

  // The index should be aligned on a 128-bit boundary.
  uint64_t Index =
    cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();

  unsigned VL = N->getValueType(0).getVectorNumElements();
  unsigned VBits = N->getValueType(0).getSizeInBits();
  unsigned ElSize = VBits / VL;
  bool Result = (Index * ElSize) % 128 == 0;

  return Result;
}

/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
/// Handles 128-bit and 256-bit.
static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
  EVT VT = N->getValueType(0);

  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unsupported vector type for PSHUF/SHUFP");

  // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
  // independently on 128-bit lanes.
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned NumLaneElts = NumElts/NumLanes;

  assert((NumLaneElts == 2 || NumLaneElts == 4) &&
         "Only supports 2 or 4 elements per lane");

  unsigned Shift = (NumLaneElts == 4) ? 1 : 0;
  unsigned Mask = 0;
  for (unsigned i = 0; i != NumElts; ++i) {
    int Elt = N->getMaskElt(i);
    if (Elt < 0) continue;
    Elt &= NumLaneElts - 1;
    unsigned ShAmt = (i << Shift) % 8;
    Mask |= Elt << ShAmt;
  }

  return Mask;
}

/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
  EVT VT = N->getValueType(0);

  assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
         "Unsupported vector type for PSHUFHW");

  unsigned NumElts = VT.getVectorNumElements();

  unsigned Mask = 0;
  for (unsigned l = 0; l != NumElts; l += 8) {
    // 8 elements per lane, but we only care about the last 4.
    for (unsigned i = 0; i < 4; ++i) {
      int Elt = N->getMaskElt(l+i+4);
      if (Elt < 0) continue;
      Elt &= 0x3; // only 2 bits.
      Mask |= Elt << (i * 2);
    }
  }

  return Mask;
}

/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
  EVT VT = N->getValueType(0);

  assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
         "Unsupported vector type for PSHUFLW");

  unsigned NumElts = VT.getVectorNumElements();

  unsigned Mask = 0;
  for (unsigned l = 0; l != NumElts; l += 8) {
    // 8 elements per lane, but we only care about the first 4.
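    // Each of the four low elements contributes two bits to the immediate;
    // e.g., the v8i16 mask <2, 1, 0, 3, 4, 5, 6, 7> encodes as 0xC6.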
    for (unsigned i = 0; i < 4; ++i) {
      int Elt = N->getMaskElt(l+i);
      if (Elt < 0) continue;
      Elt &= 0x3; // only 2 bits.
      Mask |= Elt << (i * 2);
    }
  }

  return Mask;
}

/// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction.
static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
  EVT VT = SVOp->getValueType(0);
  unsigned EltSize = VT.getVectorElementType().getSizeInBits() >> 3;

  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned NumLaneElts = NumElts/NumLanes;

  int Val = 0;
  unsigned i;
  for (i = 0; i != NumElts; ++i) {
    Val = SVOp->getMaskElt(i);
    if (Val >= 0)
      break;
  }
  if (Val >= (int)NumElts)
    Val -= NumElts - NumLaneElts;

  assert(Val - i > 0 && "PALIGNR imm should be positive");
  return (Val - i) * EltSize;
}

/// getExtractVEXTRACTF128Immediate - Return the appropriate immediate
/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
/// instructions.
unsigned X86::getExtractVEXTRACTF128Immediate(SDNode *N) {
  if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
    llvm_unreachable("Illegal extract subvector for VEXTRACTF128");

  uint64_t Index =
    cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();

  EVT VecVT = N->getOperand(0).getValueType();
  EVT ElVT = VecVT.getVectorElementType();

  unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits();
  return Index / NumElemsPerChunk;
}

/// getInsertVINSERTF128Immediate - Return the appropriate immediate
/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
/// instructions.
unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) {
  if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
    llvm_unreachable("Illegal insert subvector for VINSERTF128");

  uint64_t Index =
    cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();

  EVT VecVT = N->getValueType(0);
  EVT ElVT = VecVT.getVectorElementType();

  unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits();
  return Index / NumElemsPerChunk;
}

/// getShuffleCLImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with VPERMQ and VPERMPD instructions.
/// Handles 256-bit.
static unsigned getShuffleCLImmediate(ShuffleVectorSDNode *N) {
  EVT VT = N->getValueType(0);

  unsigned NumElts = VT.getVectorNumElements();

  assert((VT.is256BitVector() && NumElts == 4) &&
         "Unsupported vector type for VPERMQ/VPERMPD");

  unsigned Mask = 0;
  for (unsigned i = 0; i != NumElts; ++i) {
    int Elt = N->getMaskElt(i);
    if (Elt < 0)
      continue;
    Mask |= Elt << (i*2);
  }

  return Mask;
}

/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
bool X86::isZeroNode(SDValue Elt) {
  return ((isa<ConstantSDNode>(Elt) &&
           cast<ConstantSDNode>(Elt)->isNullValue()) ||
          (isa<ConstantFPSDNode>(Elt) &&
           cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
}

/// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in
/// their permute mask.
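/// For example, commuting <0, 5, 2, 7> on v4i32 yields <4, 1, 6, 3> with the
/// two operands swapped.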
static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp,
                                    SelectionDAG &DAG) {
  EVT VT = SVOp->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;

  for (unsigned i = 0; i != NumElems; ++i) {
    int Idx = SVOp->getMaskElt(i);
    if (Idx >= 0) {
      if (Idx < (int)NumElems)
        Idx += NumElems;
      else
        Idx -= NumElems;
    }
    MaskVec.push_back(Idx);
  }
  return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1),
                              SVOp->getOperand(0), &MaskVec[0]);
}

/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
/// match movhlps. The lower half elements should come from the upper half of
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order).
static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, EVT VT) {
  if (!VT.is128BitVector())
    return false;
  if (VT.getVectorNumElements() != 4)
    return false;
  for (unsigned i = 0, e = 2; i != e; ++i)
    if (!isUndefOrEqual(Mask[i], i+2))
      return false;
  for (unsigned i = 2; i != 4; ++i)
    if (!isUndefOrEqual(Mask[i], i+4))
      return false;
  return true;
}

/// isScalarLoadToVector - Returns true if the node is a scalar load that
/// is promoted to a vector. It also returns the LoadSDNode by reference if
/// required.
static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) {
  if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
    return false;
  N = N->getOperand(0).getNode();
  if (!ISD::isNON_EXTLoad(N))
    return false;
  if (LD)
    *LD = cast<LoadSDNode>(N);
  return true;
}

// Test whether the given value is a vector value which will be legalized
// into a load.
static bool WillBeConstantPoolLoad(SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  // Check for any non-constant elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    switch (N->getOperand(i).getNode()->getOpcode()) {
    case ISD::UNDEF:
    case ISD::ConstantFP:
    case ISD::Constant:
      break;
    default:
      return false;
    }

  // Vectors of all-zeros and all-ones are materialized with special
  // instructions rather than being loaded.
  return !ISD::isBuildVectorAllZeros(N) &&
         !ISD::isBuildVectorAllOnes(N);
}

/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
/// match movlp{s|d}. The lower half elements should come from the lower half
/// of V1 (and in order), and the upper half elements should come from the
/// upper half of V2 (and in order). And since V1 will become the source of
/// the MOVLP, it must be either a vector load or a scalar load to vector.
static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
                               ArrayRef<int> Mask, EVT VT) {
  if (!VT.is128BitVector())
    return false;

  if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
    return false;
  // If V2 is a vector load, don't do this transformation. We will try to fold
  // the load into a SHUFPS op instead.
  if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
    return false;

  unsigned NumElems = VT.getVectorNumElements();

  if (NumElems != 2 && NumElems != 4)
    return false;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
    if (!isUndefOrEqual(Mask[i], i))
      return false;
  for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
    if (!isUndefOrEqual(Mask[i], i+NumElems))
      return false;
  return true;
}

/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
/// all the same.
static bool isSplatVector(SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  SDValue SplatValue = N->getOperand(0);
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    if (N->getOperand(i) != SplatValue)
      return false;
  return true;
}

/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to a zero vector.
/// FIXME: move to dag combiner / method on ShuffleVectorSDNode
static bool isZeroShuffle(ShuffleVectorSDNode *N) {
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned NumElems = N->getValueType(0).getVectorNumElements();
  for (unsigned i = 0; i != NumElems; ++i) {
    int Idx = N->getMaskElt(i);
    if (Idx >= (int)NumElems) {
      unsigned Opc = V2.getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
        return false;
    } else if (Idx >= 0) {
      unsigned Opc = V1.getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !X86::isZeroNode(V1.getOperand(Idx)))
        return false;
    }
  }
  return true;
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
///
static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
                             SelectionDAG &DAG, DebugLoc dl) {
  assert(VT.isVector() && "Expected a vector type");
  unsigned Size = VT.getSizeInBits();

  // Always build SSE zero vectors as <4 x i32> bitcasted
  // to their dest type. This ensures they get CSE'd.
  SDValue Vec;
  if (Size == 128) {  // SSE
    if (Subtarget->hasSSE2()) {  // SSE2
      SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
      Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
    } else { // SSE1
      SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
      Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
    }
  } else if (Size == 256) { // AVX
    if (Subtarget->hasAVX2()) { // AVX2
      SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
      SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
      Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8);
    } else {
      // 256-bit logic and arithmetic instructions in AVX are all
      // floating-point, no support for integer ops. Emit fp zeroed vectors.
      SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
      SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
      Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8);
    }
  } else
    llvm_unreachable("Unexpected vector type");

  return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
}

/// getOnesVector - Returns a vector of specified type with all bits set.
/// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
/// no AVX2 support, use two <4 x i32> inserted in a <8 x i32> appropriately.
/// Then bitcast to their original type, ensuring they get CSE'd.
static SDValue getOnesVector(EVT VT, bool HasAVX2, SelectionDAG &DAG,
                             DebugLoc dl) {
  assert(VT.isVector() && "Expected a vector type");
  unsigned Size = VT.getSizeInBits();

  SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
  SDValue Vec;
  if (Size == 256) {
    if (HasAVX2) { // AVX2
      SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
      Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8);
    } else { // AVX
      Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
      Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
    }
  } else if (Size == 128) {
    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
  } else
    llvm_unreachable("Unexpected vector type");

  return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
}

/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
/// that point to V2 point to its first element.
static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
  for (unsigned i = 0; i != NumElems; ++i) {
    if (Mask[i] > (int)NumElems) {
      Mask[i] = NumElems;
    }
  }
}

/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
/// operation of specified width.
static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                       SDValue V2) {
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> Mask;
  Mask.push_back(NumElems);
  for (unsigned i = 1; i != NumElems; ++i)
    Mask.push_back(i);
  return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
}

/// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                          SDValue V2) {
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> Mask;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
    Mask.push_back(i);
    Mask.push_back(i + NumElems);
  }
  return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
}

/// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                          SDValue V2) {
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> Mask;
  for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
    Mask.push_back(i + Half);
    Mask.push_back(i + NumElems + Half);
  }
  return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
}

// PromoteSplati8i16 - i16 and i8 vector types can't be used directly by
// a generic shuffle instruction because the target has no such instructions.
// Generate shuffles which repeat i16 and i8 several times until they can be
// represented by v4f32 and then be manipulated by target supported shuffles.
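// For example, to splat element 5 of a v8i16, one unpackh produces
// <4, 4, 5, 5, 6, 6, 7, 7>; each pair of identical i16 values now forms one
// 32-bit element, so the splat becomes element 1 of a v4f32.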
static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
  EVT VT = V.getValueType();
  int NumElems = VT.getVectorNumElements();
  DebugLoc dl = V.getDebugLoc();

  while (NumElems > 4) {
    if (EltNo < NumElems/2) {
      V = getUnpackl(DAG, dl, VT, V, V);
    } else {
      V = getUnpackh(DAG, dl, VT, V, V);
      EltNo -= NumElems/2;
    }
    NumElems >>= 1;
  }
  return V;
}

/// getLegalSplat - Generate a legal splat with supported x86 shuffles.
static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
  EVT VT = V.getValueType();
  DebugLoc dl = V.getDebugLoc();
  unsigned Size = VT.getSizeInBits();

  if (Size == 128) {
    V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
    int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
    V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
                             &SplatMask[0]);
  } else if (Size == 256) {
    // To use VPERMILPS to splat scalars, the second half of indices must
    // refer to the higher part, which is a duplication of the lower one,
    // because VPERMILPS can only handle in-lane permutations.
    int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
                         EltNo+4, EltNo+4, EltNo+4, EltNo+4 };

    V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
    V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
                             &SplatMask[0]);
  } else
    llvm_unreachable("Vector size not supported");

  return DAG.getNode(ISD::BITCAST, dl, VT, V);
}

/// PromoteSplat - Splat is promoted to target supported vector shuffles.
static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
  EVT SrcVT = SV->getValueType(0);
  SDValue V1 = SV->getOperand(0);
  DebugLoc dl = SV->getDebugLoc();

  int EltNo = SV->getSplatIndex();
  int NumElems = SrcVT.getVectorNumElements();
  unsigned Size = SrcVT.getSizeInBits();

  assert(((Size == 128 && NumElems > 4) || Size == 256) &&
         "Unknown how to promote splat for type");

  // Extract the 128-bit part containing the splat element and update
  // the splat element index when it refers to the higher register.
  if (Size == 256) {
    V1 = Extract128BitVector(V1, EltNo, DAG, dl);
    if (EltNo >= NumElems/2)
      EltNo -= NumElems/2;
  }

  // All i16 and i8 vector types can't be used directly by a generic shuffle
  // instruction because the target has no such instruction. Generate shuffles
  // which repeat i16 and i8 several times until they fit in i32, and then can
  // be manipulated by target supported shuffles.
  EVT EltVT = SrcVT.getVectorElementType();
  if (EltVT == MVT::i8 || EltVT == MVT::i16)
    V1 = PromoteSplati8i16(V1, DAG, EltNo);

  // Recreate the 256-bit vector and place the same 128-bit vector
  // into the low and high part. This is necessary because we want
  // to use VPERM* to shuffle the vectors.
  if (Size == 256) {
    V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
  }

  return getLegalSplat(DAG, V1, EltNo);
}

/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
/// vector and a zero or undef vector. This produces a shuffle where the low
/// element of V2 is swizzled into the zero/undef vector, landing at element
/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
                                           bool IsZero,
                                           const X86Subtarget *Subtarget,
                                           SelectionDAG &DAG) {
  EVT VT = V2.getValueType();
  SDValue V1 = IsZero
    ? getZeroVector(VT, Subtarget, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT);
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 16> MaskVec;
  for (unsigned i = 0; i != NumElems; ++i)
    // If this is the insertion idx, put the low elt of V2 here.
    MaskVec.push_back(i == Idx ? NumElems : i);
  return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]);
}

/// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
/// target specific opcode. Returns true if the Mask could be calculated.
/// Sets IsUnary to true if the node uses only one source.
static bool getTargetShuffleMask(SDNode *N, MVT VT,
                                 SmallVectorImpl<int> &Mask, bool &IsUnary) {
  unsigned NumElems = VT.getVectorNumElements();
  SDValue ImmN;

  IsUnary = false;
  switch(N->getOpcode()) {
  case X86ISD::SHUFP:
    ImmN = N->getOperand(N->getNumOperands()-1);
    DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    break;
  case X86ISD::UNPCKH:
    DecodeUNPCKHMask(VT, Mask);
    break;
  case X86ISD::UNPCKL:
    DecodeUNPCKLMask(VT, Mask);
    break;
  case X86ISD::MOVHLPS:
    DecodeMOVHLPSMask(NumElems, Mask);
    break;
  case X86ISD::MOVLHPS:
    DecodeMOVLHPSMask(NumElems, Mask);
    break;
  case X86ISD::PSHUFD:
  case X86ISD::VPERMILP:
    ImmN = N->getOperand(N->getNumOperands()-1);
    DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = true;
    break;
  case X86ISD::PSHUFHW:
    ImmN = N->getOperand(N->getNumOperands()-1);
    DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = true;
    break;
  case X86ISD::PSHUFLW:
    ImmN = N->getOperand(N->getNumOperands()-1);
    DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = true;
    break;
  case X86ISD::VPERMI:
    ImmN = N->getOperand(N->getNumOperands()-1);
    DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = true;
    break;
  case X86ISD::MOVSS:
  case X86ISD::MOVSD: {
    // The index 0 always comes from the first element of the second source,
    // this is why MOVSS and MOVSD are used in the first place. The other
    // elements come from the other positions of the first source vector.
    Mask.push_back(NumElems);
    for (unsigned i = 1; i != NumElems; ++i) {
      Mask.push_back(i);
    }
    break;
  }
  case X86ISD::VPERM2X128:
    ImmN = N->getOperand(N->getNumOperands()-1);
    DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    if (Mask.empty()) return false;
    break;
  case X86ISD::MOVDDUP:
  case X86ISD::MOVLHPD:
  case X86ISD::MOVLPD:
  case X86ISD::MOVLPS:
  case X86ISD::MOVSHDUP:
  case X86ISD::MOVSLDUP:
  case X86ISD::PALIGN:
    // Not yet implemented.
    return false;
  default: llvm_unreachable("unknown target shuffle node");
  }

  return true;
}

/// getShuffleScalarElt - Returns the scalar element that will make up the ith
/// element of the result of the vector shuffle.
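///
/// Illustrative example (made-up operands): given
///   %r = shuffle (build_vector a, b, c, d), undef, <3, 1, -1, 0>
/// querying Index 0 follows mask element 3 into operand 0 and returns 'd',
/// while the undef mask element (-1) at Index 2 yields an UNDEF scalar.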
static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
                                   unsigned Depth) {
  if (Depth == 6)
    return SDValue();             // Limit search depth.

  SDValue V = SDValue(N, 0);
  EVT VT = V.getValueType();
  unsigned Opcode = V.getOpcode();

  // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
  if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
    int Elt = SV->getMaskElt(Index);

    if (Elt < 0)
      return DAG.getUNDEF(VT.getVectorElementType());

    unsigned NumElems = VT.getVectorNumElements();
    SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
                                         : SV->getOperand(1);
    return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
  }

  // Recurse into target specific vector shuffles to find scalars.
  if (isTargetShuffle(Opcode)) {
    MVT ShufVT = V.getValueType().getSimpleVT();
    unsigned NumElems = ShufVT.getVectorNumElements();
    SmallVector<int, 16> ShuffleMask;
    SDValue ImmN;
    bool IsUnary;

    if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
      return SDValue();

    int Elt = ShuffleMask[Index];
    if (Elt < 0)
      return DAG.getUNDEF(ShufVT.getVectorElementType());

    SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
                                         : N->getOperand(1);
    return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
                               Depth+1);
  }

  // Actual nodes that may contain scalar elements
  if (Opcode == ISD::BITCAST) {
    V = V.getOperand(0);
    EVT SrcVT = V.getValueType();
    unsigned NumElems = VT.getVectorNumElements();

    if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
      return SDValue();
  }

  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    return (Index == 0) ? V.getOperand(0)
                        : DAG.getUNDEF(VT.getVectorElementType());

  if (V.getOpcode() == ISD::BUILD_VECTOR)
    return V.getOperand(Index);

  return SDValue();
}

/// getNumOfConsecutiveZeros - Return the number of elements of a vector
/// shuffle operation which consecutively come from zero. The search can
/// start in two different directions, from left or right.
static
unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp, unsigned NumElems,
                                  bool ZerosFromLeft, SelectionDAG &DAG) {
  unsigned i;
  for (i = 0; i != NumElems; ++i) {
    unsigned Index = ZerosFromLeft ? i : NumElems-i-1;
    SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
    if (!(Elt.getNode() &&
         (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt))))
      break;
  }

  return i;
}

/// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
/// correspond consecutively to elements from one of the vector operands,
/// starting from its index OpIdx. Also set OpNum to the source vector operand
/// the consecutive elements come from.
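///
/// Illustrative example (made-up mask): for a v4i32 mask <1, 2, 3, -1> with
/// MaskI=0, MaskE=4 and OpIdx=1, the defined indices select consecutive
/// elements 1, 2, 3 of V1 (the undef is skipped), so OpNum is set to 0 and
/// true is returned.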
static
bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
                              unsigned MaskI, unsigned MaskE, unsigned OpIdx,
                              unsigned NumElems, unsigned &OpNum) {
  bool SeenV1 = false;
  bool SeenV2 = false;

  for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
    int Idx = SVOp->getMaskElt(i);
    // Ignore undef indices
    if (Idx < 0)
      continue;

    if (Idx < (int)NumElems)
      SeenV1 = true;
    else
      SeenV2 = true;

    // Only accept consecutive elements from the same vector
    if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
      return false;
  }

  OpNum = SeenV1 ? 0 : 1;
  return true;
}

/// isVectorShiftRight - Returns true if the shuffle can be implemented as a
/// logical right shift of a vector.
static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
                               bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
  unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
  unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
              false /* check zeros from right */, DAG);
  unsigned OpSrc;

  if (!NumZeros)
    return false;

  // Considering the elements in the mask that are not consecutive zeros,
  // check if they consecutively come from only one of the source vectors.
  //
  //               V1 = {X, A, B, C}     0
  //                         \  \  \    /
  //   vector_shuffle V1, V2 <1, 2, 3, X>
  //
  if (!isShuffleMaskConsecutive(SVOp,
            0,                 // Mask Start Index
            NumElems-NumZeros, // Mask End Index (exclusive)
            NumZeros,          // Where to start looking in the src vector
            NumElems,          // Number of elements in vector
            OpSrc))            // Which source operand?
    return false;

  isLeft = false;
  ShAmt = NumZeros;
  ShVal = SVOp->getOperand(OpSrc);
  return true;
}

/// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
/// logical left shift of a vector.
static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
                              bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
  unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
  unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
              true /* check zeros from left */, DAG);
  unsigned OpSrc;

  if (!NumZeros)
    return false;

  // Considering the elements in the mask that are not consecutive zeros,
  // check if they consecutively come from only one of the source vectors.
  //
  //                 0    { A, B, X, X } = V2
  //                / \    /  /
  //   vector_shuffle V1, V2 <X, X, 4, 5>
  //
  if (!isShuffleMaskConsecutive(SVOp,
            NumZeros,  // Mask Start Index
            NumElems,  // Mask End Index (exclusive)
            0,         // Where to start looking in the src vector
            NumElems,  // Number of elements in vector
            OpSrc))    // Which source operand?
    return false;

  isLeft = true;
  ShAmt = NumZeros;
  ShVal = SVOp->getOperand(OpSrc);
  return true;
}

/// isVectorShift - Returns true if the shuffle can be implemented as a
/// logical left or right shift of a vector.
static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
                          bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
  // Although the logic below supports any bitwidth size, there are no
  // shift instructions which handle more than 128-bit vectors.
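  //
  // Sketch with made-up values: the v8i16 shuffle
  //   shuffle V1, zero, <8, 0, 1, 2, 3, 4, 5, 6>
  // has one leading zero followed by consecutive elements of V1, so it is
  // matched as a left shift (isLeft = true, ShVal = V1, ShAmt = 1 element),
  // which getVShift later turns into a PSLLDQ-style byte shift.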
4768 if (!SVOp->getValueType(0).is128BitVector()) 4769 return false; 4770 4771 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) || 4772 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt)) 4773 return true; 4774 4775 return false; 4776} 4777 4778/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 4779/// 4780static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, 4781 unsigned NumNonZero, unsigned NumZero, 4782 SelectionDAG &DAG, 4783 const X86Subtarget* Subtarget, 4784 const TargetLowering &TLI) { 4785 if (NumNonZero > 8) 4786 return SDValue(); 4787 4788 DebugLoc dl = Op.getDebugLoc(); 4789 SDValue V(0, 0); 4790 bool First = true; 4791 for (unsigned i = 0; i < 16; ++i) { 4792 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 4793 if (ThisIsNonZero && First) { 4794 if (NumZero) 4795 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl); 4796 else 4797 V = DAG.getUNDEF(MVT::v8i16); 4798 First = false; 4799 } 4800 4801 if ((i & 1) != 0) { 4802 SDValue ThisElt(0, 0), LastElt(0, 0); 4803 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 4804 if (LastIsNonZero) { 4805 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl, 4806 MVT::i16, Op.getOperand(i-1)); 4807 } 4808 if (ThisIsNonZero) { 4809 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i)); 4810 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, 4811 ThisElt, DAG.getConstant(8, MVT::i8)); 4812 if (LastIsNonZero) 4813 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt); 4814 } else 4815 ThisElt = LastElt; 4816 4817 if (ThisElt.getNode()) 4818 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt, 4819 DAG.getIntPtrConstant(i/2)); 4820 } 4821 } 4822 4823 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V); 4824} 4825 4826/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 4827/// 4828static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, 4829 unsigned NumNonZero, unsigned NumZero, 4830 SelectionDAG &DAG, 4831 const X86Subtarget* Subtarget, 4832 const TargetLowering &TLI) { 4833 if (NumNonZero > 4) 4834 return SDValue(); 4835 4836 DebugLoc dl = Op.getDebugLoc(); 4837 SDValue V(0, 0); 4838 bool First = true; 4839 for (unsigned i = 0; i < 8; ++i) { 4840 bool isNonZero = (NonZeros & (1 << i)) != 0; 4841 if (isNonZero) { 4842 if (First) { 4843 if (NumZero) 4844 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl); 4845 else 4846 V = DAG.getUNDEF(MVT::v8i16); 4847 First = false; 4848 } 4849 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, 4850 MVT::v8i16, V, Op.getOperand(i), 4851 DAG.getIntPtrConstant(i)); 4852 } 4853 } 4854 4855 return V; 4856} 4857 4858/// getVShift - Return a vector logical shift node. 4859/// 4860static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, 4861 unsigned NumBits, SelectionDAG &DAG, 4862 const TargetLowering &TLI, DebugLoc dl) { 4863 assert(VT.is128BitVector() && "Unknown type for VShift"); 4864 EVT ShVT = MVT::v2i64; 4865 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ; 4866 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp); 4867 return DAG.getNode(ISD::BITCAST, dl, VT, 4868 DAG.getNode(Opc, dl, ShVT, SrcOp, 4869 DAG.getConstant(NumBits, 4870 TLI.getShiftAmountTy(SrcOp.getValueType())))); 4871} 4872 4873SDValue 4874X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, 4875 SelectionDAG &DAG) const { 4876 4877 // Check if the scalar load can be widened into a vector load. And if 4878 // the address is "base + cst" see if the cst can be "absorbed" into 4879 // the shuffle mask. 
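  //
  // Illustrative example (assuming %base is 16-byte aligned): a v4f32 splat
  // of (f32 load (%base + 8)) can become
  //   (v4f32 load %base) + shuffle <2, 2, 2, 2>
  // replacing the scalar load and splat with one vector load and one shuffle.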
4880 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) { 4881 SDValue Ptr = LD->getBasePtr(); 4882 if (!ISD::isNormalLoad(LD) || LD->isVolatile()) 4883 return SDValue(); 4884 EVT PVT = LD->getValueType(0); 4885 if (PVT != MVT::i32 && PVT != MVT::f32) 4886 return SDValue(); 4887 4888 int FI = -1; 4889 int64_t Offset = 0; 4890 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) { 4891 FI = FINode->getIndex(); 4892 Offset = 0; 4893 } else if (DAG.isBaseWithConstantOffset(Ptr) && 4894 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 4895 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 4896 Offset = Ptr.getConstantOperandVal(1); 4897 Ptr = Ptr.getOperand(0); 4898 } else { 4899 return SDValue(); 4900 } 4901 4902 // FIXME: 256-bit vector instructions don't require a strict alignment, 4903 // improve this code to support it better. 4904 unsigned RequiredAlign = VT.getSizeInBits()/8; 4905 SDValue Chain = LD->getChain(); 4906 // Make sure the stack object alignment is at least 16 or 32. 4907 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 4908 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) { 4909 if (MFI->isFixedObjectIndex(FI)) { 4910 // Can't change the alignment. FIXME: It's possible to compute 4911 // the exact stack offset and reference FI + adjust offset instead. 4912 // If someone *really* cares about this. That's the way to implement it. 4913 return SDValue(); 4914 } else { 4915 MFI->setObjectAlignment(FI, RequiredAlign); 4916 } 4917 } 4918 4919 // (Offset % 16 or 32) must be multiple of 4. Then address is then 4920 // Ptr + (Offset & ~15). 4921 if (Offset < 0) 4922 return SDValue(); 4923 if ((Offset % RequiredAlign) & 3) 4924 return SDValue(); 4925 int64_t StartOffset = Offset & ~(RequiredAlign-1); 4926 if (StartOffset) 4927 Ptr = DAG.getNode(ISD::ADD, Ptr.getDebugLoc(), Ptr.getValueType(), 4928 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType())); 4929 4930 int EltNo = (Offset - StartOffset) >> 2; 4931 unsigned NumElems = VT.getVectorNumElements(); 4932 4933 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems); 4934 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr, 4935 LD->getPointerInfo().getWithOffset(StartOffset), 4936 false, false, false, 0); 4937 4938 SmallVector<int, 8> Mask; 4939 for (unsigned i = 0; i != NumElems; ++i) 4940 Mask.push_back(EltNo); 4941 4942 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]); 4943 } 4944 4945 return SDValue(); 4946} 4947 4948/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a 4949/// vector of type 'VT', see if the elements can be replaced by a single large 4950/// load which has the same value as a build_vector whose operands are 'elts'. 4951/// 4952/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a 4953/// 4954/// FIXME: we'd also like to handle the case where the last elements are zero 4955/// rather than undef via VZEXT_LOAD, but we do not detect that case today. 4956/// There's even a handy isZeroNode for that purpose. 4957static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts, 4958 DebugLoc &DL, SelectionDAG &DAG) { 4959 EVT EltVT = VT.getVectorElementType(); 4960 unsigned NumElems = Elts.size(); 4961 4962 LoadSDNode *LDBase = NULL; 4963 unsigned LastLoadedElt = -1U; 4964 4965 // For each element in the initializer, see if we've found a load or an undef. 4966 // If we don't find an initial load element, or later load elements are 4967 // non-consecutive, bail out. 
4968 for (unsigned i = 0; i < NumElems; ++i) { 4969 SDValue Elt = Elts[i]; 4970 4971 if (!Elt.getNode() || 4972 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode()))) 4973 return SDValue(); 4974 if (!LDBase) { 4975 if (Elt.getNode()->getOpcode() == ISD::UNDEF) 4976 return SDValue(); 4977 LDBase = cast<LoadSDNode>(Elt.getNode()); 4978 LastLoadedElt = i; 4979 continue; 4980 } 4981 if (Elt.getOpcode() == ISD::UNDEF) 4982 continue; 4983 4984 LoadSDNode *LD = cast<LoadSDNode>(Elt); 4985 if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i)) 4986 return SDValue(); 4987 LastLoadedElt = i; 4988 } 4989 4990 // If we have found an entire vector of loads and undefs, then return a large 4991 // load of the entire vector width starting at the base pointer. If we found 4992 // consecutive loads for the low half, generate a vzext_load node. 4993 if (LastLoadedElt == NumElems - 1) { 4994 if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16) 4995 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 4996 LDBase->getPointerInfo(), 4997 LDBase->isVolatile(), LDBase->isNonTemporal(), 4998 LDBase->isInvariant(), 0); 4999 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 5000 LDBase->getPointerInfo(), 5001 LDBase->isVolatile(), LDBase->isNonTemporal(), 5002 LDBase->isInvariant(), LDBase->getAlignment()); 5003 } 5004 if (NumElems == 4 && LastLoadedElt == 1 && 5005 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) { 5006 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other); 5007 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() }; 5008 SDValue ResNode = 5009 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, 2, MVT::i64, 5010 LDBase->getPointerInfo(), 5011 LDBase->getAlignment(), 5012 false/*isVolatile*/, true/*ReadMem*/, 5013 false/*WriteMem*/); 5014 5015 // Make sure the newly-created LOAD is in the same position as LDBase in 5016 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and 5017 // update uses of LDBase's output chain to use the TokenFactor. 5018 if (LDBase->hasAnyUseOfValue(1)) { 5019 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 5020 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1)); 5021 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain); 5022 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1), 5023 SDValue(ResNode.getNode(), 1)); 5024 } 5025 5026 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode); 5027 } 5028 return SDValue(); 5029} 5030 5031/// LowerVectorBroadcast - Attempt to use the vbroadcast instruction 5032/// to generate a splat value for the following cases: 5033/// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant. 5034/// 2. A splat shuffle which uses a scalar_to_vector node which comes from 5035/// a scalar load, or a constant. 5036/// The VBROADCAST node is returned when a pattern is found, 5037/// or SDValue() otherwise. 5038SDValue 5039X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const { 5040 if (!Subtarget->hasAVX()) 5041 return SDValue(); 5042 5043 EVT VT = Op.getValueType(); 5044 DebugLoc dl = Op.getDebugLoc(); 5045 5046 assert((VT.is128BitVector() || VT.is256BitVector()) && 5047 "Unsupported vector type for broadcast."); 5048 5049 SDValue Ld; 5050 bool ConstSplatVal; 5051 5052 switch (Op.getOpcode()) { 5053 default: 5054 // Unknown pattern found. 5055 return SDValue(); 5056 5057 case ISD::BUILD_VECTOR: { 5058 // The BUILD_VECTOR node must be a splat. 
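    // e.g. (v4f32 build_vector %x, %x, %x, %x) can broadcast %x, while a
    // BUILD_VECTOR with mixed operands cannot become a single VBROADCAST.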
5059 if (!isSplatVector(Op.getNode())) 5060 return SDValue(); 5061 5062 Ld = Op.getOperand(0); 5063 ConstSplatVal = (Ld.getOpcode() == ISD::Constant || 5064 Ld.getOpcode() == ISD::ConstantFP); 5065 5066 // The suspected load node has several users. Make sure that all 5067 // of its users are from the BUILD_VECTOR node. 5068 // Constants may have multiple users. 5069 if (!ConstSplatVal && !Ld->hasNUsesOfValue(VT.getVectorNumElements(), 0)) 5070 return SDValue(); 5071 break; 5072 } 5073 5074 case ISD::VECTOR_SHUFFLE: { 5075 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5076 5077 // Shuffles must have a splat mask where the first element is 5078 // broadcasted. 5079 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0) 5080 return SDValue(); 5081 5082 SDValue Sc = Op.getOperand(0); 5083 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR && 5084 Sc.getOpcode() != ISD::BUILD_VECTOR) { 5085 5086 if (!Subtarget->hasAVX2()) 5087 return SDValue(); 5088 5089 // Use the register form of the broadcast instruction available on AVX2. 5090 if (VT.is256BitVector()) 5091 Sc = Extract128BitVector(Sc, 0, DAG, dl); 5092 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc); 5093 } 5094 5095 Ld = Sc.getOperand(0); 5096 ConstSplatVal = (Ld.getOpcode() == ISD::Constant || 5097 Ld.getOpcode() == ISD::ConstantFP); 5098 5099 // The scalar_to_vector node and the suspected 5100 // load node must have exactly one user. 5101 // Constants may have multiple users. 5102 if (!ConstSplatVal && (!Sc.hasOneUse() || !Ld.hasOneUse())) 5103 return SDValue(); 5104 break; 5105 } 5106 } 5107 5108 bool Is256 = VT.is256BitVector(); 5109 5110 // Handle the broadcasting a single constant scalar from the constant pool 5111 // into a vector. On Sandybridge it is still better to load a constant vector 5112 // from the constant pool and not to broadcast it from a scalar. 5113 if (ConstSplatVal && Subtarget->hasAVX2()) { 5114 EVT CVT = Ld.getValueType(); 5115 assert(!CVT.isVector() && "Must not broadcast a vector type"); 5116 unsigned ScalarSize = CVT.getSizeInBits(); 5117 5118 if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) { 5119 const Constant *C = 0; 5120 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld)) 5121 C = CI->getConstantIntValue(); 5122 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld)) 5123 C = CF->getConstantFPValue(); 5124 5125 assert(C && "Invalid constant type"); 5126 5127 SDValue CP = DAG.getConstantPool(C, getPointerTy()); 5128 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment(); 5129 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP, 5130 MachinePointerInfo::getConstantPool(), 5131 false, false, false, Alignment); 5132 5133 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5134 } 5135 } 5136 5137 bool IsLoad = ISD::isNormalLoad(Ld.getNode()); 5138 unsigned ScalarSize = Ld.getValueType().getSizeInBits(); 5139 5140 // Handle AVX2 in-register broadcasts. 5141 if (!IsLoad && Subtarget->hasAVX2() && 5142 (ScalarSize == 32 || (Is256 && ScalarSize == 64))) 5143 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5144 5145 // The scalar source must be a normal load. 
  if (!IsLoad)
    return SDValue();

  if (ScalarSize == 32 || (Is256 && ScalarSize == 64))
    return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);

  // The integer check is needed for the 64-bit into 128-bit case, so it
  // doesn't match double, since there is no vbroadcastsd xmm.
  if (Subtarget->hasAVX2() && Ld.getValueType().isInteger()) {
    if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
      return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
  }

  // Unsupported broadcast.
  return SDValue();
}

// LowerVectorFpExtend - Recognize the scalarized FP_EXTEND from v2f32 to v2f64
// and convert it into X86ISD::VFPEXT, because the current ISD::FP_EXTEND has
// the constraint that input and output vectors must have matching element
// counts.
SDValue
X86TargetLowering::LowerVectorFpExtend(SDValue &Op, SelectionDAG &DAG) const {
  DebugLoc DL = Op.getDebugLoc();
  SDNode *N = Op.getNode();
  EVT VT = Op.getValueType();
  unsigned NumElts = Op.getNumOperands();

  // Check supported types and sub-targets.
  //
  // Only v2f32 -> v2f64 needs special handling.
  if (VT != MVT::v2f64 || !Subtarget->hasSSE2())
    return SDValue();

  SDValue VecIn;
  EVT VecInVT;
  SmallVector<int, 8> Mask;
  EVT SrcVT = MVT::Other;

  // Check whether the pattern can be translated into X86vfpext.
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue In = N->getOperand(i);
    unsigned Opcode = In.getOpcode();

    // Skip if the element is undefined.
    if (Opcode == ISD::UNDEF) {
      Mask.push_back(-1);
      continue;
    }

    // Quit if one of the elements is not defined from 'fpext'.
    if (Opcode != ISD::FP_EXTEND)
      return SDValue();

    // Check how the source of 'fpext' is defined.
    SDValue L2In = In.getOperand(0);
    EVT L2InVT = L2In.getValueType();

    // Check the original type
    if (SrcVT == MVT::Other)
      SrcVT = L2InVT;
    else if (SrcVT != L2InVT) // Quit if the types are not homogeneous.
      return SDValue();

    // Check whether the value being 'fpext'ed is extracted from the same
    // source.
    Opcode = L2In.getOpcode();

    // Quit if it's not extracted with a constant index.
    if (Opcode != ISD::EXTRACT_VECTOR_ELT ||
        !isa<ConstantSDNode>(L2In.getOperand(1)))
      return SDValue();

    SDValue ExtractedFromVec = L2In.getOperand(0);

    if (VecIn.getNode() == 0) {
      VecIn = ExtractedFromVec;
      VecInVT = ExtractedFromVec.getValueType();
    } else if (VecIn != ExtractedFromVec) // Quit if built from more than 1 vec.
      return SDValue();

    Mask.push_back(cast<ConstantSDNode>(L2In.getOperand(1))->getZExtValue());
  }

  // Quit if all operands of BUILD_VECTOR are undefined.
  if (!VecIn.getNode())
    return SDValue();

  // Fill the remaining mask as undef.
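  //
  // Illustrative result (made-up values): for a v2f64 BUILD_VECTOR of
  // (fpext (extractelt %v, 0)) and (fpext (extractelt %v, 1)) with %v: v4f32,
  // Mask becomes <0, 1, -1, -1> and the node built below is
  //   (v2f64 X86ISD::VFPEXT (shuffle %v, undef, <0, 1, -1, -1>)).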
5234 for (unsigned i = NumElts; i < VecInVT.getVectorNumElements(); ++i) 5235 Mask.push_back(-1); 5236 5237 return DAG.getNode(X86ISD::VFPEXT, DL, VT, 5238 DAG.getVectorShuffle(VecInVT, DL, 5239 VecIn, DAG.getUNDEF(VecInVT), 5240 &Mask[0])); 5241} 5242 5243SDValue 5244X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { 5245 DebugLoc dl = Op.getDebugLoc(); 5246 5247 EVT VT = Op.getValueType(); 5248 EVT ExtVT = VT.getVectorElementType(); 5249 unsigned NumElems = Op.getNumOperands(); 5250 5251 // Vectors containing all zeros can be matched by pxor and xorps later 5252 if (ISD::isBuildVectorAllZeros(Op.getNode())) { 5253 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd 5254 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts. 5255 if (VT == MVT::v4i32 || VT == MVT::v8i32) 5256 return Op; 5257 5258 return getZeroVector(VT, Subtarget, DAG, dl); 5259 } 5260 5261 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width 5262 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use 5263 // vpcmpeqd on 256-bit vectors. 5264 if (ISD::isBuildVectorAllOnes(Op.getNode())) { 5265 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasAVX2())) 5266 return Op; 5267 5268 return getOnesVector(VT, Subtarget->hasAVX2(), DAG, dl); 5269 } 5270 5271 SDValue Broadcast = LowerVectorBroadcast(Op, DAG); 5272 if (Broadcast.getNode()) 5273 return Broadcast; 5274 5275 SDValue FpExt = LowerVectorFpExtend(Op, DAG); 5276 if (FpExt.getNode()) 5277 return FpExt; 5278 5279 unsigned EVTBits = ExtVT.getSizeInBits(); 5280 5281 unsigned NumZero = 0; 5282 unsigned NumNonZero = 0; 5283 unsigned NonZeros = 0; 5284 bool IsAllConstants = true; 5285 SmallSet<SDValue, 8> Values; 5286 for (unsigned i = 0; i < NumElems; ++i) { 5287 SDValue Elt = Op.getOperand(i); 5288 if (Elt.getOpcode() == ISD::UNDEF) 5289 continue; 5290 Values.insert(Elt); 5291 if (Elt.getOpcode() != ISD::Constant && 5292 Elt.getOpcode() != ISD::ConstantFP) 5293 IsAllConstants = false; 5294 if (X86::isZeroNode(Elt)) 5295 NumZero++; 5296 else { 5297 NonZeros |= (1 << i); 5298 NumNonZero++; 5299 } 5300 } 5301 5302 // All undef vector. Return an UNDEF. All zero vectors were handled above. 5303 if (NumNonZero == 0) 5304 return DAG.getUNDEF(VT); 5305 5306 // Special case for single non-zero, non-undef, element. 5307 if (NumNonZero == 1) { 5308 unsigned Idx = CountTrailingZeros_32(NonZeros); 5309 SDValue Item = Op.getOperand(Idx); 5310 5311 // If this is an insertion of an i64 value on x86-32, and if the top bits of 5312 // the value are obviously zero, truncate the value to i32 and do the 5313 // insertion that way. Only do this if the value is non-constant or if the 5314 // value is a constant being inserted into element 0. It is cheaper to do 5315 // a constant pool load than it is to do a movd + shuffle. 5316 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() && 5317 (!IsAllConstants || Idx == 0)) { 5318 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 5319 // Handle SSE only. 5320 assert(VT == MVT::v2i64 && "Expected an SSE value type!"); 5321 EVT VecVT = MVT::v4i32; 5322 unsigned VecElts = 4; 5323 5324 // Truncate the value (which may itself be a constant) to i32, and 5325 // convert it to a vector with movd (S2V+shuffle to zero extend). 
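        //
        // Sketch of the resulting DAG (illustrative): inserting i64 %x with
        // known-zero top bits into a v2i64 becomes
        //   (bitcast (shuffle zero, (scalar_to_vector (trunc %x)), <4,1,2,3>))
        // i.e. a movd with zeroed upper lanes, swizzled below into place when
        // Idx != 0.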
5326 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item); 5327 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item); 5328 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5329 5330 // Now we have our 32-bit value zero extended in the low element of 5331 // a vector. If Idx != 0, swizzle it into place. 5332 if (Idx != 0) { 5333 SmallVector<int, 4> Mask; 5334 Mask.push_back(Idx); 5335 for (unsigned i = 1; i != VecElts; ++i) 5336 Mask.push_back(i); 5337 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT), 5338 &Mask[0]); 5339 } 5340 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 5341 } 5342 } 5343 5344 // If we have a constant or non-constant insertion into the low element of 5345 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 5346 // the rest of the elements. This will be matched as movd/movq/movss/movsd 5347 // depending on what the source datatype is. 5348 if (Idx == 0) { 5349 if (NumZero == 0) 5350 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5351 5352 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 || 5353 (ExtVT == MVT::i64 && Subtarget->is64Bit())) { 5354 if (VT.is256BitVector()) { 5355 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl); 5356 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec, 5357 Item, DAG.getIntPtrConstant(0)); 5358 } 5359 assert(VT.is128BitVector() && "Expected an SSE value type!"); 5360 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5361 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 5362 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5363 } 5364 5365 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) { 5366 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item); 5367 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item); 5368 if (VT.is256BitVector()) { 5369 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl); 5370 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl); 5371 } else { 5372 assert(VT.is128BitVector() && "Expected an SSE value type!"); 5373 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5374 } 5375 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 5376 } 5377 } 5378 5379 // Is it a vector logical left shift? 5380 if (NumElems == 2 && Idx == 1 && 5381 X86::isZeroNode(Op.getOperand(0)) && 5382 !X86::isZeroNode(Op.getOperand(1))) { 5383 unsigned NumBits = VT.getSizeInBits(); 5384 return getVShift(true, VT, 5385 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 5386 VT, Op.getOperand(1)), 5387 NumBits/2, DAG, *this, dl); 5388 } 5389 5390 if (IsAllConstants) // Otherwise, it's better to do a constpool load. 5391 return SDValue(); 5392 5393 // Otherwise, if this is a vector with i32 or f32 elements, and the element 5394 // is a non-constant being inserted into an element other than the low one, 5395 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka 5396 // movd/movss) to move this into the low element, then shuffle it into 5397 // place. 5398 if (EVTBits == 32) { 5399 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5400 5401 // Turn it into a shuffle of zero and zero-extended scalar to vector. 5402 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG); 5403 SmallVector<int, 8> MaskVec; 5404 for (unsigned i = 0; i != NumElems; ++i) 5405 MaskVec.push_back(i == Idx ? 0 : 1); 5406 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]); 5407 } 5408 } 5409 5410 // Splat is obviously ok. 
Let legalizer expand it to a shuffle. 5411 if (Values.size() == 1) { 5412 if (EVTBits == 32) { 5413 // Instead of a shuffle like this: 5414 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0> 5415 // Check if it's possible to issue this instead. 5416 // shuffle (vload ptr)), undef, <1, 1, 1, 1> 5417 unsigned Idx = CountTrailingZeros_32(NonZeros); 5418 SDValue Item = Op.getOperand(Idx); 5419 if (Op.getNode()->isOnlyUserOf(Item.getNode())) 5420 return LowerAsSplatVectorLoad(Item, VT, dl, DAG); 5421 } 5422 return SDValue(); 5423 } 5424 5425 // A vector full of immediates; various special cases are already 5426 // handled, so this is best done with a single constant-pool load. 5427 if (IsAllConstants) 5428 return SDValue(); 5429 5430 // For AVX-length vectors, build the individual 128-bit pieces and use 5431 // shuffles to put them in place. 5432 if (VT.is256BitVector()) { 5433 SmallVector<SDValue, 32> V; 5434 for (unsigned i = 0; i != NumElems; ++i) 5435 V.push_back(Op.getOperand(i)); 5436 5437 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2); 5438 5439 // Build both the lower and upper subvector. 5440 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[0], NumElems/2); 5441 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[NumElems / 2], 5442 NumElems/2); 5443 5444 // Recreate the wider vector with the lower and upper part. 5445 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl); 5446 } 5447 5448 // Let legalizer expand 2-wide build_vectors. 5449 if (EVTBits == 64) { 5450 if (NumNonZero == 1) { 5451 // One half is zero or undef. 5452 unsigned Idx = CountTrailingZeros_32(NonZeros); 5453 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, 5454 Op.getOperand(Idx)); 5455 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG); 5456 } 5457 return SDValue(); 5458 } 5459 5460 // If element VT is < 32 bits, convert it to inserts into a zero vector. 5461 if (EVTBits == 8 && NumElems == 16) { 5462 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 5463 Subtarget, *this); 5464 if (V.getNode()) return V; 5465 } 5466 5467 if (EVTBits == 16 && NumElems == 8) { 5468 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 5469 Subtarget, *this); 5470 if (V.getNode()) return V; 5471 } 5472 5473 // If element VT is == 32 bits, turn it into a number of shuffles. 5474 SmallVector<SDValue, 8> V(NumElems); 5475 if (NumElems == 4 && NumZero > 0) { 5476 for (unsigned i = 0; i < 4; ++i) { 5477 bool isZero = !(NonZeros & (1 << i)); 5478 if (isZero) 5479 V[i] = getZeroVector(VT, Subtarget, DAG, dl); 5480 else 5481 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 5482 } 5483 5484 for (unsigned i = 0; i < 2; ++i) { 5485 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 5486 default: break; 5487 case 0: 5488 V[i] = V[i*2]; // Must be a zero vector. 5489 break; 5490 case 1: 5491 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]); 5492 break; 5493 case 2: 5494 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]); 5495 break; 5496 case 3: 5497 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]); 5498 break; 5499 } 5500 } 5501 5502 bool Reverse1 = (NonZeros & 0x3) == 2; 5503 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2; 5504 int MaskVec[] = { 5505 Reverse1 ? 1 : 0, 5506 Reverse1 ? 0 : 1, 5507 static_cast<int>(Reverse2 ? NumElems+1 : NumElems), 5508 static_cast<int>(Reverse2 ? 
NumElems : NumElems+1) 5509 }; 5510 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]); 5511 } 5512 5513 if (Values.size() > 1 && VT.is128BitVector()) { 5514 // Check for a build vector of consecutive loads. 5515 for (unsigned i = 0; i < NumElems; ++i) 5516 V[i] = Op.getOperand(i); 5517 5518 // Check for elements which are consecutive loads. 5519 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG); 5520 if (LD.getNode()) 5521 return LD; 5522 5523 // For SSE 4.1, use insertps to put the high elements into the low element. 5524 if (getSubtarget()->hasSSE41()) { 5525 SDValue Result; 5526 if (Op.getOperand(0).getOpcode() != ISD::UNDEF) 5527 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0)); 5528 else 5529 Result = DAG.getUNDEF(VT); 5530 5531 for (unsigned i = 1; i < NumElems; ++i) { 5532 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue; 5533 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, 5534 Op.getOperand(i), DAG.getIntPtrConstant(i)); 5535 } 5536 return Result; 5537 } 5538 5539 // Otherwise, expand into a number of unpckl*, start by extending each of 5540 // our (non-undef) elements to the full vector width with the element in the 5541 // bottom slot of the vector (which generates no code for SSE). 5542 for (unsigned i = 0; i < NumElems; ++i) { 5543 if (Op.getOperand(i).getOpcode() != ISD::UNDEF) 5544 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 5545 else 5546 V[i] = DAG.getUNDEF(VT); 5547 } 5548 5549 // Next, we iteratively mix elements, e.g. for v4f32: 5550 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 5551 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 5552 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 5553 unsigned EltStride = NumElems >> 1; 5554 while (EltStride != 0) { 5555 for (unsigned i = 0; i < EltStride; ++i) { 5556 // If V[i+EltStride] is undef and this is the first round of mixing, 5557 // then it is safe to just drop this shuffle: V[i] is already in the 5558 // right place, the one element (since it's the first round) being 5559 // inserted as undef can be dropped. This isn't safe for successive 5560 // rounds because they will permute elements within both vectors. 5561 if (V[i+EltStride].getOpcode() == ISD::UNDEF && 5562 EltStride == NumElems/2) 5563 continue; 5564 5565 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]); 5566 } 5567 EltStride >>= 1; 5568 } 5569 return V[0]; 5570 } 5571 return SDValue(); 5572} 5573 5574// LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction 5575// to create 256-bit vectors from two other 128-bit ones. 5576static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5577 DebugLoc dl = Op.getDebugLoc(); 5578 EVT ResVT = Op.getValueType(); 5579 5580 assert(ResVT.is256BitVector() && "Value type must be 256-bit wide"); 5581 5582 SDValue V1 = Op.getOperand(0); 5583 SDValue V2 = Op.getOperand(1); 5584 unsigned NumElems = ResVT.getVectorNumElements(); 5585 5586 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl); 5587} 5588 5589static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5590 assert(Op.getNumOperands() == 2); 5591 5592 // 256-bit AVX can use the vinsertf128 instruction to create 256-bit vectors 5593 // from two other 128-bit ones. 5594 return LowerAVXCONCAT_VECTORS(Op, DAG); 5595} 5596 5597// Try to lower a shuffle node into a simple blend instruction. 
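// For example (illustrative): the v4f32 mask <0, 5, 2, 7> takes elements 0
// and 2 from V1 and elements 1 and 3 from V2; the loop below encodes it as
// immediate 0b0101, setting bit i when result element i comes from V1.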
5598static SDValue 5599LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp, 5600 const X86Subtarget *Subtarget, SelectionDAG &DAG) { 5601 SDValue V1 = SVOp->getOperand(0); 5602 SDValue V2 = SVOp->getOperand(1); 5603 DebugLoc dl = SVOp->getDebugLoc(); 5604 MVT VT = SVOp->getValueType(0).getSimpleVT(); 5605 unsigned NumElems = VT.getVectorNumElements(); 5606 5607 if (!Subtarget->hasSSE41()) 5608 return SDValue(); 5609 5610 unsigned ISDNo = 0; 5611 MVT OpTy; 5612 5613 switch (VT.SimpleTy) { 5614 default: return SDValue(); 5615 case MVT::v8i16: 5616 ISDNo = X86ISD::BLENDPW; 5617 OpTy = MVT::v8i16; 5618 break; 5619 case MVT::v4i32: 5620 case MVT::v4f32: 5621 ISDNo = X86ISD::BLENDPS; 5622 OpTy = MVT::v4f32; 5623 break; 5624 case MVT::v2i64: 5625 case MVT::v2f64: 5626 ISDNo = X86ISD::BLENDPD; 5627 OpTy = MVT::v2f64; 5628 break; 5629 case MVT::v8i32: 5630 case MVT::v8f32: 5631 if (!Subtarget->hasAVX()) 5632 return SDValue(); 5633 ISDNo = X86ISD::BLENDPS; 5634 OpTy = MVT::v8f32; 5635 break; 5636 case MVT::v4i64: 5637 case MVT::v4f64: 5638 if (!Subtarget->hasAVX()) 5639 return SDValue(); 5640 ISDNo = X86ISD::BLENDPD; 5641 OpTy = MVT::v4f64; 5642 break; 5643 } 5644 assert(ISDNo && "Invalid Op Number"); 5645 5646 unsigned MaskVals = 0; 5647 5648 for (unsigned i = 0; i != NumElems; ++i) { 5649 int EltIdx = SVOp->getMaskElt(i); 5650 if (EltIdx == (int)i || EltIdx < 0) 5651 MaskVals |= (1<<i); 5652 else if (EltIdx == (int)(i + NumElems)) 5653 continue; // Bit is set to zero; 5654 else 5655 return SDValue(); 5656 } 5657 5658 V1 = DAG.getNode(ISD::BITCAST, dl, OpTy, V1); 5659 V2 = DAG.getNode(ISD::BITCAST, dl, OpTy, V2); 5660 SDValue Ret = DAG.getNode(ISDNo, dl, OpTy, V1, V2, 5661 DAG.getConstant(MaskVals, MVT::i32)); 5662 return DAG.getNode(ISD::BITCAST, dl, VT, Ret); 5663} 5664 5665// v8i16 shuffles - Prefer shuffles in the following order: 5666// 1. [all] pshuflw, pshufhw, optional move 5667// 2. [ssse3] 1 x pshufb 5668// 3. [ssse3] 2 x pshufb + 1 x por 5669// 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw) 5670static SDValue 5671LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget, 5672 SelectionDAG &DAG) { 5673 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5674 SDValue V1 = SVOp->getOperand(0); 5675 SDValue V2 = SVOp->getOperand(1); 5676 DebugLoc dl = SVOp->getDebugLoc(); 5677 SmallVector<int, 8> MaskVals; 5678 5679 // Determine if more than 1 of the words in each of the low and high quadwords 5680 // of the result come from the same quadword of one of the two inputs. Undef 5681 // mask values count as coming from any quadword, for better codegen. 5682 unsigned LoQuad[] = { 0, 0, 0, 0 }; 5683 unsigned HiQuad[] = { 0, 0, 0, 0 }; 5684 std::bitset<4> InputQuads; 5685 for (unsigned i = 0; i < 8; ++i) { 5686 unsigned *Quad = i < 4 ? 
LoQuad : HiQuad; 5687 int EltIdx = SVOp->getMaskElt(i); 5688 MaskVals.push_back(EltIdx); 5689 if (EltIdx < 0) { 5690 ++Quad[0]; 5691 ++Quad[1]; 5692 ++Quad[2]; 5693 ++Quad[3]; 5694 continue; 5695 } 5696 ++Quad[EltIdx / 4]; 5697 InputQuads.set(EltIdx / 4); 5698 } 5699 5700 int BestLoQuad = -1; 5701 unsigned MaxQuad = 1; 5702 for (unsigned i = 0; i < 4; ++i) { 5703 if (LoQuad[i] > MaxQuad) { 5704 BestLoQuad = i; 5705 MaxQuad = LoQuad[i]; 5706 } 5707 } 5708 5709 int BestHiQuad = -1; 5710 MaxQuad = 1; 5711 for (unsigned i = 0; i < 4; ++i) { 5712 if (HiQuad[i] > MaxQuad) { 5713 BestHiQuad = i; 5714 MaxQuad = HiQuad[i]; 5715 } 5716 } 5717 5718 // For SSSE3, If all 8 words of the result come from only 1 quadword of each 5719 // of the two input vectors, shuffle them into one input vector so only a 5720 // single pshufb instruction is necessary. If There are more than 2 input 5721 // quads, disable the next transformation since it does not help SSSE3. 5722 bool V1Used = InputQuads[0] || InputQuads[1]; 5723 bool V2Used = InputQuads[2] || InputQuads[3]; 5724 if (Subtarget->hasSSSE3()) { 5725 if (InputQuads.count() == 2 && V1Used && V2Used) { 5726 BestLoQuad = InputQuads[0] ? 0 : 1; 5727 BestHiQuad = InputQuads[2] ? 2 : 3; 5728 } 5729 if (InputQuads.count() > 2) { 5730 BestLoQuad = -1; 5731 BestHiQuad = -1; 5732 } 5733 } 5734 5735 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update 5736 // the shuffle mask. If a quad is scored as -1, that means that it contains 5737 // words from all 4 input quadwords. 5738 SDValue NewV; 5739 if (BestLoQuad >= 0 || BestHiQuad >= 0) { 5740 int MaskV[] = { 5741 BestLoQuad < 0 ? 0 : BestLoQuad, 5742 BestHiQuad < 0 ? 1 : BestHiQuad 5743 }; 5744 NewV = DAG.getVectorShuffle(MVT::v2i64, dl, 5745 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1), 5746 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]); 5747 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV); 5748 5749 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the 5750 // source words for the shuffle, to aid later transformations. 5751 bool AllWordsInNewV = true; 5752 bool InOrder[2] = { true, true }; 5753 for (unsigned i = 0; i != 8; ++i) { 5754 int idx = MaskVals[i]; 5755 if (idx != (int)i) 5756 InOrder[i/4] = false; 5757 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad) 5758 continue; 5759 AllWordsInNewV = false; 5760 break; 5761 } 5762 5763 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV; 5764 if (AllWordsInNewV) { 5765 for (int i = 0; i != 8; ++i) { 5766 int idx = MaskVals[i]; 5767 if (idx < 0) 5768 continue; 5769 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4; 5770 if ((idx != i) && idx < 4) 5771 pshufhw = false; 5772 if ((idx != i) && idx > 3) 5773 pshuflw = false; 5774 } 5775 V1 = NewV; 5776 V2Used = false; 5777 BestLoQuad = 0; 5778 BestHiQuad = 1; 5779 } 5780 5781 // If we've eliminated the use of V2, and the new mask is a pshuflw or 5782 // pshufhw, that's as cheap as it gets. Return the new shuffle. 5783 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) { 5784 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW; 5785 unsigned TargetMask = 0; 5786 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, 5787 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]); 5788 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 5789 TargetMask = pshufhw ? 
getShufflePSHUFHWImmediate(SVOp): 5790 getShufflePSHUFLWImmediate(SVOp); 5791 V1 = NewV.getOperand(0); 5792 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG); 5793 } 5794 } 5795 5796 // If we have SSSE3, and all words of the result are from 1 input vector, 5797 // case 2 is generated, otherwise case 3 is generated. If no SSSE3 5798 // is present, fall back to case 4. 5799 if (Subtarget->hasSSSE3()) { 5800 SmallVector<SDValue,16> pshufbMask; 5801 5802 // If we have elements from both input vectors, set the high bit of the 5803 // shuffle mask element to zero out elements that come from V2 in the V1 5804 // mask, and elements that come from V1 in the V2 mask, so that the two 5805 // results can be OR'd together. 5806 bool TwoInputs = V1Used && V2Used; 5807 for (unsigned i = 0; i != 8; ++i) { 5808 int EltIdx = MaskVals[i] * 2; 5809 int Idx0 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx; 5810 int Idx1 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx+1; 5811 pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8)); 5812 pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8)); 5813 } 5814 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1); 5815 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 5816 DAG.getNode(ISD::BUILD_VECTOR, dl, 5817 MVT::v16i8, &pshufbMask[0], 16)); 5818 if (!TwoInputs) 5819 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5820 5821 // Calculate the shuffle mask for the second input, shuffle it, and 5822 // OR it with the first shuffled input. 5823 pshufbMask.clear(); 5824 for (unsigned i = 0; i != 8; ++i) { 5825 int EltIdx = MaskVals[i] * 2; 5826 int Idx0 = (EltIdx < 16) ? 0x80 : EltIdx - 16; 5827 int Idx1 = (EltIdx < 16) ? 0x80 : EltIdx - 15; 5828 pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8)); 5829 pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8)); 5830 } 5831 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2); 5832 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 5833 DAG.getNode(ISD::BUILD_VECTOR, dl, 5834 MVT::v16i8, &pshufbMask[0], 16)); 5835 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 5836 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5837 } 5838 5839 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order, 5840 // and update MaskVals with new element order. 5841 std::bitset<8> InOrder; 5842 if (BestLoQuad >= 0) { 5843 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 }; 5844 for (int i = 0; i != 4; ++i) { 5845 int idx = MaskVals[i]; 5846 if (idx < 0) { 5847 InOrder.set(i); 5848 } else if ((idx / 4) == BestLoQuad) { 5849 MaskV[i] = idx & 3; 5850 InOrder.set(i); 5851 } 5852 } 5853 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 5854 &MaskV[0]); 5855 5856 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) { 5857 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 5858 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16, 5859 NewV.getOperand(0), 5860 getShufflePSHUFLWImmediate(SVOp), DAG); 5861 } 5862 } 5863 5864 // If BestHi >= 0, generate a pshufhw to put the high elements in order, 5865 // and update MaskVals with the new element order. 
5866 if (BestHiQuad >= 0) { 5867 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 }; 5868 for (unsigned i = 4; i != 8; ++i) { 5869 int idx = MaskVals[i]; 5870 if (idx < 0) { 5871 InOrder.set(i); 5872 } else if ((idx / 4) == BestHiQuad) { 5873 MaskV[i] = (idx & 3) + 4; 5874 InOrder.set(i); 5875 } 5876 } 5877 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 5878 &MaskV[0]); 5879 5880 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) { 5881 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 5882 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16, 5883 NewV.getOperand(0), 5884 getShufflePSHUFHWImmediate(SVOp), DAG); 5885 } 5886 } 5887 5888 // In case BestHi & BestLo were both -1, which means each quadword has a word 5889 // from each of the four input quadwords, calculate the InOrder bitvector now 5890 // before falling through to the insert/extract cleanup. 5891 if (BestLoQuad == -1 && BestHiQuad == -1) { 5892 NewV = V1; 5893 for (int i = 0; i != 8; ++i) 5894 if (MaskVals[i] < 0 || MaskVals[i] == i) 5895 InOrder.set(i); 5896 } 5897 5898 // The other elements are put in the right place using pextrw and pinsrw. 5899 for (unsigned i = 0; i != 8; ++i) { 5900 if (InOrder[i]) 5901 continue; 5902 int EltIdx = MaskVals[i]; 5903 if (EltIdx < 0) 5904 continue; 5905 SDValue ExtOp = (EltIdx < 8) ? 5906 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1, 5907 DAG.getIntPtrConstant(EltIdx)) : 5908 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2, 5909 DAG.getIntPtrConstant(EltIdx - 8)); 5910 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp, 5911 DAG.getIntPtrConstant(i)); 5912 } 5913 return NewV; 5914} 5915 5916// v16i8 shuffles - Prefer shuffles in the following order: 5917// 1. [ssse3] 1 x pshufb 5918// 2. [ssse3] 2 x pshufb + 1 x por 5919// 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw 5920static 5921SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, 5922 SelectionDAG &DAG, 5923 const X86TargetLowering &TLI) { 5924 SDValue V1 = SVOp->getOperand(0); 5925 SDValue V2 = SVOp->getOperand(1); 5926 DebugLoc dl = SVOp->getDebugLoc(); 5927 ArrayRef<int> MaskVals = SVOp->getMask(); 5928 5929 // If we have SSSE3, case 1 is generated when all result bytes come from 5930 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is 5931 // present, fall back to case 3. 5932 5933 // If SSSE3, use 1 pshufb instruction per vector with elements in the result. 5934 if (TLI.getSubtarget()->hasSSSE3()) { 5935 SmallVector<SDValue,16> pshufbMask; 5936 5937 // If all result elements are from one input vector, then only translate 5938 // undef mask values to 0x80 (zero out result) in the pshufb mask. 5939 // 5940 // Otherwise, we have elements from both input vectors, and must zero out 5941 // elements that come from V2 in the first mask, and V1 in the second mask 5942 // so that we can OR them together. 5943 for (unsigned i = 0; i != 16; ++i) { 5944 int EltIdx = MaskVals[i]; 5945 if (EltIdx < 0 || EltIdx >= 16) 5946 EltIdx = 0x80; 5947 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 5948 } 5949 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 5950 DAG.getNode(ISD::BUILD_VECTOR, dl, 5951 MVT::v16i8, &pshufbMask[0], 16)); 5952 5953 // As PSHUFB will zero elements with negative indices, it's safe to ignore 5954 // the 2nd operand if it's undefined or zero. 
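    // (A pshufb control byte with its high bit set, such as 0x80, writes a
    // zero into the corresponding result byte.)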
5955 if (V2.getOpcode() == ISD::UNDEF || 5956 ISD::isBuildVectorAllZeros(V2.getNode())) 5957 return V1; 5958 5959 // Calculate the shuffle mask for the second input, shuffle it, and 5960 // OR it with the first shuffled input. 5961 pshufbMask.clear(); 5962 for (unsigned i = 0; i != 16; ++i) { 5963 int EltIdx = MaskVals[i]; 5964 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16; 5965 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 5966 } 5967 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 5968 DAG.getNode(ISD::BUILD_VECTOR, dl, 5969 MVT::v16i8, &pshufbMask[0], 16)); 5970 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 5971 } 5972 5973 // No SSSE3 - Calculate in place words and then fix all out of place words 5974 // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from 5975 // the 16 different words that comprise the two doublequadword input vectors. 5976 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5977 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2); 5978 SDValue NewV = V1; 5979 for (int i = 0; i != 8; ++i) { 5980 int Elt0 = MaskVals[i*2]; 5981 int Elt1 = MaskVals[i*2+1]; 5982 5983 // This word of the result is all undef, skip it. 5984 if (Elt0 < 0 && Elt1 < 0) 5985 continue; 5986 5987 // This word of the result is already in the correct place, skip it. 5988 if ((Elt0 == i*2) && (Elt1 == i*2+1)) 5989 continue; 5990 5991 SDValue Elt0Src = Elt0 < 16 ? V1 : V2; 5992 SDValue Elt1Src = Elt1 < 16 ? V1 : V2; 5993 SDValue InsElt; 5994 5995 // If Elt0 and Elt1 are defined, are consecutive, and can be load 5996 // using a single extract together, load it and store it. 5997 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) { 5998 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src, 5999 DAG.getIntPtrConstant(Elt1 / 2)); 6000 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, 6001 DAG.getIntPtrConstant(i)); 6002 continue; 6003 } 6004 6005 // If Elt1 is defined, extract it from the appropriate source. If the 6006 // source byte is not also odd, shift the extracted word left 8 bits 6007 // otherwise clear the bottom 8 bits if we need to do an or. 6008 if (Elt1 >= 0) { 6009 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src, 6010 DAG.getIntPtrConstant(Elt1 / 2)); 6011 if ((Elt1 & 1) == 0) 6012 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt, 6013 DAG.getConstant(8, 6014 TLI.getShiftAmountTy(InsElt.getValueType()))); 6015 else if (Elt0 >= 0) 6016 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt, 6017 DAG.getConstant(0xFF00, MVT::i16)); 6018 } 6019 // If Elt0 is defined, extract it from the appropriate source. If the 6020 // source byte is not also even, shift the extracted word right 8 bits. If 6021 // Elt1 was also defined, OR the extracted values together before 6022 // inserting them in the result. 6023 if (Elt0 >= 0) { 6024 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, 6025 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2)); 6026 if ((Elt0 & 1) != 0) 6027 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0, 6028 DAG.getConstant(8, 6029 TLI.getShiftAmountTy(InsElt0.getValueType()))); 6030 else if (Elt1 >= 0) 6031 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0, 6032 DAG.getConstant(0x00FF, MVT::i16)); 6033 InsElt = Elt1 >= 0 ? 
DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0) 6034 : InsElt0; 6035 } 6036 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, 6037 DAG.getIntPtrConstant(i)); 6038 } 6039 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV); 6040} 6041 6042// v32i8 shuffles - Translate to VPSHUFB if possible. 6043static 6044SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp, 6045 const X86Subtarget *Subtarget, 6046 SelectionDAG &DAG) { 6047 EVT VT = SVOp->getValueType(0); 6048 SDValue V1 = SVOp->getOperand(0); 6049 SDValue V2 = SVOp->getOperand(1); 6050 DebugLoc dl = SVOp->getDebugLoc(); 6051 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end()); 6052 6053 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 6054 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode()); 6055 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode()); 6056 6057 // VPSHUFB may be generated if 6058 // (1) one of input vector is undefined or zeroinitializer. 6059 // The mask value 0x80 puts 0 in the corresponding slot of the vector. 6060 // And (2) the mask indexes don't cross the 128-bit lane. 6061 if (VT != MVT::v32i8 || !Subtarget->hasAVX2() || 6062 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero)) 6063 return SDValue(); 6064 6065 if (V1IsAllZero && !V2IsAllZero) { 6066 CommuteVectorShuffleMask(MaskVals, 32); 6067 V1 = V2; 6068 } 6069 SmallVector<SDValue, 32> pshufbMask; 6070 for (unsigned i = 0; i != 32; i++) { 6071 int EltIdx = MaskVals[i]; 6072 if (EltIdx < 0 || EltIdx >= 32) 6073 EltIdx = 0x80; 6074 else { 6075 if ((EltIdx >= 16 && i < 16) || (EltIdx < 16 && i >= 16)) 6076 // Cross lane is not allowed. 6077 return SDValue(); 6078 EltIdx &= 0xf; 6079 } 6080 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 6081 } 6082 return DAG.getNode(X86ISD::PSHUFB, dl, MVT::v32i8, V1, 6083 DAG.getNode(ISD::BUILD_VECTOR, dl, 6084 MVT::v32i8, &pshufbMask[0], 32)); 6085} 6086 6087/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide 6088/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be 6089/// done when every pair / quad of shuffle mask elements point to elements in 6090/// the right sequence. e.g. 
/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
static
SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
                                 SelectionDAG &DAG, DebugLoc dl) {
  MVT VT = SVOp->getValueType(0).getSimpleVT();
  unsigned NumElems = VT.getVectorNumElements();
  MVT NewVT;
  unsigned Scale;
  switch (VT.SimpleTy) {
  default: llvm_unreachable("Unexpected!");
  case MVT::v4f32:  NewVT = MVT::v2f64; Scale = 2; break;
  case MVT::v4i32:  NewVT = MVT::v2i64; Scale = 2; break;
  case MVT::v8i16:  NewVT = MVT::v4i32; Scale = 2; break;
  case MVT::v16i8:  NewVT = MVT::v4i32; Scale = 4; break;
  case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
  case MVT::v32i8:  NewVT = MVT::v8i32; Scale = 4; break;
  }

  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NumElems; i += Scale) {
    int StartIdx = -1;
    for (unsigned j = 0; j != Scale; ++j) {
      int EltIdx = SVOp->getMaskElt(i+j);
      if (EltIdx < 0)
        continue;
      if (StartIdx < 0)
        StartIdx = (EltIdx / Scale);
      if (EltIdx != (int)(StartIdx*Scale + j))
        return SDValue();
    }
    MaskVec.push_back(StartIdx);
  }

  SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
  SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
  return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
}

/// getVZextMovL - Return a zero-extending vector move low node.
///
static SDValue getVZextMovL(EVT VT, EVT OpVT,
                            SDValue SrcOp, SelectionDAG &DAG,
                            const X86Subtarget *Subtarget, DebugLoc dl) {
  if (VT == MVT::v2f64 || VT == MVT::v4f32) {
    LoadSDNode *LD = NULL;
    if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
      LD = dyn_cast<LoadSDNode>(SrcOp);
    if (!LD) {
      // movssrr and movsdrr do not clear the top bits. Try to use movd or
      // movq instead.
      MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
      if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
          SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
          SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
          SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
        // PR2108
        OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
        return DAG.getNode(ISD::BITCAST, dl, VT,
                           DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   OpVT,
                                                   SrcOp.getOperand(0)
                                                        .getOperand(0))));
      }
    }
  }

  return DAG.getNode(ISD::BITCAST, dl, VT,
                     DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                 DAG.getNode(ISD::BITCAST, dl,
                                             OpVT, SrcOp)));
}

/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
/// that could not be matched by any known target-specific shuffle.
static SDValue
LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {

  SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
  if (NewOp.getNode())
    return NewOp;

  EVT VT = SVOp->getValueType(0);

  unsigned NumElems = VT.getVectorNumElements();
  unsigned NumLaneElems = NumElems / 2;

  DebugLoc dl = SVOp->getDebugLoc();
  MVT EltVT = VT.getVectorElementType().getSimpleVT();
  EVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
  SDValue Output[2];

  SmallVector<int, 16> Mask;
  for (unsigned l = 0; l < 2; ++l) {
    // Build a shuffle mask for the output, discovering on the fly which
    // input vectors to use as shuffle operands (recorded in InputUsed).
    // If building a suitable shuffle vector proves too hard, then bail
    // out with UseBuildVector set.
    bool UseBuildVector = false;
    int InputUsed[2] = { -1, -1 }; // Not yet discovered.
    unsigned LaneStart = l * NumLaneElems;
    for (unsigned i = 0; i != NumLaneElems; ++i) {
      // The mask element. This indexes into the input.
      int Idx = SVOp->getMaskElt(i+LaneStart);
      if (Idx < 0) {
        // The mask element does not index into any input vector.
        Mask.push_back(-1);
        continue;
      }

      // The input vector this mask element indexes into.
      int Input = Idx / NumLaneElems;

      // Turn the index into an offset from the start of the input vector.
      Idx -= Input * NumLaneElems;

      // Find or create a shuffle vector operand to hold this input.
      unsigned OpNo;
      for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
        if (InputUsed[OpNo] == Input)
          // This input vector is already an operand.
          break;
        if (InputUsed[OpNo] < 0) {
          // Create a new operand for this input vector.
          InputUsed[OpNo] = Input;
          break;
        }
      }

      if (OpNo >= array_lengthof(InputUsed)) {
        // More than two input vectors used! Give up on trying to create a
        // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
        UseBuildVector = true;
        break;
      }

      // Add the mask index for the new shuffle vector.
      Mask.push_back(Idx + OpNo * NumLaneElems);
    }

    if (UseBuildVector) {
      SmallVector<SDValue, 16> SVOps;
      for (unsigned i = 0; i != NumLaneElems; ++i) {
        // The mask element. This indexes into the input.
        int Idx = SVOp->getMaskElt(i+LaneStart);
        if (Idx < 0) {
          SVOps.push_back(DAG.getUNDEF(EltVT));
          continue;
        }

        // The input vector this mask element indexes into.
        int Input = Idx / NumElems;

        // Turn the index into an offset from the start of the input vector.
        Idx -= Input * NumElems;

        // Extract the vector element by hand.
        SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                    SVOp->getOperand(Input),
                                    DAG.getIntPtrConstant(Idx)));
      }

      // Construct the output using a BUILD_VECTOR.
      Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, &SVOps[0],
                              SVOps.size());
    } else if (InputUsed[0] < 0) {
      // No input vectors were used! The result is undefined.
      Output[l] = DAG.getUNDEF(NVT);
    } else {
      SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
                                        (InputUsed[0] % 2) * NumLaneElems,
                                        DAG, dl);
      // If only one input was used, use an undefined vector for the other.
      SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
        Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
                            (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
      // At least one input vector was used. Create a new shuffle vector.
      Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
    }

    Mask.clear();
  }

  // Concatenate the results back together.
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
}

/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
/// 4 elements, and match them with several different shuffle types.
static SDValue
LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  DebugLoc dl = SVOp->getDebugLoc();
  EVT VT = SVOp->getValueType(0);

  assert(VT.is128BitVector() && "Unsupported vector size");

  std::pair<int, int> Locs[4];
  int Mask1[] = { -1, -1, -1, -1 };
  SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());

  unsigned NumHi = 0;
  unsigned NumLo = 0;
  for (unsigned i = 0; i != 4; ++i) {
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else {
      assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
      if (Idx < 4) {
        Locs[i] = std::make_pair(0, NumLo);
        Mask1[NumLo] = Idx;
        NumLo++;
      } else {
        Locs[i] = std::make_pair(1, NumHi);
        if (2+NumHi < 4)
          Mask1[2+NumHi] = Idx;
        NumHi++;
      }
    }
  }

  if (NumLo <= 2 && NumHi <= 2) {
    // No more than two elements come from either vector, so this can be
    // implemented with two shuffles. The first shuffle gathers the elements;
    // the second shuffle, which takes the first shuffle as both of its
    // vector operands, puts the elements into the right order.
    V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    int Mask2[] = { -1, -1, -1, -1 };

    for (unsigned i = 0; i != 4; ++i)
      if (Locs[i].first != -1) {
        unsigned Idx = (i < 2) ? 0 : 4;
        Idx += Locs[i].first * 2 + Locs[i].second;
        Mask2[i] = Idx;
      }

    return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
  }

  if (NumLo == 3 || NumHi == 3) {
    // Otherwise, we must have three elements from one vector, call it X, and
    // one element from the other, call it Y. First, use a shufps to build an
    // intermediate vector with the one element from Y and the element from X
    // that will be in the same half in the final destination (the indexes
    // don't matter). Then, use a shufps to build the final vector, taking the
    // half containing the element from Y from the intermediate, and the other
    // half from X.
    if (NumHi == 3) {
      // Normalize it so the 3 elements come from V1.
      CommuteVectorShuffleMask(PermMask, 4);
      std::swap(V1, V2);
    }

    // Find the element from V2.
    unsigned HiIndex;
    for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
      int Val = PermMask[HiIndex];
      if (Val < 0)
        continue;
      if (Val >= 4)
        break;
    }

    Mask1[0] = PermMask[HiIndex];
    Mask1[1] = -1;
    Mask1[2] = PermMask[HiIndex^1];
    Mask1[3] = -1;
    V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    if (HiIndex >= 2) {
      Mask1[0] = PermMask[0];
      Mask1[1] = PermMask[1];
      Mask1[2] = HiIndex & 1 ? 6 : 4;
      Mask1[3] = HiIndex & 1 ? 4 : 6;
      return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
    }

    Mask1[0] = HiIndex & 1 ? 2 : 0;
    Mask1[1] = HiIndex & 1 ? 0 : 2;
    Mask1[2] = PermMask[2];
    Mask1[3] = PermMask[3];
    if (Mask1[2] >= 0)
      Mask1[2] += 4;
    if (Mask1[3] >= 0)
      Mask1[3] += 4;
    return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
  }

  // Break it into (shuffle shuffle_hi, shuffle_lo).
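  // A worked example (illustrative, not tied to any particular caller): for
  // PermMask <0, 4, 1, 5>, the pass below produces LoMask <0, -1, 4, -1>
  // and HiMask <1, -1, 5, -1>, and the final shuffle of the two partial
  // results uses the mask <0, 2, 4, 6>.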
  int LoMask[] = { -1, -1, -1, -1 };
  int HiMask[] = { -1, -1, -1, -1 };

  int *MaskPtr = LoMask;
  unsigned MaskIdx = 0;
  unsigned LoIdx = 0;
  unsigned HiIdx = 2;
  for (unsigned i = 0; i != 4; ++i) {
    if (i == 2) {
      MaskPtr = HiMask;
      MaskIdx = 1;
      LoIdx = 0;
      HiIdx = 2;
    }
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else if (Idx < 4) {
      Locs[i] = std::make_pair(MaskIdx, LoIdx);
      MaskPtr[LoIdx] = Idx;
      LoIdx++;
    } else {
      Locs[i] = std::make_pair(MaskIdx, HiIdx);
      MaskPtr[HiIdx] = Idx;
      HiIdx++;
    }
  }

  SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
  SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
  int MaskOps[] = { -1, -1, -1, -1 };
  for (unsigned i = 0; i != 4; ++i)
    if (Locs[i].first != -1)
      MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
  return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
}

static bool MayFoldVectorLoad(SDValue V) {
  if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);
  if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    V = V.getOperand(0);
  if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
      V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
    // BUILD_VECTOR (load), undef
    V = V.getOperand(0);
  if (MayFoldLoad(V))
    return true;
  return false;
}

// FIXME: the version above should always be used. However, there is a bug
// where several vector shuffles can't be folded because the DAG is not
// updated during lowering and a node claims to have two uses while it only
// has one. Use this version, and let isel match another instruction if the
// load really happens to have more than one use. Remove this version after
// the bug gets fixed.
// rdar://8434668, PR8156
static bool RelaxedMayFoldVectorLoad(SDValue V) {
  if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);
  if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    V = V.getOperand(0);
  if (ISD::isNormalLoad(V.getNode()))
    return true;
  return false;
}

static
SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // Canonicalize to v2f64.
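  // For reference: MOVDDUP duplicates the low double, i.e. <a, b> -> <a, a>,
  // and can fold its memory operand, which is why the caller only forms it
  // when the input is a foldable load. Illustrative note only.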
  V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
                                          V1, DAG));
}

static
SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG,
                        bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();

  assert(VT != MVT::v2i64 && "unsupported shuffle type");

  if (HasSSE2 && VT == MVT::v2f64)
    return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);

  // v4f32 or v4i32: canonicalized to v4f32 (which is legal for SSE1)
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
                           DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
                           DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
}

static
SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();

  assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
         "unsupported shuffle type");

  if (V2.getOpcode() == ISD::UNDEF)
    V2 = V1;

  // v4i32 or v4f32
  return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
}

static
SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  unsigned NumElems = VT.getVectorNumElements();

  // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
  // operand of these instructions is only memory, so check if there's a
  // potential load folding here; otherwise use SHUFPS or MOVSD to match the
  // same masks.
  bool CanFoldLoad = false;

  // Trivial case, when V2 comes from a load.
  if (MayFoldVectorLoad(V2))
    CanFoldLoad = true;

  // When V1 is a load, it can be folded later into a store in isel, example:
  //  (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
  //    turns into:
  //  (MOVLPSmr addr:$src1, VR128:$src2)
  // So, recognize this potential and also use MOVLPS or MOVLPD.
  else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
    CanFoldLoad = true;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  if (CanFoldLoad) {
    if (HasSSE2 && NumElems == 2)
      return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);

    if (NumElems == 4)
      // If the second element is defined, use MOVLPS; when we don't care
      // about it, fall through and match movss below.
      if (SVOp->getMaskElt(1) != -1)
        return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
  }

  // movl and movlp will both match v2i64, but v2i64 is never matched by
  // movl earlier because we make it strict to avoid messing with the movlp
  // load folding logic (see the code above the getMOVLP call). Match it here
  // instead; this is horrible, but it will stay like this until we move all
  // shuffle matching to x86-specific nodes. Note that for the first condition
  // all types are matched with movsd.
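  // For reference, the register forms targeted below behave roughly as:
  //   movsd %xmm2, %xmm1  ==>  xmm1 = <xmm2[0], xmm1[1]>
  //   movss %xmm2, %xmm1  ==>  xmm1 = <xmm2[0], xmm1[1], xmm1[2], xmm1[3]>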
  if (HasSSE2) {
    // FIXME: isMOVLMask should be checked and matched before getMOVLP,
    // so as to remove this logic from here as much as possible.
    if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
      return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
    return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
  }

  assert(VT != MVT::v4i32 && "unsupported shuffle type");

  // Invert the operand order and use SHUFPS to match it.
  return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
                              getShuffleSHUFImmediate(SVOp), DAG);
}

SDValue
X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);

  if (isZeroShuffle(SVOp))
    return getZeroVector(VT, Subtarget, DAG, dl);

  // Handle splat operations.
  if (SVOp->isSplat()) {
    unsigned NumElem = VT.getVectorNumElements();
    int Size = VT.getSizeInBits();

    // Use vbroadcast whenever the splat comes from a foldable load.
    SDValue Broadcast = LowerVectorBroadcast(Op, DAG);
    if (Broadcast.getNode())
      return Broadcast;

    // Handle splats by matching through known shuffle masks.
    if ((Size == 128 && NumElem <= 4) ||
        (Size == 256 && NumElem < 8))
      return SDValue();

    // All remaining splats are promoted to target-supported vector shuffles.
    return PromoteSplat(SVOp, DAG);
  }

  // If the shuffle can be profitably rewritten as a narrower shuffle, then
  // do it!
  if (VT == MVT::v8i16  || VT == MVT::v16i8 ||
      VT == MVT::v16i16 || VT == MVT::v32i8) {
    SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
    if (NewOp.getNode())
      return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
  } else if ((VT == MVT::v4i32 ||
             (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
    // FIXME: Figure out a cleaner way to do this.
    // Try to make use of movq to zero out the top part.
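    // For example (illustrative): movq %xmm0, %xmm0 copies the low 64 bits
    // and zeroes bits 127:64, so a shuffle of X with an all-zeros vector
    // that keeps only X's low element(s) can become a single movq.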
    if (ISD::isBuildVectorAllZeros(V2.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
      if (NewOp.getNode()) {
        EVT NewVT = NewOp.getValueType();
        if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
                               NewVT, true, false))
          return getVZextMovL(VT, NewVT, NewOp.getOperand(0),
                              DAG, Subtarget, dl);
      }
    } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
      if (NewOp.getNode()) {
        EVT NewVT = NewOp.getValueType();
        if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
          return getVZextMovL(VT, NewVT, NewOp.getOperand(1),
                              DAG, Subtarget, dl);
      }
    }
  }
  return SDValue();
}

SDValue
X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  unsigned NumElems = VT.getVectorNumElements();
  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsSplat = false;
  bool V2IsSplat = false;
  bool HasSSE2 = Subtarget->hasSSE2();
  bool HasAVX  = Subtarget->hasAVX();
  bool HasAVX2 = Subtarget->hasAVX2();
  MachineFunction &MF = DAG.getMachineFunction();
  bool OptForSize = MF.getFunction()->getFnAttributes().hasOptimizeForSizeAttr();

  assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");

  if (V1IsUndef && V2IsUndef)
    return DAG.getUNDEF(VT);

  assert(!V1IsUndef && "Op 1 of shuffle should not be undef");

  // Vector shuffle lowering takes 3 steps:
  //
  // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
  //    narrowing and commutation of operands should be handled.
  // 2) Matching of shuffles with known shuffle masks to x86 target-specific
  //    shuffle nodes.
  // 3) Rewriting of unmatched masks into new generic shuffle operations,
  //    so the shuffle can be broken into other shuffles and the legalizer can
  //    try the lowering again.
  //
  // The general idea is that no vector_shuffle operation should be left to
  // be matched during isel; all of them must be converted to a target-specific
  // node here.

  // Normalize the input vectors. Here splats, zeroed vectors, profitable
  // narrowing and commutation of operands should be handled. The actual code
  // doesn't include all of those, work in progress...
  SDValue NewOp = NormalizeVectorShuffle(Op, DAG);
  if (NewOp.getNode())
    return NewOp;

  SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());

  // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
  // unpckh_undef). Only use pshufd if speed is more important than size.
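  // e.g. for v4i32, the mask <0,0,1,1> is both pshufd $0x50 and a
  // punpckldq of the register with itself; the unpck form skips the
  // immediate byte, so it is preferred when optimizing for size
  // (illustrative encoding note, not verified per subtarget).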
  if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasAVX2))
    return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
  if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasAVX2))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);

  if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
      V2IsUndef && RelaxedMayFoldVectorLoad(V1))
    return getMOVDDup(Op, dl, V1, DAG);

  if (isMOVHLPS_v_undef_Mask(M, VT))
    return getMOVHighToLow(Op, dl, DAG);

  // Used to match splats.
  if (HasSSE2 && isUNPCKHMask(M, VT, HasAVX2) && V2IsUndef &&
      (VT == MVT::v2f64 || VT == MVT::v2i64))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);

  if (isPSHUFDMask(M, VT)) {
    // The actual implementation will match the mask in the if above, and
    // then during isel it can match several different instructions, not only
    // pshufd as its name says. Sad but true; emulate the behavior for now...
    if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
      return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);

    unsigned TargetMask = getShuffleSHUFImmediate(SVOp);

    if (HasAVX && (VT == MVT::v4f32 || VT == MVT::v2f64))
      return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1, TargetMask, DAG);

    if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
      return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);

    return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
                                TargetMask, DAG);
  }

  // Check if this can be converted into a logical shift.
  bool isLeft = false;
  unsigned ShAmt = 0;
  SDValue ShVal;
  bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
  if (isShift && ShVal.hasOneUse()) {
    // If the shifted value has multiple uses, it may be cheaper to use
    // v_set0 + movlhps or movhlps, etc.
    EVT EltVT = VT.getVectorElementType();
    ShAmt *= EltVT.getSizeInBits();
    return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
  }

  if (isMOVLMask(M, VT)) {
    if (ISD::isBuildVectorAllZeros(V1.getNode()))
      return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
    if (!isMOVLPMask(M, VT)) {
      if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
        return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);

      if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
    }
  }

  // FIXME: fold these into a legal mask.
  if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasAVX2))
    return getMOVLowToHigh(Op, dl, DAG, HasSSE2);

  if (isMOVHLPSMask(M, VT))
    return getMOVHighToLow(Op, dl, DAG);

  if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);

  if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);

  if (isMOVLPMask(M, VT))
    return getMOVLP(Op, dl, DAG, HasSSE2);

  if (ShouldXformToMOVHLPS(M, VT) ||
      ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
    return CommuteVectorShuffle(SVOp, DAG);

  if (isShift) {
    // No better options. Use a vshldq / vsrldq.
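    // For example (illustrative): a v16i8 shuffle that brings in zeros at
    // the low end and slides every remaining byte up two positions is just
    //   pslldq $2, %xmm0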
    EVT EltVT = VT.getVectorElementType();
    ShAmt *= EltVT.getSizeInBits();
    return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
  }

  bool Commuted = false;
  // FIXME: This should also accept a bitcast of a splat? Be careful, not
  // 1,1,1,1 -> v8i16 though.
  V1IsSplat = isSplatVector(V1.getNode());
  V2IsSplat = isSplatVector(V2.getNode());

  // Canonicalize the splat or undef, if present, to be on the RHS.
  if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
    CommuteVectorShuffleMask(M, NumElems);
    std::swap(V1, V2);
    std::swap(V1IsSplat, V2IsSplat);
    Commuted = true;
  }

  if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
    // Shuffling the low element of V1 into undef, just return V1.
    if (V2IsUndef)
      return V1;
    // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
    // the instruction selector will not match, so get a canonical MOVL with
    // swapped operands to undo the commute.
    return getMOVL(DAG, dl, VT, V2, V1);
  }

  if (isUNPCKLMask(M, VT, HasAVX2))
    return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);

  if (isUNPCKHMask(M, VT, HasAVX2))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);

  if (V2IsSplat) {
    // Normalize the mask so all entries that point to V2 point to its first
    // element, then try to match unpck{h|l} again. If a match is found,
    // return a new vector_shuffle with the corrected mask.
    SmallVector<int, 8> NewMask(M.begin(), M.end());
    NormalizeMask(NewMask, NumElems);
    if (isUNPCKLMask(NewMask, VT, HasAVX2, true))
      return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
    if (isUNPCKHMask(NewMask, VT, HasAVX2, true))
      return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
  }

  if (Commuted) {
    // Commute it back and try unpck* again.
    // FIXME: this seems wrong.
    CommuteVectorShuffleMask(M, NumElems);
    std::swap(V1, V2);
    std::swap(V1IsSplat, V2IsSplat);
    Commuted = false;

    if (isUNPCKLMask(M, VT, HasAVX2))
      return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);

    if (isUNPCKHMask(M, VT, HasAVX2))
      return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
  }

  // Normalize the node to match x86 shuffle ops if needed.
  if (!V2IsUndef && (isSHUFPMask(M, VT, HasAVX, /* Commuted */ true)))
    return CommuteVectorShuffle(SVOp, DAG);

  // The checks below are all present in isShuffleMaskLegal, but they are
  // inlined here right now to enable us to directly emit target-specific
  // nodes, and remove one by one until they don't return Op anymore.
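  // For reference: palignr $imm, %xmm2, %xmm1 concatenates xmm1:xmm2 into a
  // 32-byte value and takes the 16 bytes starting at byte offset imm; that
  // is the pattern isPALIGNRMask recognizes below. Illustrative note only.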

  if (isPALIGNRMask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::PALIGN, dl, VT, V1, V2,
                                getShufflePALIGNRImmediate(SVOp),
                                DAG);

  if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
      SVOp->getSplatIndex() == 0 && V2IsUndef) {
    if (VT == MVT::v2f64 || VT == MVT::v2i64)
      return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
  }

  if (isPSHUFHWMask(M, VT, HasAVX2))
    return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
                                getShufflePSHUFHWImmediate(SVOp),
                                DAG);

  if (isPSHUFLWMask(M, VT, HasAVX2))
    return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
                                getShufflePSHUFLWImmediate(SVOp),
                                DAG);

  if (isSHUFPMask(M, VT, HasAVX))
    return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
                                getShuffleSHUFImmediate(SVOp), DAG);

  if (isUNPCKL_v_undef_Mask(M, VT, HasAVX2))
    return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
  if (isUNPCKH_v_undef_Mask(M, VT, HasAVX2))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);

  //===--------------------------------------------------------------------===//
  // Generate target-specific nodes for 128- or 256-bit shuffles only
  // supported in the AVX instruction set.
  //

  // Handle VMOVDDUPY permutations.
  if (V2IsUndef && isMOVDDUPYMask(M, VT, HasAVX))
    return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);

  // Handle VPERMILPS/D* permutations.
  if (isVPERMILPMask(M, VT, HasAVX)) {
    if (HasAVX2 && VT == MVT::v8i32)
      return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
                                  getShuffleSHUFImmediate(SVOp), DAG);
    return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1,
                                getShuffleSHUFImmediate(SVOp), DAG);
  }

  // Handle VPERM2F128/VPERM2I128 permutations.
  if (isVPERM2X128Mask(M, VT, HasAVX))
    return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
                                V2, getShuffleVPERM2X128Immediate(SVOp), DAG);

  SDValue BlendOp = LowerVECTOR_SHUFFLEtoBlend(SVOp, Subtarget, DAG);
  if (BlendOp.getNode())
    return BlendOp;

  if (V2IsUndef && HasAVX2 && (VT == MVT::v8i32 || VT == MVT::v8f32)) {
    SmallVector<SDValue, 8> permclMask;
    for (unsigned i = 0; i != 8; ++i) {
      permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MVT::i32));
    }
    SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32,
                               &permclMask[0], 8);
    // The bitcast is for VPERMPS, since the mask is v8i32 but the node takes
    // v8f32.
    return DAG.getNode(X86ISD::VPERMV, dl, VT,
                       DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
  }

  if (V2IsUndef && HasAVX2 && (VT == MVT::v4i64 || VT == MVT::v4f64))
    return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1,
                                getShuffleCLImmediate(SVOp), DAG);

  //===--------------------------------------------------------------------===//
  // Since no target-specific shuffle was selected for this generic one,
  // lower it into other known shuffles. FIXME: this isn't true yet, but
  // this is the plan.
  //

  // Handle v8i16 specifically since SSE can do byte extraction and insertion.
  if (VT == MVT::v8i16) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
    if (NewOp.getNode())
      return NewOp;
  }

  if (VT == MVT::v16i8) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, DAG, *this);
    if (NewOp.getNode())
      return NewOp;
  }

  if (VT == MVT::v32i8) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
    if (NewOp.getNode())
      return NewOp;
  }

  // Handle all 128-bit wide vectors with 4 elements, and match them with
  // several different shuffle types.
  if (NumElems == 4 && VT.is128BitVector())
    return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);

  // Handle general 256-bit shuffles.
  if (VT.is256BitVector())
    return LowerVECTOR_SHUFFLE_256(SVOp, DAG);

  return SDValue();
}

SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
                                                SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();

  if (!Op.getOperand(0).getValueType().is128BitVector())
    return SDValue();

  if (VT.getSizeInBits() == 8) {
    SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert  = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
                                  DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  }

  if (VT.getSizeInBits() == 16) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    // If Idx is 0, it's cheaper to do a move instead of a pextrw.
    if (Idx == 0)
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getNode(ISD::BITCAST, dl,
                                                 MVT::v4i32,
                                                 Op.getOperand(0)),
                                     Op.getOperand(1)));
    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert  = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
                                  DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  }

  if (VT == MVT::f32) {
    // EXTRACTPS outputs to a GPR32 register, which will require a movd to
    // copy the result back to an FR32 register. It's only worth matching if
    // the result has a single use which is a store or a bitcast to i32. And
    // in the case of a store, it's not worth it if the index is a constant 0,
    // because a MOVSSmr can be used instead, which is smaller and faster.
    if (!Op.hasOneUse())
      return SDValue();
    SDNode *User = *Op.getNode()->use_begin();
    if ((User->getOpcode() != ISD::STORE ||
         (isa<ConstantSDNode>(Op.getOperand(1)) &&
          cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
        (User->getOpcode() != ISD::BITCAST ||
         User->getValueType(0) != MVT::i32))
      return SDValue();
    SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
                                              Op.getOperand(0)),
                                  Op.getOperand(1));
    return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
  }

  if (VT == MVT::i32 || VT == MVT::i64) {
    // ExtractPS/pextrq works with a constant index.
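    // e.g. extractps $1, %xmm0, %eax  or  pextrq $1, %xmm0, %rax
    // (illustrative; both forms need the index as an immediate).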
    if (isa<ConstantSDNode>(Op.getOperand(1)))
      return Op;
  }
  return SDValue();
}

SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                           SelectionDAG &DAG) const {
  if (!isa<ConstantSDNode>(Op.getOperand(1)))
    return SDValue();

  SDValue Vec = Op.getOperand(0);
  EVT VecVT = Vec.getValueType();

  // If this is a 256-bit vector result, first extract the 128-bit vector and
  // then extract the element from the 128-bit vector.
  if (VecVT.is256BitVector()) {
    DebugLoc dl = Op.getNode()->getDebugLoc();
    unsigned NumElems = VecVT.getVectorNumElements();
    SDValue Idx = Op.getOperand(1);
    unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();

    // Get the 128-bit vector.
    Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);

    if (IdxVal >= NumElems/2)
      IdxVal -= NumElems/2;
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
                       DAG.getConstant(IdxVal, MVT::i32));
  }

  assert(VecVT.is128BitVector() && "Unexpected vector length");

  if (Subtarget->hasSSE41()) {
    SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
    if (Res.getNode())
      return Res;
  }

  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  // TODO: handle v16i8.
  if (VT.getSizeInBits() == 16) {
    SDValue Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getNode(ISD::BITCAST, dl,
                                                 MVT::v4i32, Vec),
                                     Op.getOperand(1)));
    // Transform it so it matches pextrw, which produces a 32-bit result.
    EVT EltVT = MVT::i32;
    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert  = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
                                  DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  }

  if (VT.getSizeInBits() == 32) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return Op;

    // SHUFPS the element to the lowest double word, then movss.
    int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
    EVT VVT = Op.getOperand(0).getValueType();
    SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
                                       DAG.getUNDEF(VVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0));
  }

  if (VT.getSizeInBits() == 64) {
    // FIXME: .td files only match this for <2 x f64>, not <2 x i64>, on 32b.
    // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
    //        to match extract_elt for f64.
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note: if the lower 64 bits of the result of the UNPCKHPD are then
    // stored to an f64mem, the whole operation is folded into a single
    // MOVHPDmr.
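    // For reference: unpckhpd %xmm0, %xmm0 turns <a, b> into <b, b>, so the
    // extract of element 1 becomes an extract of element 0 (illustrative).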
    int Mask[2] = { 1, -1 };
    EVT VVT = Op.getOperand(0).getValueType();
    SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
                                       DAG.getUNDEF(VVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0));
  }

  return SDValue();
}

SDValue
X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op,
                                               SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  EVT EltVT = VT.getVectorElementType();
  DebugLoc dl = Op.getDebugLoc();

  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2 = Op.getOperand(2);

  if (!VT.is128BitVector())
    return SDValue();

  if ((EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) &&
      isa<ConstantSDNode>(N2)) {
    unsigned Opc;
    if (VT == MVT::v8i16)
      Opc = X86ISD::PINSRW;
    else if (VT == MVT::v16i8)
      Opc = X86ISD::PINSRB;
    else
      Opc = X86ISD::PINSRB;

    // Transform it so it matches pinsr{b,w}, which expect a GR32 as their
    // second argument.
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
    return DAG.getNode(Opc, dl, VT, N0, N1, N2);
  }

  if (EltVT == MVT::f32 && isa<ConstantSDNode>(N2)) {
    // Bits [7:6] of the constant are the source select. This will always be
    //  zero here. The DAG Combiner may combine an extract_elt index into
    //  these bits. For example (insert (extract, 3), 2) could be matched by
    //  putting the '3' into bits [7:6] of X86ISD::INSERTPS.
    // Bits [5:4] of the constant are the destination select. This is the
    //  value of the incoming immediate.
    // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
    //  combine either a bitwise AND or an insert of float 0.0 to set these
    //  bits.
    N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4);
    // Create this as a scalar to vector.
    N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
    return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
  }

  if ((EltVT == MVT::i32 || EltVT == MVT::i64) && isa<ConstantSDNode>(N2)) {
    // PINSR* works with a constant index.
    return Op;
  }
  return SDValue();
}

SDValue
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  EVT EltVT = VT.getVectorElementType();

  DebugLoc dl = Op.getDebugLoc();
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2 = Op.getOperand(2);

  // If this is a 256-bit vector result, first extract the 128-bit vector,
  // insert the element into the extracted half and then place it back.
  if (VT.is256BitVector()) {
    if (!isa<ConstantSDNode>(N2))
      return SDValue();

    // Get the desired 128-bit vector half.
    unsigned NumElems = VT.getVectorNumElements();
    unsigned IdxVal = cast<ConstantSDNode>(N2)->getZExtValue();
    SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);

    // Insert the element into the desired half.
    bool Upper = IdxVal >= NumElems/2;
    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
                    DAG.getConstant(Upper ?
                                    IdxVal-NumElems/2 : IdxVal, MVT::i32));

    // Insert the changed part back into the 256-bit vector.
    return Insert128BitVector(N0, V, IdxVal, DAG, dl);
  }

  if (Subtarget->hasSSE41())
    return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);

  if (EltVT == MVT::i8)
    return SDValue();

  if (EltVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) {
    // Transform it so it matches pinsrw, which expects a 16-bit value in a
    // GR32 as its second argument.
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
    return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
  }
  return SDValue();
}

static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
  LLVMContext *Context = DAG.getContext();
  DebugLoc dl = Op.getDebugLoc();
  EVT OpVT = Op.getValueType();

  // If this is a 256-bit vector result, first insert into a 128-bit
  // vector and then insert into the 256-bit vector.
  if (!OpVT.is128BitVector()) {
    // Insert into a 128-bit vector.
    EVT VT128 = EVT::getVectorVT(*Context,
                                 OpVT.getVectorElementType(),
                                 OpVT.getVectorNumElements() / 2);

    Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));

    // Insert the 128-bit vector.
    return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
  }

  if (OpVT == MVT::v1i64 &&
      Op.getOperand(0).getValueType() == MVT::i64)
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));

  SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
  assert(OpVT.is128BitVector() && "Expected an SSE type!");
  return DAG.getNode(ISD::BITCAST, dl, OpVT,
                     DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
}

// Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
// a simple subregister reference or explicit instructions to grab
// the upper bits of a vector.
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
                                      SelectionDAG &DAG) {
  if (Subtarget->hasAVX()) {
    DebugLoc dl = Op.getNode()->getDebugLoc();
    SDValue Vec = Op.getNode()->getOperand(0);
    SDValue Idx = Op.getNode()->getOperand(1);

    if (Op.getNode()->getValueType(0).is128BitVector() &&
        Vec.getNode()->getValueType(0).is256BitVector() &&
        isa<ConstantSDNode>(Idx)) {
      unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
      return Extract128BitVector(Vec, IdxVal, DAG, dl);
    }
  }
  return SDValue();
}

// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
// simple superregister reference or explicit instructions to insert
// the upper bits of a vector.
static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
                                     SelectionDAG &DAG) {
  if (Subtarget->hasAVX()) {
    DebugLoc dl = Op.getNode()->getDebugLoc();
    SDValue Vec = Op.getNode()->getOperand(0);
    SDValue SubVec = Op.getNode()->getOperand(1);
    SDValue Idx = Op.getNode()->getOperand(2);

    if (Op.getNode()->getValueType(0).is256BitVector() &&
        SubVec.getNode()->getValueType(0).is128BitVector() &&
        isa<ConstantSDNode>(Idx)) {
      unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
      return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
    }
  }
  return SDValue();
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form an addressing mode. These wrapped nodes will be selected
// into MOV32ri.
SDValue
X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    WrapperKind = X86ISD::WrapperRIP;
  else if (Subtarget->isPICStyleGOT())
    OpFlag = X86II::MO_GOTOFF;
  else if (Subtarget->isPICStyleStubPIC())
    OpFlag = X86II::MO_PIC_BASE_OFFSET;

  SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
                                             CP->getAlignment(),
                                             CP->getOffset(), OpFlag);
  DebugLoc DL = CP->getDebugLoc();
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (OpFlag) {
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     DebugLoc(), getPointerTy()),
                         Result);
  }

  return Result;
}

SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    WrapperKind = X86ISD::WrapperRIP;
  else if (Subtarget->isPICStyleGOT())
    OpFlag = X86II::MO_GOTOFF;
  else if (Subtarget->isPICStyleStubPIC())
    OpFlag = X86II::MO_PIC_BASE_OFFSET;

  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
                                          OpFlag);
  DebugLoc DL = JT->getDebugLoc();
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (OpFlag)
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     DebugLoc(), getPointerTy()),
                         Result);

  return Result;
}

SDValue
X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
      OpFlag = X86II::MO_GOTPCREL;
    WrapperKind = X86ISD::WrapperRIP;
  } else if (Subtarget->isPICStyleGOT()) {
    OpFlag = X86II::MO_GOT;
  } else if (Subtarget->isPICStyleStubPIC()) {
    OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
  } else if (Subtarget->isPICStyleStubNoDynamic()) {
    OpFlag = X86II::MO_DARWIN_NONLAZY;
  }

  SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);

  DebugLoc DL = Op.getDebugLoc();
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->is64Bit()) {
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     DebugLoc(), getPointerTy()),
                         Result);
  }

  // For symbols that require a load from a stub to get the address, emit the
  // load.
  if (isGlobalStubReference(OpFlag))
    Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);

  return Result;
}

SDValue
X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  // Create the TargetBlockAddress node.
  unsigned char OpFlags =
    Subtarget->ClassifyBlockAddressReference();
  CodeModel::Model M = getTargetMachine().getCodeModel();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
  DebugLoc dl = Op.getDebugLoc();
  SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
                                             OpFlags);

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
  else
    Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (isGlobalRelativeToPICBase(OpFlags)) {
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
                         Result);
  }

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
                                      int64_t Offset,
                                      SelectionDAG &DAG) const {
  // Create the TargetGlobalAddress node, folding in the constant
  // offset if it is legal.
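  // e.g. under the small code model the offset can be folded into the
  // relocation itself (roughly 'movl $sym+8, %eax'); otherwise the addition
  // is emitted separately below. Illustrative note only.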
  unsigned char OpFlags =
    Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
  CodeModel::Model M = getTargetMachine().getCodeModel();
  SDValue Result;
  if (OpFlags == X86II::MO_NO_FLAG &&
      X86::isOffsetSuitableForCodeModel(Offset, M)) {
    // A direct static reference to a global.
    Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
    Offset = 0;
  } else {
    Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
  }

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
  else
    Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (isGlobalRelativeToPICBase(OpFlags)) {
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
                         Result);
  }

  // For globals that require a load from a stub to get the address, emit the
  // load.
  if (isGlobalStubReference(OpFlags))
    Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
                         DAG.getConstant(Offset, getPointerTy()));

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
  return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG);
}

static SDValue
GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
           SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
           unsigned char OperandFlags, bool LocalDynamic = false) {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  DebugLoc dl = GA->getDebugLoc();
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(),
                                           OperandFlags);

  X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
                                           : X86ISD::TLSADDR;

  if (InFlag) {
    SDValue Ops[] = { Chain, TGA, *InFlag };
    Chain = DAG.getNode(CallType, dl, NodeTys, Ops, 3);
  } else {
    SDValue Ops[] = { Chain, TGA };
    Chain = DAG.getNode(CallType, dl, NodeTys, Ops, 2);
  }

  // TLSADDR will be codegen'ed as a call. Inform MFI that the function has
  // calls.
  MFI->setAdjustsStack(true);

  SDValue Flag = Chain.getValue(1);
  return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit.
static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  SDValue InFlag;
  DebugLoc dl = GA->getDebugLoc();  // ? function entry point might be better
  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
                                   DAG.getNode(X86ISD::GlobalBaseReg,
                                               DebugLoc(), PtrVT), InFlag);
  InFlag = Chain.getValue(1);

  return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit.
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT,
                    X86::RAX, X86II::MO_TLSGD);
}

static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
                                           SelectionDAG &DAG,
                                           const EVT PtrVT,
                                           bool is64Bit) {
  DebugLoc dl = GA->getDebugLoc();

  // Get the start address of the TLS block for this module.
  X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
                                   .getInfo<X86MachineFunctionInfo>();
  MFI->incNumLocalDynamicTLSAccesses();

  SDValue Base;
  if (is64Bit) {
    Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, X86::RAX,
                      X86II::MO_TLSLD, /*LocalDynamic=*/true);
  } else {
    SDValue InFlag;
    SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
        DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), InFlag);
    InFlag = Chain.getValue(1);
    Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
                      X86II::MO_TLSLDM, /*LocalDynamic=*/true);
  }

  // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
  // of Base.

  // Build x@dtpoff.
  unsigned char OperandFlags = X86II::MO_DTPOFF;
  unsigned WrapperKind = X86ISD::Wrapper;
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  // Add x@dtpoff with the base.
  return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
}

// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                   const EVT PtrVT, TLSModel::Model model,
                                   bool is64Bit, bool isPIC) {
  DebugLoc dl = GA->getDebugLoc();

  // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
  Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
                                                         is64Bit ? 257 : 256));

  SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                                      DAG.getIntPtrConstant(0),
                                      MachinePointerInfo(Ptr),
                                      false, false, false, 0);

  unsigned char OperandFlags = 0;
  // Most TLS accesses are not RIP relative, even on x86-64. One exception is
  // initialexec.
  unsigned WrapperKind = X86ISD::Wrapper;
  if (model == TLSModel::LocalExec) {
    OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
  } else if (model == TLSModel::InitialExec) {
    if (is64Bit) {
      OperandFlags = X86II::MO_GOTTPOFF;
      WrapperKind = X86ISD::WrapperRIP;
    } else {
      OperandFlags = isPIC ?
                     X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
    }
  } else {
    llvm_unreachable("Unexpected model");
  }

  // emit "addl x@ntpoff,%eax" (local exec)
  // or   "addl x@indntpoff,%eax" (initial exec)
  // or   "addl x@gotntpoff(%ebx),%eax" (initial exec, 32-bit pic)
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  if (model == TLSModel::InitialExec) {
    if (isPIC && !is64Bit) {
      Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
                           DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT),
                           Offset);
    }

    Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
                         MachinePointerInfo::getGOT(), false, false, false,
                         0);
  }

  // The address of the thread-local variable is the sum of the thread
  // pointer and the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GA->getGlobal();

  if (Subtarget->isTargetELF()) {
    TLSModel::Model model = getTargetMachine().getTLSModel(GV);

    switch (model) {
      case TLSModel::GeneralDynamic:
        if (Subtarget->is64Bit())
          return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
        return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
      case TLSModel::LocalDynamic:
        return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
                                           Subtarget->is64Bit());
      case TLSModel::InitialExec:
      case TLSModel::LocalExec:
        return LowerToTLSExecModel(GA, DAG, getPointerTy(), model,
                                   Subtarget->is64Bit(),
                        getTargetMachine().getRelocationModel() == Reloc::PIC_);
    }
    llvm_unreachable("Unknown TLS model.");
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin only has one model of TLS. Lower to that.
    unsigned char OpFlag = 0;
    unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
                           X86ISD::WrapperRIP : X86ISD::Wrapper;

    // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
    // global base reg.
    bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) &&
                 !Subtarget->is64Bit();
    if (PIC32)
      OpFlag = X86II::MO_TLVP_PIC_BASE;
    else
      OpFlag = X86II::MO_TLVP;
    DebugLoc DL = Op.getDebugLoc();
    SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                                GA->getValueType(0),
                                                GA->getOffset(), OpFlag);
    SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);

    // With PIC32, the address is actually $g + Offset.
    if (PIC32)
      Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                           DAG.getNode(X86ISD::GlobalBaseReg,
                                       DebugLoc(), getPointerTy()),
                           Offset);

    // Lowering the machine isd will make sure everything is in the right
    // location.
    SDValue Chain = DAG.getEntryNode();
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Args[] = { Chain, Offset };
    Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args, 2);

    // TLSCALL will be codegen'ed as a call. Inform MFI that the function has
    // calls.
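    // (Roughly, on x86-64 Darwin this becomes:
    //    movq _x@TLVP(%rip), %rdi
    //    callq *(%rdi)
    //  with the variable's address returned in %rax. Illustrative only.)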
    MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    MFI->setAdjustsStack(true);

    // And our return value (tls address) is in the standard call return value
    // location.
    unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
    return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
                              Chain.getValue(1));
  }

  if (Subtarget->isTargetWindows()) {
    // Just use the implicit TLS architecture.
    // Need to generate something similar to:
    //   mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
    //                               ; from TEB
    //   mov ecx, dword [rel _tls_index]; Load index (from C runtime)
    //   mov rcx, qword [rdx+rcx*8]
    //   mov eax, .tls$:tlsvar
    //   [rax+rcx] contains the address
    // Windows 64-bit: gs:0x58
    // Windows 32-bit: fs:__tls_array

    // If GV is an alias then use the aliasee for determining
    // thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GV = GA->resolveAliasedGlobal(false);
    DebugLoc dl = GA->getDebugLoc();
    SDValue Chain = DAG.getEntryNode();

    // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
    // %gs:0x58 (64-bit).
    Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
                                        ? Type::getInt8PtrTy(*DAG.getContext(),
                                                             256)
                                        : Type::getInt32PtrTy(*DAG.getContext(),
                                                              257));

    SDValue ThreadPointer = DAG.getLoad(getPointerTy(), dl, Chain,
                                        Subtarget->is64Bit()
                                        ? DAG.getIntPtrConstant(0x58)
                                        : DAG.getExternalSymbol("_tls_array",
                                                                getPointerTy()),
                                        MachinePointerInfo(Ptr),
                                        false, false, false, 0);

    // Load the _tls_index variable.
    SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
    if (Subtarget->is64Bit())
      IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
                           IDX, MachinePointerInfo(), MVT::i32,
                           false, false, 0);
    else
      IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
                        false, false, false, 0);

    SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
                                    getPointerTy());
    IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);

    SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
    res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
                      false, false, false, 0);

    // Get the offset of the start of the .tls section.
    SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                             GA->getValueType(0),
                                             GA->getOffset(), X86II::MO_SECREL);
    SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);

    // The address of the thread local variable is the add of the thread
    // pointer with the offset of the variable.
    return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
  }

  llvm_unreachable("TLS not implemented for this target.");
}


/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
/// and take a 2 x i32 value to shift plus a shift amount.
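///
/// As an illustrative sketch only (not the exact assembly we finally emit),
/// an i64 SHL by %cl on 32-bit x86 is built from the pattern below, with
/// X86ISD::CMOVs picking between the "amount < 32" and "amount >= 32" cases:
///   shldl %cl, %lo, %hi   // Tmp2: high half, taking spilled-over low bits
///   shll  %cl, %lo        // Tmp3: low half shifted
///   testb $32, %cl        // did the shift cross into the upper half?
///   // if it did, the CMOVs route Tmp3 into Hi and zero into Lo instead.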
7750SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{ 7751 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 7752 EVT VT = Op.getValueType(); 7753 unsigned VTBits = VT.getSizeInBits(); 7754 DebugLoc dl = Op.getDebugLoc(); 7755 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 7756 SDValue ShOpLo = Op.getOperand(0); 7757 SDValue ShOpHi = Op.getOperand(1); 7758 SDValue ShAmt = Op.getOperand(2); 7759 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi, 7760 DAG.getConstant(VTBits - 1, MVT::i8)) 7761 : DAG.getConstant(0, VT); 7762 7763 SDValue Tmp2, Tmp3; 7764 if (Op.getOpcode() == ISD::SHL_PARTS) { 7765 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt); 7766 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 7767 } else { 7768 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt); 7769 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt); 7770 } 7771 7772 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt, 7773 DAG.getConstant(VTBits, MVT::i8)); 7774 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 7775 AndNode, DAG.getConstant(0, MVT::i8)); 7776 7777 SDValue Hi, Lo; 7778 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8); 7779 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond }; 7780 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond }; 7781 7782 if (Op.getOpcode() == ISD::SHL_PARTS) { 7783 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 7784 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 7785 } else { 7786 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 7787 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 7788 } 7789 7790 SDValue Ops[2] = { Lo, Hi }; 7791 return DAG.getMergeValues(Ops, 2, dl); 7792} 7793 7794SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, 7795 SelectionDAG &DAG) const { 7796 EVT SrcVT = Op.getOperand(0).getValueType(); 7797 7798 if (SrcVT.isVector()) 7799 return SDValue(); 7800 7801 assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 && 7802 "Unknown SINT_TO_FP to lower!"); 7803 7804 // These are really Legal; return the operand so the caller accepts it as 7805 // Legal. 
7806 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 7807 return Op; 7808 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) && 7809 Subtarget->is64Bit()) { 7810 return Op; 7811 } 7812 7813 DebugLoc dl = Op.getDebugLoc(); 7814 unsigned Size = SrcVT.getSizeInBits()/8; 7815 MachineFunction &MF = DAG.getMachineFunction(); 7816 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false); 7817 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7818 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 7819 StackSlot, 7820 MachinePointerInfo::getFixedStack(SSFI), 7821 false, false, 0); 7822 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG); 7823} 7824 7825SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, 7826 SDValue StackSlot, 7827 SelectionDAG &DAG) const { 7828 // Build the FILD 7829 DebugLoc DL = Op.getDebugLoc(); 7830 SDVTList Tys; 7831 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 7832 if (useSSE) 7833 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue); 7834 else 7835 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 7836 7837 unsigned ByteSize = SrcVT.getSizeInBits()/8; 7838 7839 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot); 7840 MachineMemOperand *MMO; 7841 if (FI) { 7842 int SSFI = FI->getIndex(); 7843 MMO = 7844 DAG.getMachineFunction() 7845 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7846 MachineMemOperand::MOLoad, ByteSize, ByteSize); 7847 } else { 7848 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand(); 7849 StackSlot = StackSlot.getOperand(1); 7850 } 7851 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) }; 7852 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : 7853 X86ISD::FILD, DL, 7854 Tys, Ops, array_lengthof(Ops), 7855 SrcVT, MMO); 7856 7857 if (useSSE) { 7858 Chain = Result.getValue(1); 7859 SDValue InFlag = Result.getValue(2); 7860 7861 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 7862 // shouldn't be necessary except that RFP cannot be live across 7863 // multiple blocks. When stackifier is fixed, they can be uncoupled. 7864 MachineFunction &MF = DAG.getMachineFunction(); 7865 unsigned SSFISize = Op.getValueType().getSizeInBits()/8; 7866 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false); 7867 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7868 Tys = DAG.getVTList(MVT::Other); 7869 SDValue Ops[] = { 7870 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag 7871 }; 7872 MachineMemOperand *MMO = 7873 DAG.getMachineFunction() 7874 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7875 MachineMemOperand::MOStore, SSFISize, SSFISize); 7876 7877 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, 7878 Ops, array_lengthof(Ops), 7879 Op.getValueType(), MMO); 7880 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot, 7881 MachinePointerInfo::getFixedStack(SSFI), 7882 false, false, false, 0); 7883 } 7884 7885 return Result; 7886} 7887 7888// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion. 7889SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, 7890 SelectionDAG &DAG) const { 7891 // This algorithm is not obvious. 
Here is what we're trying to output:
  /*
     movq %rax, %xmm0
     punpckldq (c0), %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
     subpd (c1), %xmm0      // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
     #ifdef __SSE3__
     haddpd %xmm0, %xmm0
     #else
     pshufd $0x4e, %xmm0, %xmm1
     addpd %xmm1, %xmm0
     #endif
  */

  DebugLoc dl = Op.getDebugLoc();
  LLVMContext *Context = DAG.getContext();

  // Build some magic constants.
  const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
  Constant *C0 = ConstantDataVector::get(*Context, CV0);
  SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);

  SmallVector<Constant*,2> CV1;
  CV1.push_back(
        ConstantFP::get(*Context, APFloat(APInt(64, 0x4330000000000000ULL))));
  CV1.push_back(
        ConstantFP::get(*Context, APFloat(APInt(64, 0x4530000000000000ULL))));
  Constant *C1 = ConstantVector::get(CV1);
  SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);

  // Load the 64-bit value into an XMM register.
  SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
                            Op.getOperand(0));
  SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
                              MachinePointerInfo::getConstantPool(),
                              false, false, false, 16);
  SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
                              DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
                              CLod0);

  SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
                              MachinePointerInfo::getConstantPool(),
                              false, false, false, 16);
  SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
  SDValue Result;

  if (Subtarget->hasSSE3()) {
    // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
    Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
  } else {
    SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
    SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
                                           S2F, 0x4E, DAG);
    Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
                         DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
                         Sub);
  }

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
                     DAG.getIntPtrConstant(0));
}

// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
                                               SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  // FP constant to bias correct the final result.
  SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
                                   MVT::f64);

  // Load the 32-bit value into an XMM register.
  SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
                             Op.getOperand(0));

  // Zero out the upper parts of the register.
  Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);

  Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                     DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
                     DAG.getIntPtrConstant(0));

  // Or the load with the bias.
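  // (A sketch of the bit trick used here: (0x4330000000000000 | zext(x)) is
  // the IEEE f64 bit pattern of 2^52 + x for any 32-bit x, so subtracting
  // the 2^52 bias below leaves exactly double(x).)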
  SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
                           DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   MVT::v2f64, Load)),
                           DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   MVT::v2f64, Bias)));
  Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                   DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
                   DAG.getIntPtrConstant(0));

  // Subtract the bias.
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);

  // Handle final rounding.
  EVT DestVT = Op.getValueType();

  if (DestVT.bitsLT(MVT::f64))
    return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
                       DAG.getIntPtrConstant(0));
  if (DestVT.bitsGT(MVT::f64))
    return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);

  // The destination is already f64, so no rounding is needed.
  return Sub;
}

SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue N0 = Op.getOperand(0);
  DebugLoc dl = Op.getDebugLoc();

  // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
  // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
  // the optimization here.
  if (DAG.SignBitIsZero(N0))
    return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);

  EVT SrcVT = N0.getValueType();
  EVT DstVT = Op.getValueType();
  if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
    return LowerUINT_TO_FP_i64(Op, DAG);
  if (SrcVT == MVT::i32 && X86ScalarSSEf64)
    return LowerUINT_TO_FP_i32(Op, DAG);
  if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
    return SDValue();

  // Make a 64-bit buffer, and use it to build an FILD.
  SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
  if (SrcVT == MVT::i32) {
    SDValue WordOff = DAG.getConstant(4, getPointerTy());
    SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
                                     getPointerTy(), StackSlot, WordOff);
    SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                                  StackSlot, MachinePointerInfo(),
                                  false, false, 0);
    SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
                                  OffsetSlot, MachinePointerInfo(),
                                  false, false, 0);
    SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
    return Fild;
  }

  assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                               StackSlot, MachinePointerInfo(),
                               false, false, 0);
  // For i64 source, we need to add the appropriate power of 2 if the input
  // was negative.  This is the same as the optimization in
  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
  // we must be careful to do the computation in x87 extended precision, not
  // in SSE.  (The generic code can't know it's OK to do this, or how to.)
  int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
  MachineMemOperand *MMO =
    DAG.getMachineFunction()
    .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                          MachineMemOperand::MOLoad, 8, 8);

  SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
  SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
  SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, 3,
                                         MVT::i64, MMO);

  APInt FF(32, 0x5F800000ULL);

  // Check whether the sign bit is set.
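  // (If it is set, the FILD above interpreted our unsigned value V as
  // V - 2^64, so 2^64 must be added back in; 0x5F800000 is exactly +2^64 as
  // an IEEE f32. A sketch of the fixup:
  //   result = fild(V) + (signbit(V) ? 0x1.0p64 : 0.0).)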
8059 SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(MVT::i64), 8060 Op.getOperand(0), DAG.getConstant(0, MVT::i64), 8061 ISD::SETLT); 8062 8063 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits. 8064 SDValue FudgePtr = DAG.getConstantPool( 8065 ConstantInt::get(*DAG.getContext(), FF.zext(64)), 8066 getPointerTy()); 8067 8068 // Get a pointer to FF if the sign bit was set, or to 0 otherwise. 8069 SDValue Zero = DAG.getIntPtrConstant(0); 8070 SDValue Four = DAG.getIntPtrConstant(4); 8071 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet, 8072 Zero, Four); 8073 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset); 8074 8075 // Load the value out, extending it from f32 to f80. 8076 // FIXME: Avoid the extend by constructing the right constant pool? 8077 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), 8078 FudgePtr, MachinePointerInfo::getConstantPool(), 8079 MVT::f32, false, false, 4); 8080 // Extend everything to 80 bits to force it to be done on x87. 8081 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge); 8082 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0)); 8083} 8084 8085std::pair<SDValue,SDValue> X86TargetLowering:: 8086FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned, bool IsReplace) const { 8087 DebugLoc DL = Op.getDebugLoc(); 8088 8089 EVT DstTy = Op.getValueType(); 8090 8091 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) { 8092 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT"); 8093 DstTy = MVT::i64; 8094 } 8095 8096 assert(DstTy.getSimpleVT() <= MVT::i64 && 8097 DstTy.getSimpleVT() >= MVT::i16 && 8098 "Unknown FP_TO_INT to lower!"); 8099 8100 // These are really Legal. 8101 if (DstTy == MVT::i32 && 8102 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 8103 return std::make_pair(SDValue(), SDValue()); 8104 if (Subtarget->is64Bit() && 8105 DstTy == MVT::i64 && 8106 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 8107 return std::make_pair(SDValue(), SDValue()); 8108 8109 // We lower FP->int64 either into FISTP64 followed by a load from a temporary 8110 // stack slot, or into the FTOL runtime function. 8111 MachineFunction &MF = DAG.getMachineFunction(); 8112 unsigned MemSize = DstTy.getSizeInBits()/8; 8113 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 8114 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8115 8116 unsigned Opc; 8117 if (!IsSigned && isIntegerTypeFTOL(DstTy)) 8118 Opc = X86ISD::WIN_FTOL; 8119 else 8120 switch (DstTy.getSimpleVT().SimpleTy) { 8121 default: llvm_unreachable("Invalid FP_TO_SINT to lower!"); 8122 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 8123 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 8124 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 8125 } 8126 8127 SDValue Chain = DAG.getEntryNode(); 8128 SDValue Value = Op.getOperand(0); 8129 EVT TheVT = Op.getOperand(0).getValueType(); 8130 // FIXME This causes a redundant load/store if the SSE-class value is already 8131 // in memory, such as if it is on the callstack. 
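  // A rough sketch of the flow for an SSE f64 -> i64 conversion (not the
  // literal output): the store below spills the SSE value, X86ISD::FLD
  // reloads it onto the x87 stack ("fldl <slot>"), FP_TO_INT64_IN_MEM then
  // emits "fistpll <slot2>", and the caller loads the i64 back from there.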
8132 if (isScalarFPTypeInSSEReg(TheVT)) { 8133 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 8134 Chain = DAG.getStore(Chain, DL, Value, StackSlot, 8135 MachinePointerInfo::getFixedStack(SSFI), 8136 false, false, 0); 8137 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 8138 SDValue Ops[] = { 8139 Chain, StackSlot, DAG.getValueType(TheVT) 8140 }; 8141 8142 MachineMemOperand *MMO = 8143 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8144 MachineMemOperand::MOLoad, MemSize, MemSize); 8145 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, 3, 8146 DstTy, MMO); 8147 Chain = Value.getValue(1); 8148 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 8149 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8150 } 8151 8152 MachineMemOperand *MMO = 8153 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8154 MachineMemOperand::MOStore, MemSize, MemSize); 8155 8156 if (Opc != X86ISD::WIN_FTOL) { 8157 // Build the FP_TO_INT*_IN_MEM 8158 SDValue Ops[] = { Chain, Value, StackSlot }; 8159 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other), 8160 Ops, 3, DstTy, MMO); 8161 return std::make_pair(FIST, StackSlot); 8162 } else { 8163 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL, 8164 DAG.getVTList(MVT::Other, MVT::Glue), 8165 Chain, Value); 8166 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX, 8167 MVT::i32, ftol.getValue(1)); 8168 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX, 8169 MVT::i32, eax.getValue(2)); 8170 SDValue Ops[] = { eax, edx }; 8171 SDValue pair = IsReplace 8172 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops, 2) 8173 : DAG.getMergeValues(Ops, 2, DL); 8174 return std::make_pair(pair, SDValue()); 8175 } 8176} 8177 8178SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, 8179 SelectionDAG &DAG) const { 8180 if (Op.getValueType().isVector()) 8181 return SDValue(); 8182 8183 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, 8184 /*IsSigned=*/ true, /*IsReplace=*/ false); 8185 SDValue FIST = Vals.first, StackSlot = Vals.second; 8186 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal. 8187 if (FIST.getNode() == 0) return Op; 8188 8189 if (StackSlot.getNode()) 8190 // Load the result. 8191 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 8192 FIST, StackSlot, MachinePointerInfo(), 8193 false, false, false, 0); 8194 8195 // The node is the result. 8196 return FIST; 8197} 8198 8199SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, 8200 SelectionDAG &DAG) const { 8201 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, 8202 /*IsSigned=*/ false, /*IsReplace=*/ false); 8203 SDValue FIST = Vals.first, StackSlot = Vals.second; 8204 assert(FIST.getNode() && "Unexpected failure"); 8205 8206 if (StackSlot.getNode()) 8207 // Load the result. 8208 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 8209 FIST, StackSlot, MachinePointerInfo(), 8210 false, false, false, 0); 8211 8212 // The node is the result. 8213 return FIST; 8214} 8215 8216SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const { 8217 LLVMContext *Context = DAG.getContext(); 8218 DebugLoc dl = Op.getDebugLoc(); 8219 EVT VT = Op.getValueType(); 8220 EVT EltVT = VT; 8221 unsigned NumElts = VT == MVT::f64 ? 
2 : 4; 8222 if (VT.isVector()) { 8223 EltVT = VT.getVectorElementType(); 8224 NumElts = VT.getVectorNumElements(); 8225 } 8226 Constant *C; 8227 if (EltVT == MVT::f64) 8228 C = ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))); 8229 else 8230 C = ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31)))); 8231 C = ConstantVector::getSplat(NumElts, C); 8232 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy()); 8233 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 8234 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8235 MachinePointerInfo::getConstantPool(), 8236 false, false, false, Alignment); 8237 if (VT.isVector()) { 8238 MVT ANDVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; 8239 return DAG.getNode(ISD::BITCAST, dl, VT, 8240 DAG.getNode(ISD::AND, dl, ANDVT, 8241 DAG.getNode(ISD::BITCAST, dl, ANDVT, 8242 Op.getOperand(0)), 8243 DAG.getNode(ISD::BITCAST, dl, ANDVT, Mask))); 8244 } 8245 return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask); 8246} 8247 8248SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const { 8249 LLVMContext *Context = DAG.getContext(); 8250 DebugLoc dl = Op.getDebugLoc(); 8251 EVT VT = Op.getValueType(); 8252 EVT EltVT = VT; 8253 unsigned NumElts = VT == MVT::f64 ? 2 : 4; 8254 if (VT.isVector()) { 8255 EltVT = VT.getVectorElementType(); 8256 NumElts = VT.getVectorNumElements(); 8257 } 8258 Constant *C; 8259 if (EltVT == MVT::f64) 8260 C = ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63))); 8261 else 8262 C = ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31))); 8263 C = ConstantVector::getSplat(NumElts, C); 8264 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy()); 8265 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 8266 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8267 MachinePointerInfo::getConstantPool(), 8268 false, false, false, Alignment); 8269 if (VT.isVector()) { 8270 MVT XORVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; 8271 return DAG.getNode(ISD::BITCAST, dl, VT, 8272 DAG.getNode(ISD::XOR, dl, XORVT, 8273 DAG.getNode(ISD::BITCAST, dl, XORVT, 8274 Op.getOperand(0)), 8275 DAG.getNode(ISD::BITCAST, dl, XORVT, Mask))); 8276 } 8277 8278 return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask); 8279} 8280 8281SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 8282 LLVMContext *Context = DAG.getContext(); 8283 SDValue Op0 = Op.getOperand(0); 8284 SDValue Op1 = Op.getOperand(1); 8285 DebugLoc dl = Op.getDebugLoc(); 8286 EVT VT = Op.getValueType(); 8287 EVT SrcVT = Op1.getValueType(); 8288 8289 // If second operand is smaller, extend it first. 8290 if (SrcVT.bitsLT(VT)) { 8291 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1); 8292 SrcVT = VT; 8293 } 8294 // And if it is bigger, shrink it first. 8295 if (SrcVT.bitsGT(VT)) { 8296 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1)); 8297 SrcVT = VT; 8298 } 8299 8300 // At this point the operands and the result should have the same 8301 // type, and that won't be f80 since that is not custom lowered. 8302 8303 // First get the sign bit of second operand. 
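  // (For f64 the steps below amount to this bit manipulation, a sketch:
  //    SignBit = Op1 &  0x8000000000000000
  //    Val     = Op0 & ~0x8000000000000000
  //    result  = Val | SignBit
  //  f32 uses the analogous 32-bit masks.)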
  SmallVector<Constant*,4> CV;
  if (SrcVT == MVT::f64) {
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0))));
  } else {
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
  }
  Constant *C = ConstantVector::get(CV);
  SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
  SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
                              MachinePointerInfo::getConstantPool(),
                              false, false, false, 16);
  SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);

  // Shift sign bit right or left if the two operands have different types.
  if (SrcVT.bitsGT(VT)) {
    // Op0 is MVT::f32, Op1 is MVT::f64.
    SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit);
    SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit,
                          DAG.getConstant(32, MVT::i32));
    SignBit = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, SignBit);
    SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit,
                          DAG.getIntPtrConstant(0));
  }

  // Clear the first operand's sign bit.
  CV.clear();
  if (VT == MVT::f64) {
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0))));
  } else {
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31)))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
  }
  C = ConstantVector::get(CV);
  CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
  SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
                              MachinePointerInfo::getConstantPool(),
                              false, false, false, 16);
  SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2);

  // Or the value with the sign bit.
  return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
}

static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
  SDValue N0 = Op.getOperand(0);
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();

  // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
  SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
                                  DAG.getConstant(1, VT));
  return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
}

// LowerVectorAllZeroTest - Check whether an OR'd tree is PTEST-able.
//
SDValue X86TargetLowering::LowerVectorAllZeroTest(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");

  if (!Subtarget->hasSSE41())
    return SDValue();

  if (!Op->hasOneUse())
    return SDValue();

  SDNode *N = Op.getNode();
  DebugLoc DL = N->getDebugLoc();

  SmallVector<SDValue, 8> Opnds;
  DenseMap<SDValue, unsigned> VecInMap;
  EVT VT = MVT::Other;

  // Recognize a special case where a vector is cast into a wide integer to
  // test all 0s.
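  // For example (a sketch), testing (i128 bitcast (v4i32 %v)) == 0 reaches
  // us as an OR'd tree over the four lane extracts:
  //   (or (or (extractelt %v, 0), (extractelt %v, 1)),
  //       (or (extractelt %v, 2), (extractelt %v, 3)))
  // Once every lane is accounted for, this becomes a single
  // "ptest %xmm0, %xmm0" whose ZF answers the all-zeros question.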
  Opnds.push_back(N->getOperand(0));
  Opnds.push_back(N->getOperand(1));

  for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
    SmallVector<SDValue, 8>::const_iterator I = Opnds.begin() + Slot;
    // BFS traverse all OR'd operands.
    if (I->getOpcode() == ISD::OR) {
      Opnds.push_back(I->getOperand(0));
      Opnds.push_back(I->getOperand(1));
      // Re-evaluate the number of nodes to be traversed.
      e += 2; // 2 more nodes (LHS and RHS) are pushed.
      continue;
    }

    // Quit if this is not an EXTRACT_VECTOR_ELT.
    if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();

    // Quit if the index is not a constant.
    SDValue Idx = I->getOperand(1);
    if (!isa<ConstantSDNode>(Idx))
      return SDValue();

    SDValue ExtractedFromVec = I->getOperand(0);
    DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
    if (M == VecInMap.end()) {
      VT = ExtractedFromVec.getValueType();
      // Quit if not 128/256-bit vector.
      if (!VT.is128BitVector() && !VT.is256BitVector())
        return SDValue();
      // Quit if not the same type.
      if (VecInMap.begin() != VecInMap.end() &&
          VT != VecInMap.begin()->first.getValueType())
        return SDValue();
      M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
    }
    M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
  }

  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Not extracted from 128-/256-bit vector.");

  unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
  SmallVector<SDValue, 8> VecIns;

  for (DenseMap<SDValue, unsigned>::const_iterator
        I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
    // Quit if not all elements are used.
    if (I->second != FullMask)
      return SDValue();
    VecIns.push_back(I->first);
  }

  EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;

  // Cast all vectors into TestVT for PTEST.
  for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
    VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);

  // If more than one full vector is evaluated, OR them first before PTEST.
  for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
    // Each iteration will OR 2 nodes and append the result until there is only
    // 1 node left, i.e. the final OR'd value of all vectors.
    SDValue LHS = VecIns[Slot];
    SDValue RHS = VecIns[Slot + 1];
    VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
  }

  return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
                     VecIns.back(), VecIns.back());
}

/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
                                    SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();

  // CF and OF aren't always set the way we want. Determine which
  // of these we need.
  bool NeedCF = false;
  bool NeedOF = false;
  switch (X86CC) {
  default: break;
  case X86::COND_A: case X86::COND_AE:
  case X86::COND_B: case X86::COND_BE:
    NeedCF = true;
    break;
  case X86::COND_G: case X86::COND_GE:
  case X86::COND_L: case X86::COND_LE:
  case X86::COND_O: case X86::COND_NO:
    NeedOF = true;
    break;
  }

  // See if we can use the EFLAGS value from the operand instead of
  // doing a separate TEST.
TEST always sets OF and CF to 0, so unless
  // we prove that the arithmetic won't overflow, we can't use OF or CF.
  if (Op.getResNo() != 0 || NeedOF || NeedCF)
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, Op.getValueType()));

  unsigned Opcode = 0;
  unsigned NumOperands = 0;

  // Truncate operations may prevent the merge of the SETCC instruction
  // and the arithmetic instruction before it. Attempt to truncate the operands
  // of the arithmetic instruction and use a reduced bit-width instruction.
  bool NeedTruncation = false;
  SDValue ArithOp = Op;
  if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
    SDValue Arith = Op->getOperand(0);
    // Both the trunc and the arithmetic op need to have one user each.
    if (Arith->hasOneUse())
      switch (Arith.getOpcode()) {
        default: break;
        case ISD::ADD:
        case ISD::SUB:
        case ISD::AND:
        case ISD::OR:
        case ISD::XOR: {
          NeedTruncation = true;
          ArithOp = Arith;
        }
      }
  }

  // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
  // which may be the result of a CAST.  We use the variable 'Op', which is the
  // non-casted variable, when we check for possible users.
  switch (ArithOp.getOpcode()) {
  case ISD::ADD:
    // Due to an isel shortcoming, be conservative if this add is likely to be
    // selected as part of a load-modify-store instruction. When the root node
    // in a match is a store, isel doesn't know how to remap non-chain non-flag
    // uses of other nodes in the match, such as the ADD in this case. This
    // leads to the ADD being left around and reselected, with the result being
    // two adds in the output.  Alas, even if none of our users are stores, that
    // doesn't prove we're O.K.  Ergo, if we have any parents that aren't
    // CopyToReg or SETCC, eschew INC/DEC.  A better fix seems to require
    // climbing the DAG back to the root, and it doesn't seem to be worth the
    // effort.
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
         UE = Op.getNode()->use_end(); UI != UE; ++UI)
      if (UI->getOpcode() != ISD::CopyToReg &&
          UI->getOpcode() != ISD::SETCC &&
          UI->getOpcode() != ISD::STORE)
        goto default_case;

    if (ConstantSDNode *C =
        dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
      // An add of one will be selected as an INC.
      if (C->getAPIntValue() == 1) {
        Opcode = X86ISD::INC;
        NumOperands = 1;
        break;
      }

      // An add of negative one (subtract of one) will be selected as a DEC.
      if (C->getAPIntValue().isAllOnesValue()) {
        Opcode = X86ISD::DEC;
        NumOperands = 1;
        break;
      }
    }

    // Otherwise use a regular EFLAGS-setting add.
    Opcode = X86ISD::ADD;
    NumOperands = 2;
    break;
  case ISD::AND: {
    // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
    // because a TEST instruction will be better.
    bool NonFlagUse = false;
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
           UE = Op.getNode()->use_end(); UI != UE; ++UI) {
      SDNode *User = *UI;
      unsigned UOpNo = UI.getOperandNo();
      if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
        // Look past the truncate.
8566 UOpNo = User->use_begin().getOperandNo(); 8567 User = *User->use_begin(); 8568 } 8569 8570 if (User->getOpcode() != ISD::BRCOND && 8571 User->getOpcode() != ISD::SETCC && 8572 !(User->getOpcode() == ISD::SELECT && UOpNo == 0)) { 8573 NonFlagUse = true; 8574 break; 8575 } 8576 } 8577 8578 if (!NonFlagUse) 8579 break; 8580 } 8581 // FALL THROUGH 8582 case ISD::SUB: 8583 case ISD::OR: 8584 case ISD::XOR: 8585 // Due to the ISEL shortcoming noted above, be conservative if this op is 8586 // likely to be selected as part of a load-modify-store instruction. 8587 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 8588 UE = Op.getNode()->use_end(); UI != UE; ++UI) 8589 if (UI->getOpcode() == ISD::STORE) 8590 goto default_case; 8591 8592 // Otherwise use a regular EFLAGS-setting instruction. 8593 switch (ArithOp.getOpcode()) { 8594 default: llvm_unreachable("unexpected operator!"); 8595 case ISD::SUB: Opcode = X86ISD::SUB; break; 8596 case ISD::XOR: Opcode = X86ISD::XOR; break; 8597 case ISD::AND: Opcode = X86ISD::AND; break; 8598 case ISD::OR: { 8599 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) { 8600 SDValue EFLAGS = LowerVectorAllZeroTest(Op, DAG); 8601 if (EFLAGS.getNode()) 8602 return EFLAGS; 8603 } 8604 Opcode = X86ISD::OR; 8605 break; 8606 } 8607 } 8608 8609 NumOperands = 2; 8610 break; 8611 case X86ISD::ADD: 8612 case X86ISD::SUB: 8613 case X86ISD::INC: 8614 case X86ISD::DEC: 8615 case X86ISD::OR: 8616 case X86ISD::XOR: 8617 case X86ISD::AND: 8618 return SDValue(Op.getNode(), 1); 8619 default: 8620 default_case: 8621 break; 8622 } 8623 8624 // If we found that truncation is beneficial, perform the truncation and 8625 // update 'Op'. 8626 if (NeedTruncation) { 8627 EVT VT = Op.getValueType(); 8628 SDValue WideVal = Op->getOperand(0); 8629 EVT WideVT = WideVal.getValueType(); 8630 unsigned ConvertedOp = 0; 8631 // Use a target machine opcode to prevent further DAGCombine 8632 // optimizations that may separate the arithmetic operations 8633 // from the setcc node. 8634 switch (WideVal.getOpcode()) { 8635 default: break; 8636 case ISD::ADD: ConvertedOp = X86ISD::ADD; break; 8637 case ISD::SUB: ConvertedOp = X86ISD::SUB; break; 8638 case ISD::AND: ConvertedOp = X86ISD::AND; break; 8639 case ISD::OR: ConvertedOp = X86ISD::OR; break; 8640 case ISD::XOR: ConvertedOp = X86ISD::XOR; break; 8641 } 8642 8643 if (ConvertedOp) { 8644 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8645 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) { 8646 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0)); 8647 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1)); 8648 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1); 8649 } 8650 } 8651 } 8652 8653 if (Opcode == 0) 8654 // Emit a CMP with 0, which is the TEST pattern. 8655 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 8656 DAG.getConstant(0, Op.getValueType())); 8657 8658 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 8659 SmallVector<SDValue, 4> Ops; 8660 for (unsigned i = 0; i != NumOperands; ++i) 8661 Ops.push_back(Op.getOperand(i)); 8662 8663 SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands); 8664 DAG.ReplaceAllUsesWith(Op, New); 8665 return SDValue(New.getNode(), 1); 8666} 8667 8668/// Emit nodes that will be selected as "cmp Op0,Op1", or something 8669/// equivalent. 
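/// For example (an illustrative summary): a compare against the constant
/// zero is routed to EmitTest above, while other integer compares become an
/// X86ISD::SUB whose second result (EFLAGS) is the value consumed, so the
/// SUB can be CSE'd with a real subtraction of the same operands.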
8670SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, 8671 SelectionDAG &DAG) const { 8672 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) 8673 if (C->getAPIntValue() == 0) 8674 return EmitTest(Op0, X86CC, DAG); 8675 8676 DebugLoc dl = Op0.getDebugLoc(); 8677 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 || 8678 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) { 8679 // Use SUB instead of CMP to enable CSE between SUB and CMP. 8680 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32); 8681 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, 8682 Op0, Op1); 8683 return SDValue(Sub.getNode(), 1); 8684 } 8685 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1); 8686} 8687 8688/// Convert a comparison if required by the subtarget. 8689SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp, 8690 SelectionDAG &DAG) const { 8691 // If the subtarget does not support the FUCOMI instruction, floating-point 8692 // comparisons have to be converted. 8693 if (Subtarget->hasCMov() || 8694 Cmp.getOpcode() != X86ISD::CMP || 8695 !Cmp.getOperand(0).getValueType().isFloatingPoint() || 8696 !Cmp.getOperand(1).getValueType().isFloatingPoint()) 8697 return Cmp; 8698 8699 // The instruction selector will select an FUCOM instruction instead of 8700 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence 8701 // build an SDNode sequence that transfers the result from FPSW into EFLAGS: 8702 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8)))) 8703 DebugLoc dl = Cmp.getDebugLoc(); 8704 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp); 8705 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW); 8706 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW, 8707 DAG.getConstant(8, MVT::i8)); 8708 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl); 8709 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl); 8710} 8711 8712/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node 8713/// if it's possible. 8714SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, 8715 DebugLoc dl, SelectionDAG &DAG) const { 8716 SDValue Op0 = And.getOperand(0); 8717 SDValue Op1 = And.getOperand(1); 8718 if (Op0.getOpcode() == ISD::TRUNCATE) 8719 Op0 = Op0.getOperand(0); 8720 if (Op1.getOpcode() == ISD::TRUNCATE) 8721 Op1 = Op1.getOperand(0); 8722 8723 SDValue LHS, RHS; 8724 if (Op1.getOpcode() == ISD::SHL) 8725 std::swap(Op0, Op1); 8726 if (Op0.getOpcode() == ISD::SHL) { 8727 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0))) 8728 if (And00C->getZExtValue() == 1) { 8729 // If we looked past a truncate, check that it's only truncating away 8730 // known zeros. 8731 unsigned BitWidth = Op0.getValueSizeInBits(); 8732 unsigned AndBitWidth = And.getValueSizeInBits(); 8733 if (BitWidth > AndBitWidth) { 8734 APInt Zeros, Ones; 8735 DAG.ComputeMaskedBits(Op0, Zeros, Ones); 8736 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth) 8737 return SDValue(); 8738 } 8739 LHS = Op1; 8740 RHS = Op0.getOperand(1); 8741 } 8742 } else if (Op1.getOpcode() == ISD::Constant) { 8743 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1); 8744 uint64_t AndRHSVal = AndRHS->getZExtValue(); 8745 SDValue AndLHS = Op0; 8746 8747 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) { 8748 LHS = AndLHS.getOperand(0); 8749 RHS = AndLHS.getOperand(1); 8750 } 8751 8752 // Use BT if the immediate can't be encoded in a TEST instruction. 
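    // (e.g. there is no "testq $0x100000000, %rax", since TEST only takes a
    // sign-extended 32-bit immediate, but the same single-bit check can be
    // done as "btq $32, %rax" -- a sketch: (x & (1ULL << n)) != 0 -> bt x, n.)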
    if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
      LHS = AndLHS;
      RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
    }
  }

  if (LHS.getNode()) {
    // If LHS is i8, promote it to i32 with any_extend.  There is no i8 BT
    // instruction.  Since the shift amount is in-range-or-undefined, we know
    // that doing a bittest on the i32 value is ok.  We extend to i32 because
    // the encoding for the i16 version is larger than the i32 version.
    // Also promote i16 to i32 for performance / code size reasons.
    if (LHS.getValueType() == MVT::i8 ||
        LHS.getValueType() == MVT::i16)
      LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);

    // If the operand types disagree, extend the shift amount to match.  Since
    // BT ignores high bits (like shifts) we can use anyextend.
    if (LHS.getValueType() != RHS.getValueType())
      RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);

    SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
    unsigned Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
    return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                       DAG.getConstant(Cond, MVT::i8), BT);
  }

  return SDValue();
}

SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {

  if (Op.getValueType().isVector()) return LowerVSETCC(Op, DAG);

  assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // Optimize to BT if possible.
  // Lower (X & (1 << N)) == 0 to BT(X, N).
  // Lower ((X >>u N) & 1) != 0 to BT(X, N).
  // Lower ((X >>s N) & 1) != 0 to BT(X, N).
  if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
      Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(Op1)->isNullValue() &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
    if (NewSetCC.getNode())
      return NewSetCC;
  }

  // Look for X == 0, X == 1, X != 0, or X != 1.  We can simplify some forms of
  // these.
  if (Op1.getOpcode() == ISD::Constant &&
      (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
       cast<ConstantSDNode>(Op1)->isNullValue()) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {

    // If the input is a setcc, then reuse the input setcc or use a new one with
    // the inverted condition.
    if (Op0.getOpcode() == X86ISD::SETCC) {
      X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
      bool Invert = (CC == ISD::SETNE) ^
        cast<ConstantSDNode>(Op1)->isNullValue();
      if (!Invert) return Op0;

      CCode = X86::GetOppositeBranchCondition(CCode);
      return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                         DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1));
    }
  }

  bool isFP = Op1.getValueType().isFloatingPoint();
  unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
  if (X86CC == X86::COND_INVALID)
    return SDValue();

  SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, DAG);
  EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
  return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                     DAG.getConstant(X86CC, MVT::i8), EFLAGS);
}

// Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
// ones, and then concatenate the result back.
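// For example (a sketch), a v8i32 SETGT without AVX2 becomes two v4i32
// PCMPGTDs, one per extracted 128-bit half, whose results are rejoined by a
// CONCAT_VECTORS (a vinsertf128 once instructions are selected).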
8840static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) { 8841 EVT VT = Op.getValueType(); 8842 8843 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC && 8844 "Unsupported value type for operation"); 8845 8846 unsigned NumElems = VT.getVectorNumElements(); 8847 DebugLoc dl = Op.getDebugLoc(); 8848 SDValue CC = Op.getOperand(2); 8849 8850 // Extract the LHS vectors 8851 SDValue LHS = Op.getOperand(0); 8852 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 8853 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 8854 8855 // Extract the RHS vectors 8856 SDValue RHS = Op.getOperand(1); 8857 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl); 8858 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl); 8859 8860 // Issue the operation on the smaller types and concatenate the result back 8861 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 8862 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 8863 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 8864 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC), 8865 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC)); 8866} 8867 8868 8869SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const { 8870 SDValue Cond; 8871 SDValue Op0 = Op.getOperand(0); 8872 SDValue Op1 = Op.getOperand(1); 8873 SDValue CC = Op.getOperand(2); 8874 EVT VT = Op.getValueType(); 8875 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 8876 bool isFP = Op.getOperand(1).getValueType().isFloatingPoint(); 8877 DebugLoc dl = Op.getDebugLoc(); 8878 8879 if (isFP) { 8880#ifndef NDEBUG 8881 EVT EltVT = Op0.getValueType().getVectorElementType(); 8882 assert(EltVT == MVT::f32 || EltVT == MVT::f64); 8883#endif 8884 8885 unsigned SSECC; 8886 bool Swap = false; 8887 8888 // SSE Condition code mapping: 8889 // 0 - EQ 8890 // 1 - LT 8891 // 2 - LE 8892 // 3 - UNORD 8893 // 4 - NEQ 8894 // 5 - NLT 8895 // 6 - NLE 8896 // 7 - ORD 8897 switch (SetCCOpcode) { 8898 default: llvm_unreachable("Unexpected SETCC condition"); 8899 case ISD::SETOEQ: 8900 case ISD::SETEQ: SSECC = 0; break; 8901 case ISD::SETOGT: 8902 case ISD::SETGT: Swap = true; // Fallthrough 8903 case ISD::SETLT: 8904 case ISD::SETOLT: SSECC = 1; break; 8905 case ISD::SETOGE: 8906 case ISD::SETGE: Swap = true; // Fallthrough 8907 case ISD::SETLE: 8908 case ISD::SETOLE: SSECC = 2; break; 8909 case ISD::SETUO: SSECC = 3; break; 8910 case ISD::SETUNE: 8911 case ISD::SETNE: SSECC = 4; break; 8912 case ISD::SETULE: Swap = true; // Fallthrough 8913 case ISD::SETUGE: SSECC = 5; break; 8914 case ISD::SETULT: Swap = true; // Fallthrough 8915 case ISD::SETUGT: SSECC = 6; break; 8916 case ISD::SETO: SSECC = 7; break; 8917 case ISD::SETUEQ: 8918 case ISD::SETONE: SSECC = 8; break; 8919 } 8920 if (Swap) 8921 std::swap(Op0, Op1); 8922 8923 // In the two special cases we can't handle, emit two comparisons. 8924 if (SSECC == 8) { 8925 unsigned CC0, CC1; 8926 unsigned CombineOpc; 8927 if (SetCCOpcode == ISD::SETUEQ) { 8928 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR; 8929 } else { 8930 assert(SetCCOpcode == ISD::SETONE); 8931 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND; 8932 } 8933 8934 SDValue Cmp0 = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 8935 DAG.getConstant(CC0, MVT::i8)); 8936 SDValue Cmp1 = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 8937 DAG.getConstant(CC1, MVT::i8)); 8938 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1); 8939 } 8940 // Handle all other FP comparisons here. 
8941 return DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 8942 DAG.getConstant(SSECC, MVT::i8)); 8943 } 8944 8945 // Break 256-bit integer vector compare into smaller ones. 8946 if (VT.is256BitVector() && !Subtarget->hasAVX2()) 8947 return Lower256IntVSETCC(Op, DAG); 8948 8949 // We are handling one of the integer comparisons here. Since SSE only has 8950 // GT and EQ comparisons for integer, swapping operands and multiple 8951 // operations may be required for some comparisons. 8952 unsigned Opc; 8953 bool Swap = false, Invert = false, FlipSigns = false; 8954 8955 switch (SetCCOpcode) { 8956 default: llvm_unreachable("Unexpected SETCC condition"); 8957 case ISD::SETNE: Invert = true; 8958 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break; 8959 case ISD::SETLT: Swap = true; 8960 case ISD::SETGT: Opc = X86ISD::PCMPGT; break; 8961 case ISD::SETGE: Swap = true; 8962 case ISD::SETLE: Opc = X86ISD::PCMPGT; Invert = true; break; 8963 case ISD::SETULT: Swap = true; 8964 case ISD::SETUGT: Opc = X86ISD::PCMPGT; FlipSigns = true; break; 8965 case ISD::SETUGE: Swap = true; 8966 case ISD::SETULE: Opc = X86ISD::PCMPGT; FlipSigns = true; Invert = true; break; 8967 } 8968 if (Swap) 8969 std::swap(Op0, Op1); 8970 8971 // Check that the operation in question is available (most are plain SSE2, 8972 // but PCMPGTQ and PCMPEQQ have different requirements). 8973 if (VT == MVT::v2i64) { 8974 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) 8975 return SDValue(); 8976 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) 8977 return SDValue(); 8978 } 8979 8980 // Since SSE has no unsigned integer comparisons, we need to flip the sign 8981 // bits of the inputs before performing those operations. 8982 if (FlipSigns) { 8983 EVT EltVT = VT.getVectorElementType(); 8984 SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), 8985 EltVT); 8986 std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit); 8987 SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &SignBits[0], 8988 SignBits.size()); 8989 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec); 8990 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec); 8991 } 8992 8993 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 8994 8995 // If the logical-not of the result is required, perform that now. 8996 if (Invert) 8997 Result = DAG.getNOT(dl, Result, VT); 8998 8999 return Result; 9000} 9001 9002// isX86LogicalCmp - Return true if opcode is a X86 logical comparison. 
9003static bool isX86LogicalCmp(SDValue Op) { 9004 unsigned Opc = Op.getNode()->getOpcode(); 9005 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI || 9006 Opc == X86ISD::SAHF) 9007 return true; 9008 if (Op.getResNo() == 1 && 9009 (Opc == X86ISD::ADD || 9010 Opc == X86ISD::SUB || 9011 Opc == X86ISD::ADC || 9012 Opc == X86ISD::SBB || 9013 Opc == X86ISD::SMUL || 9014 Opc == X86ISD::UMUL || 9015 Opc == X86ISD::INC || 9016 Opc == X86ISD::DEC || 9017 Opc == X86ISD::OR || 9018 Opc == X86ISD::XOR || 9019 Opc == X86ISD::AND)) 9020 return true; 9021 9022 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL) 9023 return true; 9024 9025 return false; 9026} 9027 9028static bool isZero(SDValue V) { 9029 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 9030 return C && C->isNullValue(); 9031} 9032 9033static bool isAllOnes(SDValue V) { 9034 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 9035 return C && C->isAllOnesValue(); 9036} 9037 9038static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) { 9039 if (V.getOpcode() != ISD::TRUNCATE) 9040 return false; 9041 9042 SDValue VOp0 = V.getOperand(0); 9043 unsigned InBits = VOp0.getValueSizeInBits(); 9044 unsigned Bits = V.getValueSizeInBits(); 9045 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits)); 9046} 9047 9048SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 9049 bool addTest = true; 9050 SDValue Cond = Op.getOperand(0); 9051 SDValue Op1 = Op.getOperand(1); 9052 SDValue Op2 = Op.getOperand(2); 9053 DebugLoc DL = Op.getDebugLoc(); 9054 SDValue CC; 9055 9056 if (Cond.getOpcode() == ISD::SETCC) { 9057 SDValue NewCond = LowerSETCC(Cond, DAG); 9058 if (NewCond.getNode()) 9059 Cond = NewCond; 9060 } 9061 9062 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y 9063 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y 9064 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y 9065 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y 9066 if (Cond.getOpcode() == X86ISD::SETCC && 9067 Cond.getOperand(1).getOpcode() == X86ISD::CMP && 9068 isZero(Cond.getOperand(1).getOperand(1))) { 9069 SDValue Cmp = Cond.getOperand(1); 9070 9071 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue(); 9072 9073 if ((isAllOnes(Op1) || isAllOnes(Op2)) && 9074 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) { 9075 SDValue Y = isAllOnes(Op2) ? Op1 : Op2; 9076 9077 SDValue CmpOp0 = Cmp.getOperand(0); 9078 // Apply further optimizations for special cases 9079 // (select (x != 0), -1, 0) -> neg & sbb 9080 // (select (x == 0), 0, -1) -> neg & sbb 9081 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y)) 9082 if (YC->isNullValue() && 9083 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) { 9084 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32); 9085 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs, 9086 DAG.getConstant(0, CmpOp0.getValueType()), 9087 CmpOp0); 9088 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 9089 DAG.getConstant(X86::COND_B, MVT::i8), 9090 SDValue(Neg.getNode(), 1)); 9091 return Res; 9092 } 9093 9094 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, 9095 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType())); 9096 Cmp = ConvertCmpIfNecessary(Cmp, DAG); 9097 9098 SDValue Res = // Res = 0 or -1. 
      DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
                  DAG.getConstant(X86::COND_B, MVT::i8), Cmp);

      if (isAllOnes(Op1) != (CondCode == X86::COND_E))
        Res = DAG.getNOT(DL, Res, Res.getValueType());

      ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
      if (N2C == 0 || !N2C->isNullValue())
        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
      return Res;
    }
  }

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
    if (C && C->getAPIntValue() == 1)
      Cond = Cond.getOperand(0);
  }

  // If condition flag is set by a X86ISD::CMP, then use it as the condition
  // setting operand in place of the X86ISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == X86ISD::SETCC ||
      CondOpcode == X86ISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    EVT VT = Op.getValueType();

    bool IllegalFPCMov = false;
    if (VT.isFloatingPoint() && !VT.isVector() &&
        !isScalarFPTypeInSSEReg(VT))  // FPStack?
      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());

    if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
        Opc == X86ISD::BT) { // FIXME
      Cond = Cmp;
      addTest = false;
    }
  } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
             CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
             ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
              Cond.getOperand(0).getValueType() != MVT::i8)) {
    SDValue LHS = Cond.getOperand(0);
    SDValue RHS = Cond.getOperand(1);
    unsigned X86Opcode;
    unsigned X86Cond;
    SDVTList VTs;
    switch (CondOpcode) {
    case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
    case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
    case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
    case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
    case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
    case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
    default: llvm_unreachable("unexpected overflowing operator");
    }
    if (CondOpcode == ISD::UMULO)
      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
                          MVT::i32);
    else
      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);

    SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);

    if (CondOpcode == ISD::UMULO)
      Cond = X86Op.getValue(2);
    else
      Cond = X86Op.getValue(1);

    CC = DAG.getConstant(X86Cond, MVT::i8);
    addTest = false;
  }

  if (addTest) {
    // Look past the truncate if the high bits are known zero.
    if (isTruncWithZeroHighBitsInput(Cond, DAG))
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
      if (NewSetCC.getNode()) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond = EmitTest(Cond, X86::COND_NE, DAG);
  }

  // a <  b ? -1 :  0 -> RES = ~setcc_carry
  // a <  b ?  0 : -1 -> RES = setcc_carry
  // a >= b ? -1 :  0 -> RES = setcc_carry
  // a >= b ?  0 : -1 -> RES = ~setcc_carry
  if (Cond.getOpcode() == X86ISD::SUB) {
    Cond = ConvertCmpIfNecessary(Cond, DAG);
    unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();

    if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
        (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
      SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
                                DAG.getConstant(X86::COND_B, MVT::i8), Cond);
      if (isAllOnes(Op1) != (CondCode == X86::COND_B))
        Res = DAG.getNOT(DL, Res, Res.getValueType());
      return Res;
    }
  }

  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
  // the condition is true.
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = { Op2, Op1, CC, Cond };
  return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops, array_lengthof(Ops));
}

// isAndOrOfSetCCs - Return true if node is an ISD::AND or ISD::OR of two
// X86ISD::SETCC nodes, each of which has no other use apart from the
// AND / OR.
static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
  Opc = Op.getOpcode();
  if (Opc != ISD::OR && Opc != ISD::AND)
    return false;
  return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
          Op.getOperand(0).hasOneUse() &&
          Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
          Op.getOperand(1).hasOneUse());
}

// isXor1OfSetCC - Return true if node is an ISD::XOR of an X86ISD::SETCC and
// 1, and that the SETCC node has a single use.
static bool isXor1OfSetCC(SDValue Op) {
  if (Op.getOpcode() != ISD::XOR)
    return false;
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (N1C && N1C->getAPIntValue() == 1) {
    return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
           Op.getOperand(0).hasOneUse();
  }
  return false;
}

SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  bool addTest = true;
  SDValue Chain = Op.getOperand(0);
  SDValue Cond = Op.getOperand(1);
  SDValue Dest = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();
  SDValue CC;
  bool Inverted = false;

  if (Cond.getOpcode() == ISD::SETCC) {
    // Check for setcc([su]{add,sub,mul}o == 0).
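    // Branching on (setcc ([su]addo x, y):1 == 0) means branching when the
    // operation did *not* overflow, so mark it Inverted and later use the
    // opposite of the overflow branch condition.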
    if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
        isa<ConstantSDNode>(Cond.getOperand(1)) &&
        cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
        Cond.getOperand(0).getResNo() == 1 &&
        (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
         Cond.getOperand(0).getOpcode() == ISD::UADDO ||
         Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
         Cond.getOperand(0).getOpcode() == ISD::USUBO ||
         Cond.getOperand(0).getOpcode() == ISD::SMULO ||
         Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
      Inverted = true;
      Cond = Cond.getOperand(0);
    } else {
      SDValue NewCond = LowerSETCC(Cond, DAG);
      if (NewCond.getNode())
        Cond = NewCond;
    }
  }
#if 0
  // FIXME: LowerXALUO doesn't handle these!!
  else if (Cond.getOpcode() == X86ISD::ADD ||
           Cond.getOpcode() == X86ISD::SUB ||
           Cond.getOpcode() == X86ISD::SMUL ||
           Cond.getOpcode() == X86ISD::UMUL)
    Cond = LowerXALUO(Cond, DAG);
#endif

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
    if (C && C->getAPIntValue() == 1)
      Cond = Cond.getOperand(0);
  }

  // If the condition flag is set by an X86ISD::CMP, then use it as the
  // condition-setting operand in place of the X86ISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == X86ISD::SETCC ||
      CondOpcode == X86ISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
    if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
      Cond = Cmp;
      addTest = false;
    } else {
      switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
      default: break;
      case X86::COND_O:
      case X86::COND_B:
        // These can only come from an arithmetic instruction with overflow,
        // e.g. SADDO, UADDO.
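        // COND_O and COND_B test OF and CF directly, so the SETCC's flag
        // operand (the EFLAGS result of that arithmetic) can feed the branch
        // without an extra TEST.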
        Cond = Cond.getNode()->getOperand(1);
        addTest = false;
        break;
      }
    }
  }
  CondOpcode = Cond.getOpcode();
  if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
      CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
      ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
       Cond.getOperand(0).getValueType() != MVT::i8)) {
    SDValue LHS = Cond.getOperand(0);
    SDValue RHS = Cond.getOperand(1);
    unsigned X86Opcode;
    unsigned X86Cond;
    SDVTList VTs;
    switch (CondOpcode) {
    case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
    case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
    case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
    case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
    case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
    case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
    default: llvm_unreachable("unexpected overflowing operator");
    }
    if (Inverted)
      X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
    if (CondOpcode == ISD::UMULO)
      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
                          MVT::i32);
    else
      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);

    SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);

    if (CondOpcode == ISD::UMULO)
      Cond = X86Op.getValue(2);
    else
      Cond = X86Op.getValue(1);

    CC = DAG.getConstant(X86Cond, MVT::i8);
    addTest = false;
  } else {
    unsigned CondOpc;
    if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
      SDValue Cmp = Cond.getOperand(0).getOperand(1);
      if (CondOpc == ISD::OR) {
        // Also, recognize the pattern generated by an FCMP_UNE. We can emit
        // two branches instead of an explicit OR instruction with a
        // separate test.
        if (Cmp == Cond.getOperand(1).getOperand(1) &&
            isX86LogicalCmp(Cmp)) {
          CC = Cond.getOperand(0).getOperand(0);
          Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                              Chain, Dest, CC, Cmp);
          CC = Cond.getOperand(1).getOperand(0);
          Cond = Cmp;
          addTest = false;
        }
      } else { // ISD::AND
        // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
        // two branches instead of an explicit AND instruction with a
        // separate test. However, we only do this if this block doesn't
        // have a fall-through edge, because this requires an explicit
        // jmp when the condition is false.
        if (Cmp == Cond.getOperand(1).getOperand(1) &&
            isX86LogicalCmp(Cmp) &&
            Op.getNode()->hasOneUse()) {
          X86::CondCode CCode =
            (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
          CCode = X86::GetOppositeBranchCondition(CCode);
          CC = DAG.getConstant(CCode, MVT::i8);
          SDNode *User = *Op.getNode()->use_begin();
          // Look for an unconditional branch following this conditional branch.
          // We need this because we need to reverse the successors in order
          // to implement FCMP_OEQ.
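          // With reversed successors this becomes "jne false; jp false",
          // falling through to the true block only when the operands compare
          // ordered and equal (an unordered UCOMI result sets ZF and PF).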
          if (User->getOpcode() == ISD::BR) {
            SDValue FalseBB = User->getOperand(1);
            SDNode *NewBR =
              DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
            assert(NewBR == User);
            (void)NewBR;
            Dest = FalseBB;

            Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                                Chain, Dest, CC, Cmp);
            X86::CondCode CCode =
              (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
            CCode = X86::GetOppositeBranchCondition(CCode);
            CC = DAG.getConstant(CCode, MVT::i8);
            Cond = Cmp;
            addTest = false;
          }
        }
      }
    } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
      // Recognize (xor (setcc), 1) patterns; the xor inverts the condition.
      // This should be transformed by the DAG combiner, except when the
      // condition is set by an arithmetic-with-overflow node.
      X86::CondCode CCode =
        (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
      CCode = X86::GetOppositeBranchCondition(CCode);
      CC = DAG.getConstant(CCode, MVT::i8);
      Cond = Cond.getOperand(0).getOperand(1);
      addTest = false;
    } else if (Cond.getOpcode() == ISD::SETCC &&
               cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
      // For FCMP_OEQ, we can emit
      // two branches instead of an explicit AND instruction with a
      // separate test. However, we only do this if this block doesn't
      // have a fall-through edge, because this requires an explicit
      // jmp when the condition is false.
      if (Op.getNode()->hasOneUse()) {
        SDNode *User = *Op.getNode()->use_begin();
        // Look for an unconditional branch following this conditional branch.
        // We need this because we need to reverse the successors in order
        // to implement FCMP_OEQ.
        if (User->getOpcode() == ISD::BR) {
          SDValue FalseBB = User->getOperand(1);
          SDNode *NewBR =
            DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
          assert(NewBR == User);
          (void)NewBR;
          Dest = FalseBB;

          SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
                                    Cond.getOperand(0), Cond.getOperand(1));
          Cmp = ConvertCmpIfNecessary(Cmp, DAG);
          CC = DAG.getConstant(X86::COND_NE, MVT::i8);
          Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                              Chain, Dest, CC, Cmp);
          CC = DAG.getConstant(X86::COND_P, MVT::i8);
          Cond = Cmp;
          addTest = false;
        }
      }
    } else if (Cond.getOpcode() == ISD::SETCC &&
               cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
      // For FCMP_UNE, we can emit
      // two branches instead of an explicit OR instruction with a
      // separate test. However, we only do this if this block doesn't
      // have a fall-through edge, because this requires an explicit
      // jmp when the condition is false.
      if (Op.getNode()->hasOneUse()) {
        SDNode *User = *Op.getNode()->use_begin();
        // Look for an unconditional branch following this conditional branch.
        // We need this because we need to reverse the successors in order
        // to implement FCMP_UNE.
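        // With reversed successors this becomes "jne true; jnp false": any
        // inequality branches straight to the true block, an ordered-equal
        // result exits via jnp, and an unordered result (PF set) falls
        // through to the true block.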
        if (User->getOpcode() == ISD::BR) {
          SDValue FalseBB = User->getOperand(1);
          SDNode *NewBR =
            DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
          assert(NewBR == User);
          (void)NewBR;

          SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
                                    Cond.getOperand(0), Cond.getOperand(1));
          Cmp = ConvertCmpIfNecessary(Cmp, DAG);
          CC = DAG.getConstant(X86::COND_NE, MVT::i8);
          Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                              Chain, Dest, CC, Cmp);
          CC = DAG.getConstant(X86::COND_NP, MVT::i8);
          Cond = Cmp;
          addTest = false;
          Dest = FalseBB;
        }
      }
    }
  }

  if (addTest) {
    // Look past the truncate if the high bits are known zero.
    if (isTruncWithZeroHighBitsInput(Cond, DAG))
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
      if (NewSetCC.getNode()) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond = EmitTest(Cond, X86::COND_NE, DAG);
  }
  Cond = ConvertCmpIfNecessary(Cond, DAG);
  return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                     Chain, Dest, CC, Cond);
}


// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4k
// bytes in one go. Touching the stack at 4K increments is necessary to ensure
// that the guard pages used by the OS virtual memory manager are allocated in
// correct sequence.
SDValue
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                           SelectionDAG &DAG) const {
  assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows() ||
          getTargetMachine().Options.EnableSegmentedStacks) &&
         "This should be used only on Windows targets or when segmented stacks "
         "are being used");
  assert(!Subtarget->isTargetEnvMacho() && "Not implemented");
  DebugLoc dl = Op.getDebugLoc();

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  // FIXME: Ensure alignment here

  bool Is64Bit = Subtarget->is64Bit();
  EVT SPTy = Is64Bit ? MVT::i64 : MVT::i32;

  if (getTargetMachine().Options.EnableSegmentedStacks) {
    MachineFunction &MF = DAG.getMachineFunction();
    MachineRegisterInfo &MRI = MF.getRegInfo();

    if (Is64Bit) {
      // The 64-bit implementation of segmented stacks needs to clobber both
      // r10 and r11, which makes it impossible to use along with nested
      // parameters.
      const Function *F = MF.getFunction();

      for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
           I != E; ++I)
        if (I->hasNestAttr())
          report_fatal_error("Cannot use segmented stacks with functions that "
                             "have nested arguments.");
    }

    const TargetRegisterClass *AddrRegClass =
      getRegClassFor(Subtarget->is64Bit() ?
                     MVT::i64 : MVT::i32);
    unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
    Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
    SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
                                DAG.getRegister(Vreg, SPTy));
    SDValue Ops1[2] = { Value, Chain };
    return DAG.getMergeValues(Ops1, 2, dl);
  } else {
    SDValue Flag;
    unsigned Reg = (Subtarget->is64Bit() ? X86::RAX : X86::EAX);

    Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
    Flag = Chain.getValue(1);
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

    Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
    Flag = Chain.getValue(1);

    Chain = DAG.getCopyFromReg(Chain, dl, X86StackPtr, SPTy).getValue(1);

    SDValue Ops1[2] = { Chain.getValue(0), Chain };
    return DAG.getMergeValues(Ops1, 2, dl);
  }
}

SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  DebugLoc DL = Op.getDebugLoc();

  if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                   getPointerTy());
    return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
                        MachinePointerInfo(SV), false, false, 0);
  }

  // __va_list_tag:
  //   gp_offset         (0 - 6 * 8)
  //   fp_offset         (48 - 48 + 8 * 16)
  //   overflow_arg_area (points to parameters coming in memory)
  //   reg_save_area
  SmallVector<SDValue, 8> MemOps;
  SDValue FIN = Op.getOperand(1);
  // Store gp_offset
  SDValue Store = DAG.getStore(Op.getOperand(0), DL,
                               DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
                                               MVT::i32),
                               FIN, MachinePointerInfo(SV), false, false, 0);
  MemOps.push_back(Store);

  // Store fp_offset
  FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                    FIN, DAG.getIntPtrConstant(4));
  Store = DAG.getStore(Op.getOperand(0), DL,
                       DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
                                       MVT::i32),
                       FIN, MachinePointerInfo(SV, 4), false, false, 0);
  MemOps.push_back(Store);

  // Store ptr to overflow_arg_area
  FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                    FIN, DAG.getIntPtrConstant(4));
  SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                    getPointerTy());
  Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
                       MachinePointerInfo(SV, 8),
                       false, false, 0);
  MemOps.push_back(Store);

  // Store ptr to reg_save_area.
  FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                    FIN, DAG.getIntPtrConstant(8));
  SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
                                    getPointerTy());
  Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
                       MachinePointerInfo(SV, 16), false, false, 0);
  MemOps.push_back(Store);
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                     &MemOps[0], MemOps.size());
}

SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget->is64Bit() &&
         "LowerVAARG only handles 64-bit va_arg!");
  assert((Subtarget->isTargetLinux() ||
          Subtarget->isTargetDarwin()) &&
         "Unhandled target in LowerVAARG");
  assert(Op.getNode()->getNumOperands() == 4);
  SDValue Chain = Op.getOperand(0);
  SDValue SrcPtr = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  unsigned Align = Op.getConstantOperandVal(3);
  DebugLoc dl = Op.getDebugLoc();

  EVT ArgVT = Op.getNode()->getValueType(0);
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
  uint32_t ArgSize = getTargetData()->getTypeAllocSize(ArgTy);
  uint8_t ArgMode;

  // Decide which area this value should be read from.
  // TODO: Implement the AMD64 ABI in its entirety. This simple
  //       selection mechanism works only for the basic types.
  if (ArgVT == MVT::f80) {
    llvm_unreachable("va_arg for f80 not yet implemented");
  } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
    ArgMode = 2;  // Argument passed in XMM register. Use fp_offset.
  } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
    ArgMode = 1;  // Argument passed in GPR64 register(s). Use gp_offset.
  } else {
    llvm_unreachable("Unhandled argument type in LowerVAARG");
  }

  if (ArgMode == 2) {
    // Sanity Check: Make sure using fp_offset makes sense.
    assert(!getTargetMachine().Options.UseSoftFloat &&
           !(DAG.getMachineFunction()
             .getFunction()->getFnAttributes().hasNoImplicitFloatAttr()) &&
           Subtarget->hasSSE1());
  }

  // Insert VAARG_64 node into the DAG.
  // VAARG_64 returns two values: Variable Argument Address, Chain.
  SmallVector<SDValue, 11> InstOps;
  InstOps.push_back(Chain);
  InstOps.push_back(SrcPtr);
  InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
  InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
  InstOps.push_back(DAG.getConstant(Align, MVT::i32));
  SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
  SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
                                          VTs, &InstOps[0], InstOps.size(),
                                          MVT::i64,
                                          MachinePointerInfo(SV),
                                          /*Align=*/0,
                                          /*Volatile=*/false,
                                          /*ReadMem=*/true,
                                          /*WriteMem=*/true);
  Chain = VAARG.getValue(1);

  // Load the next argument and return it.
  return DAG.getLoad(ArgVT, dl,
                     Chain,
                     VAARG,
                     MachinePointerInfo(),
                     false, false, false, 0);
}

static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
                           SelectionDAG &DAG) {
  // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
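  // Field offsets within that struct: gp_offset at 0, fp_offset at 4,
  // overflow_arg_area at 8, reg_save_area at 16; 24 bytes total, which is
  // exactly the amount memcpy'd below and matches the offsets stored by
  // LowerVASTART above.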
  assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
  SDValue Chain = Op.getOperand(0);
  SDValue DstPtr = Op.getOperand(1);
  SDValue SrcPtr = Op.getOperand(2);
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  DebugLoc DL = Op.getDebugLoc();

  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
                       DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
                       false,
                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}

// getTargetVShiftNode - Handle vector element shifts where the shift amount
// may or may not be a constant. Takes the immediate version of the shift as
// input.
static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT,
                                   SDValue SrcOp, SDValue ShAmt,
                                   SelectionDAG &DAG) {
  assert(ShAmt.getValueType() == MVT::i32 && "ShAmt is not i32");

  if (isa<ConstantSDNode>(ShAmt)) {
    // Constant may be a TargetConstant. Use a regular constant.
    uint32_t ShiftAmt = cast<ConstantSDNode>(ShAmt)->getZExtValue();
    switch (Opc) {
    default: llvm_unreachable("Unknown target vector shift node");
    case X86ISD::VSHLI:
    case X86ISD::VSRLI:
    case X86ISD::VSRAI:
      return DAG.getNode(Opc, dl, VT, SrcOp,
                         DAG.getConstant(ShiftAmt, MVT::i32));
    }
  }

  // Change the opcode to the non-immediate version.
  switch (Opc) {
  default: llvm_unreachable("Unknown target vector shift node");
  case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
  case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
  case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
  }

  // Need to build a vector containing the shift amount.
  // The shift amount is 32 bits, but the SSE instructions read a 64-bit
  // amount, so fill the upper bits with 0.
  SDValue ShOps[4];
  ShOps[0] = ShAmt;
  ShOps[1] = DAG.getConstant(0, MVT::i32);
  ShOps[2] = ShOps[3] = DAG.getUNDEF(MVT::i32);
  ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &ShOps[0], 4);

  // The return type has to be a 128-bit type with the same element
  // type as the input type.
  MVT EltVT = VT.getVectorElementType().getSimpleVT();
  EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());

  ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
  return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
}

static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  // Comparison intrinsics.
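  // e.g. _mm_comieq_ss(a, b) lowers to a flag-producing COMI node followed by
  // an X86ISD::SETCC on COND_E, zero-extended to the intrinsic's i32 result.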
  case Intrinsic::x86_sse_comieq_ss:
  case Intrinsic::x86_sse_comilt_ss:
  case Intrinsic::x86_sse_comile_ss:
  case Intrinsic::x86_sse_comigt_ss:
  case Intrinsic::x86_sse_comige_ss:
  case Intrinsic::x86_sse_comineq_ss:
  case Intrinsic::x86_sse_ucomieq_ss:
  case Intrinsic::x86_sse_ucomilt_ss:
  case Intrinsic::x86_sse_ucomile_ss:
  case Intrinsic::x86_sse_ucomigt_ss:
  case Intrinsic::x86_sse_ucomige_ss:
  case Intrinsic::x86_sse_ucomineq_ss:
  case Intrinsic::x86_sse2_comieq_sd:
  case Intrinsic::x86_sse2_comilt_sd:
  case Intrinsic::x86_sse2_comile_sd:
  case Intrinsic::x86_sse2_comigt_sd:
  case Intrinsic::x86_sse2_comige_sd:
  case Intrinsic::x86_sse2_comineq_sd:
  case Intrinsic::x86_sse2_ucomieq_sd:
  case Intrinsic::x86_sse2_ucomilt_sd:
  case Intrinsic::x86_sse2_ucomile_sd:
  case Intrinsic::x86_sse2_ucomigt_sd:
  case Intrinsic::x86_sse2_ucomige_sd:
  case Intrinsic::x86_sse2_ucomineq_sd: {
    unsigned Opc;
    ISD::CondCode CC;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::x86_sse_comieq_ss:
    case Intrinsic::x86_sse2_comieq_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETEQ;
      break;
    case Intrinsic::x86_sse_comilt_ss:
    case Intrinsic::x86_sse2_comilt_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETLT;
      break;
    case Intrinsic::x86_sse_comile_ss:
    case Intrinsic::x86_sse2_comile_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETLE;
      break;
    case Intrinsic::x86_sse_comigt_ss:
    case Intrinsic::x86_sse2_comigt_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETGT;
      break;
    case Intrinsic::x86_sse_comige_ss:
    case Intrinsic::x86_sse2_comige_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETGE;
      break;
    case Intrinsic::x86_sse_comineq_ss:
    case Intrinsic::x86_sse2_comineq_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETNE;
      break;
    case Intrinsic::x86_sse_ucomieq_ss:
    case Intrinsic::x86_sse2_ucomieq_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETEQ;
      break;
    case Intrinsic::x86_sse_ucomilt_ss:
    case Intrinsic::x86_sse2_ucomilt_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETLT;
      break;
    case Intrinsic::x86_sse_ucomile_ss:
    case Intrinsic::x86_sse2_ucomile_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETLE;
      break;
    case Intrinsic::x86_sse_ucomigt_ss:
    case Intrinsic::x86_sse2_ucomigt_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETGT;
      break;
    case Intrinsic::x86_sse_ucomige_ss:
    case Intrinsic::x86_sse2_ucomige_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETGE;
      break;
    case Intrinsic::x86_sse_ucomineq_ss:
    case Intrinsic::x86_sse2_ucomineq_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETNE;
      break;
    }

    SDValue LHS = Op.getOperand(1);
    SDValue RHS = Op.getOperand(2);
    unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
    assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
    SDValue Cond = DAG.getNode(Opc, dl, MVT::i32, LHS, RHS);
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                DAG.getConstant(X86CC, MVT::i8), Cond);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }

  // Arithmetic intrinsics.
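  // e.g. _mm_mul_epu32 maps straight onto the PMULUDQ node, which multiplies
  // the low (even-indexed) 32-bit lanes into full 64-bit products.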
  case Intrinsic::x86_sse2_pmulu_dq:
  case Intrinsic::x86_avx2_pmulu_dq:
    return DAG.getNode(X86ISD::PMULUDQ, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));

  // SSE3/AVX horizontal add/sub intrinsics.
  case Intrinsic::x86_sse3_hadd_ps:
  case Intrinsic::x86_sse3_hadd_pd:
  case Intrinsic::x86_avx_hadd_ps_256:
  case Intrinsic::x86_avx_hadd_pd_256:
  case Intrinsic::x86_sse3_hsub_ps:
  case Intrinsic::x86_sse3_hsub_pd:
  case Intrinsic::x86_avx_hsub_ps_256:
  case Intrinsic::x86_avx_hsub_pd_256:
  case Intrinsic::x86_ssse3_phadd_w_128:
  case Intrinsic::x86_ssse3_phadd_d_128:
  case Intrinsic::x86_avx2_phadd_w:
  case Intrinsic::x86_avx2_phadd_d:
  case Intrinsic::x86_ssse3_phsub_w_128:
  case Intrinsic::x86_ssse3_phsub_d_128:
  case Intrinsic::x86_avx2_phsub_w:
  case Intrinsic::x86_avx2_phsub_d: {
    unsigned Opcode;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::x86_sse3_hadd_ps:
    case Intrinsic::x86_sse3_hadd_pd:
    case Intrinsic::x86_avx_hadd_ps_256:
    case Intrinsic::x86_avx_hadd_pd_256:
      Opcode = X86ISD::FHADD;
      break;
    case Intrinsic::x86_sse3_hsub_ps:
    case Intrinsic::x86_sse3_hsub_pd:
    case Intrinsic::x86_avx_hsub_ps_256:
    case Intrinsic::x86_avx_hsub_pd_256:
      Opcode = X86ISD::FHSUB;
      break;
    case Intrinsic::x86_ssse3_phadd_w_128:
    case Intrinsic::x86_ssse3_phadd_d_128:
    case Intrinsic::x86_avx2_phadd_w:
    case Intrinsic::x86_avx2_phadd_d:
      Opcode = X86ISD::HADD;
      break;
    case Intrinsic::x86_ssse3_phsub_w_128:
    case Intrinsic::x86_ssse3_phsub_d_128:
    case Intrinsic::x86_avx2_phsub_w:
    case Intrinsic::x86_avx2_phsub_d:
      Opcode = X86ISD::HSUB;
      break;
    }
    return DAG.getNode(Opcode, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }

  // AVX2 variable shift intrinsics.
  case Intrinsic::x86_avx2_psllv_d:
  case Intrinsic::x86_avx2_psllv_q:
  case Intrinsic::x86_avx2_psllv_d_256:
  case Intrinsic::x86_avx2_psllv_q_256:
  case Intrinsic::x86_avx2_psrlv_d:
  case Intrinsic::x86_avx2_psrlv_q:
  case Intrinsic::x86_avx2_psrlv_d_256:
  case Intrinsic::x86_avx2_psrlv_q_256:
  case Intrinsic::x86_avx2_psrav_d:
  case Intrinsic::x86_avx2_psrav_d_256: {
    unsigned Opcode;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::x86_avx2_psllv_d:
    case Intrinsic::x86_avx2_psllv_q:
    case Intrinsic::x86_avx2_psllv_d_256:
    case Intrinsic::x86_avx2_psllv_q_256:
      Opcode = ISD::SHL;
      break;
    case Intrinsic::x86_avx2_psrlv_d:
    case Intrinsic::x86_avx2_psrlv_q:
    case Intrinsic::x86_avx2_psrlv_d_256:
    case Intrinsic::x86_avx2_psrlv_q_256:
      Opcode = ISD::SRL;
      break;
    case Intrinsic::x86_avx2_psrav_d:
    case Intrinsic::x86_avx2_psrav_d_256:
      Opcode = ISD::SRA;
      break;
    }
    return DAG.getNode(Opcode, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }

  case Intrinsic::x86_ssse3_pshuf_b_128:
  case Intrinsic::x86_avx2_pshuf_b:
    return DAG.getNode(X86ISD::PSHUFB, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));

  case Intrinsic::x86_ssse3_psign_b_128:
  case Intrinsic::x86_ssse3_psign_w_128:
  case Intrinsic::x86_ssse3_psign_d_128:
  case Intrinsic::x86_avx2_psign_b:
  case Intrinsic::x86_avx2_psign_w:
  case Intrinsic::x86_avx2_psign_d:
    return DAG.getNode(X86ISD::PSIGN, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));

  case Intrinsic::x86_sse41_insertps:
    return DAG.getNode(X86ISD::INSERTPS, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::x86_avx_vperm2f128_ps_256:
  case Intrinsic::x86_avx_vperm2f128_pd_256:
  case Intrinsic::x86_avx_vperm2f128_si_256:
  case Intrinsic::x86_avx2_vperm2i128:
    return DAG.getNode(X86ISD::VPERM2X128, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::x86_avx2_permd:
  case Intrinsic::x86_avx2_permps:
    // Operands intentionally swapped. The mask is the last operand of the
    // intrinsic, but the second operand of the node/instruction.
    return DAG.getNode(X86ISD::VPERMV, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(1));

  // ptest and testp intrinsics. The intrinsics these come from are designed
  // to return an integer value, not just an instruction, so lower them to the
  // ptest or testp pattern and a setcc for the result.
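  // e.g. _mm_testz_si128(a, b) becomes PTEST(a, b) followed by a sete on ZF,
  // since PTEST sets ZF when (a & b) == 0 and CF when (~a & b) == 0.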
  case Intrinsic::x86_sse41_ptestz:
  case Intrinsic::x86_sse41_ptestc:
  case Intrinsic::x86_sse41_ptestnzc:
  case Intrinsic::x86_avx_ptestz_256:
  case Intrinsic::x86_avx_ptestc_256:
  case Intrinsic::x86_avx_ptestnzc_256:
  case Intrinsic::x86_avx_vtestz_ps:
  case Intrinsic::x86_avx_vtestc_ps:
  case Intrinsic::x86_avx_vtestnzc_ps:
  case Intrinsic::x86_avx_vtestz_pd:
  case Intrinsic::x86_avx_vtestc_pd:
  case Intrinsic::x86_avx_vtestnzc_pd:
  case Intrinsic::x86_avx_vtestz_ps_256:
  case Intrinsic::x86_avx_vtestc_ps_256:
  case Intrinsic::x86_avx_vtestnzc_ps_256:
  case Intrinsic::x86_avx_vtestz_pd_256:
  case Intrinsic::x86_avx_vtestc_pd_256:
  case Intrinsic::x86_avx_vtestnzc_pd_256: {
    bool IsTestPacked = false;
    unsigned X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_vtestz_pd_256:
      IsTestPacked = true;   // Fallthrough
    case Intrinsic::x86_sse41_ptestz:
    case Intrinsic::x86_avx_ptestz_256:
      // ZF = 1
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestc_pd_256:
      IsTestPacked = true;   // Fallthrough
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_avx_ptestc_256:
      // CF = 1
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
      IsTestPacked = true;   // Fallthrough
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_avx_ptestnzc_256:
      // ZF and CF = 0
      X86CC = X86::COND_A;
      break;
    }

    SDValue LHS = Op.getOperand(1);
    SDValue RHS = Op.getOperand(2);
    unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
    SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
    SDValue CC = DAG.getConstant(X86CC, MVT::i8);
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }

  // SSE/AVX shift intrinsics.
  case Intrinsic::x86_sse2_psll_w:
  case Intrinsic::x86_sse2_psll_d:
  case Intrinsic::x86_sse2_psll_q:
  case Intrinsic::x86_avx2_psll_w:
  case Intrinsic::x86_avx2_psll_d:
  case Intrinsic::x86_avx2_psll_q:
  case Intrinsic::x86_sse2_psrl_w:
  case Intrinsic::x86_sse2_psrl_d:
  case Intrinsic::x86_sse2_psrl_q:
  case Intrinsic::x86_avx2_psrl_w:
  case Intrinsic::x86_avx2_psrl_d:
  case Intrinsic::x86_avx2_psrl_q:
  case Intrinsic::x86_sse2_psra_w:
  case Intrinsic::x86_sse2_psra_d:
  case Intrinsic::x86_avx2_psra_w:
  case Intrinsic::x86_avx2_psra_d: {
    unsigned Opcode;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::x86_sse2_psll_w:
    case Intrinsic::x86_sse2_psll_d:
    case Intrinsic::x86_sse2_psll_q:
    case Intrinsic::x86_avx2_psll_w:
    case Intrinsic::x86_avx2_psll_d:
    case Intrinsic::x86_avx2_psll_q:
      Opcode = X86ISD::VSHL;
      break;
    case Intrinsic::x86_sse2_psrl_w:
    case Intrinsic::x86_sse2_psrl_d:
    case Intrinsic::x86_sse2_psrl_q:
    case Intrinsic::x86_avx2_psrl_w:
    case Intrinsic::x86_avx2_psrl_d:
    case Intrinsic::x86_avx2_psrl_q:
      Opcode = X86ISD::VSRL;
      break;
    case Intrinsic::x86_sse2_psra_w:
    case Intrinsic::x86_sse2_psra_d:
    case Intrinsic::x86_avx2_psra_w:
    case Intrinsic::x86_avx2_psra_d:
      Opcode = X86ISD::VSRA;
      break;
    }
    return DAG.getNode(Opcode, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }

  // SSE/AVX immediate shift intrinsics.
  case Intrinsic::x86_sse2_pslli_w:
  case Intrinsic::x86_sse2_pslli_d:
  case Intrinsic::x86_sse2_pslli_q:
  case Intrinsic::x86_avx2_pslli_w:
  case Intrinsic::x86_avx2_pslli_d:
  case Intrinsic::x86_avx2_pslli_q:
  case Intrinsic::x86_sse2_psrli_w:
  case Intrinsic::x86_sse2_psrli_d:
  case Intrinsic::x86_sse2_psrli_q:
  case Intrinsic::x86_avx2_psrli_w:
  case Intrinsic::x86_avx2_psrli_d:
  case Intrinsic::x86_avx2_psrli_q:
  case Intrinsic::x86_sse2_psrai_w:
  case Intrinsic::x86_sse2_psrai_d:
  case Intrinsic::x86_avx2_psrai_w:
  case Intrinsic::x86_avx2_psrai_d: {
    unsigned Opcode;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::x86_sse2_pslli_w:
    case Intrinsic::x86_sse2_pslli_d:
    case Intrinsic::x86_sse2_pslli_q:
    case Intrinsic::x86_avx2_pslli_w:
    case Intrinsic::x86_avx2_pslli_d:
    case Intrinsic::x86_avx2_pslli_q:
      Opcode = X86ISD::VSHLI;
      break;
    case Intrinsic::x86_sse2_psrli_w:
    case Intrinsic::x86_sse2_psrli_d:
    case Intrinsic::x86_sse2_psrli_q:
    case Intrinsic::x86_avx2_psrli_w:
    case Intrinsic::x86_avx2_psrli_d:
    case Intrinsic::x86_avx2_psrli_q:
      Opcode = X86ISD::VSRLI;
      break;
    case Intrinsic::x86_sse2_psrai_w:
    case Intrinsic::x86_sse2_psrai_d:
    case Intrinsic::x86_avx2_psrai_w:
    case Intrinsic::x86_avx2_psrai_d:
      Opcode = X86ISD::VSRAI;
      break;
    }
    return getTargetVShiftNode(Opcode, dl, Op.getValueType(),
                               Op.getOperand(1), Op.getOperand(2), DAG);
  }

  case Intrinsic::x86_sse42_pcmpistria128:
  case Intrinsic::x86_sse42_pcmpestria128:
  case Intrinsic::x86_sse42_pcmpistric128:
  case Intrinsic::x86_sse42_pcmpestric128:
  case Intrinsic::x86_sse42_pcmpistrio128:
  case Intrinsic::x86_sse42_pcmpestrio128:
  case Intrinsic::x86_sse42_pcmpistris128:
  case Intrinsic::x86_sse42_pcmpestris128:
  case Intrinsic::x86_sse42_pcmpistriz128:
  case Intrinsic::x86_sse42_pcmpestriz128: {
    unsigned Opcode;
    unsigned X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::x86_sse42_pcmpistria128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpestria128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpistric128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpestric128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpistrio128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpestrio128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpistris128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpestris128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpistriz128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_sse42_pcmpestriz128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_E;
      break;
    }
    SmallVector<SDValue, 5> NewOps;
    NewOps.append(Op->op_begin()+1, Op->op_end());
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
    SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size());
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                DAG.getConstant(X86CC, MVT::i8),
                                SDValue(PCMP.getNode(), 1));
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }

  case Intrinsic::x86_sse42_pcmpistri128:
  case Intrinsic::x86_sse42_pcmpestri128: {
    unsigned Opcode;
    if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
      Opcode = X86ISD::PCMPISTRI;
    else
      Opcode = X86ISD::PCMPESTRI;

    SmallVector<SDValue, 5> NewOps;
    NewOps.append(Op->op_begin()+1, Op->op_end());
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
    return DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size());
  }
  case Intrinsic::x86_fma_vfmadd_ps:
  case Intrinsic::x86_fma_vfmadd_pd:
  case Intrinsic::x86_fma_vfmsub_ps:
  case Intrinsic::x86_fma_vfmsub_pd:
  case Intrinsic::x86_fma_vfnmadd_ps:
  case Intrinsic::x86_fma_vfnmadd_pd:
  case Intrinsic::x86_fma_vfnmsub_ps:
  case Intrinsic::x86_fma_vfnmsub_pd:
  case Intrinsic::x86_fma_vfmaddsub_ps:
  case Intrinsic::x86_fma_vfmaddsub_pd:
  case Intrinsic::x86_fma_vfmsubadd_ps:
  case Intrinsic::x86_fma_vfmsubadd_pd:
  case Intrinsic::x86_fma_vfmadd_ps_256:
  case Intrinsic::x86_fma_vfmadd_pd_256:
  case Intrinsic::x86_fma_vfmsub_ps_256:
  case Intrinsic::x86_fma_vfmsub_pd_256:
  case Intrinsic::x86_fma_vfnmadd_ps_256:
  case Intrinsic::x86_fma_vfnmadd_pd_256:
  case Intrinsic::x86_fma_vfnmsub_ps_256:
  case Intrinsic::x86_fma_vfnmsub_pd_256:
  case Intrinsic::x86_fma_vfmaddsub_ps_256:
  case Intrinsic::x86_fma_vfmaddsub_pd_256:
  case Intrinsic::x86_fma_vfmsubadd_ps_256:
  case Intrinsic::x86_fma_vfmsubadd_pd_256: {
    unsigned Opc;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::x86_fma_vfmadd_ps:
    case Intrinsic::x86_fma_vfmadd_pd:
    case Intrinsic::x86_fma_vfmadd_ps_256:
    case Intrinsic::x86_fma_vfmadd_pd_256:
      Opc = X86ISD::FMADD;
      break;
    case Intrinsic::x86_fma_vfmsub_ps:
    case Intrinsic::x86_fma_vfmsub_pd:
    case Intrinsic::x86_fma_vfmsub_ps_256:
    case Intrinsic::x86_fma_vfmsub_pd_256:
      Opc = X86ISD::FMSUB;
      break;
    case Intrinsic::x86_fma_vfnmadd_ps:
    case Intrinsic::x86_fma_vfnmadd_pd:
    case Intrinsic::x86_fma_vfnmadd_ps_256:
    case Intrinsic::x86_fma_vfnmadd_pd_256:
      Opc = X86ISD::FNMADD;
      break;
    case Intrinsic::x86_fma_vfnmsub_ps:
    case Intrinsic::x86_fma_vfnmsub_pd:
    case Intrinsic::x86_fma_vfnmsub_ps_256:
    case Intrinsic::x86_fma_vfnmsub_pd_256:
      Opc = X86ISD::FNMSUB;
      break;
    case Intrinsic::x86_fma_vfmaddsub_ps:
    case Intrinsic::x86_fma_vfmaddsub_pd:
    case Intrinsic::x86_fma_vfmaddsub_ps_256:
    case Intrinsic::x86_fma_vfmaddsub_pd_256:
      Opc = X86ISD::FMADDSUB;
      break;
    case Intrinsic::x86_fma_vfmsubadd_ps:
    case Intrinsic::x86_fma_vfmsubadd_pd:
    case Intrinsic::x86_fma_vfmsubadd_ps_256:
    case Intrinsic::x86_fma_vfmsubadd_pd_256:
      Opc = X86ISD::FMSUBADD;
      break;
    }

    return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3));
  }
  }
}

static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.

  // RDRAND intrinsics.
  case Intrinsic::x86_rdrand_16:
  case Intrinsic::x86_rdrand_32:
  case Intrinsic::x86_rdrand_64: {
    // Emit the node with the right value type.
    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
    SDValue Result = DAG.getNode(X86ISD::RDRAND, dl, VTs, Op.getOperand(0));

    // If the value returned by RDRAND was valid (CF=1), return 1. Otherwise
    // return the value from Rand, which is always 0, cast to i32.
    SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
                      DAG.getConstant(1, Op->getValueType(1)),
                      DAG.getConstant(X86::COND_B, MVT::i32),
                      SDValue(Result.getNode(), 1) };
    SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
                                  DAG.getVTList(Op->getValueType(1), MVT::Glue),
                                  Ops, 4);

    // Return { result, isValid, chain }.
    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
                       SDValue(Result.getNode(), 2));
  }
  }
}

SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  DebugLoc dl = Op.getDebugLoc();

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
      DAG.getConstant(TD->getPointerSize(),
                      Subtarget->is64Bit() ?
                        MVT::i64 : MVT::i32);
    return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                   FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Just load the return address.
  SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
  return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                     RetAddrFI, MachinePointerInfo(), false, false, false, 0);
}

SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();  // FIXME probably not meaningful
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned FrameReg = Subtarget->is64Bit() ? X86::RBP : X86::EBP;
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo(),
                            false, false, false, 0);
  return FrameAddr;
}

SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
                                                     SelectionDAG &DAG) const {
  return DAG.getIntPtrConstant(2*TD->getPointerSize());
}

SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();

  SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                     Subtarget->is64Bit() ? X86::RBP : X86::EBP,
                                     getPointerTy());
  unsigned StoreAddrReg = (Subtarget->is64Bit() ? X86::RCX : X86::ECX);

  SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame,
                                  DAG.getIntPtrConstant(TD->getPointerSize()));
  StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset);
  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
                       false, false, 0);
  Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);

  return DAG.getNode(X86ISD::EH_RETURN, dl,
                     MVT::Other,
                     Chain, DAG.getRegister(StoreAddrReg, getPointerTy()));
}

static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
  return Op.getOperand(0);
}

SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDValue Root = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  DebugLoc dl = Op.getDebugLoc();

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  if (Subtarget->is64Bit()) {
    SDValue OutChains[6];

    // Large code-model.
    const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
    const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.

    const unsigned char N86R10 = X86_MC::getX86RegNum(X86::R10);
    const unsigned char N86R11 = X86_MC::getX86RegNum(X86::R11);

    const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix

    // Load the pointer to the nested function into R11.
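    // (The six stores below assemble, byte for byte:
    //    movabsq $<nested function>, %r11   -- bytes 0-9
    //    movabsq $<nest value>, %r10        -- bytes 10-19
    //    jmpq    *%r11                      -- bytes 20-22.)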
    unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
    SDValue Addr = Trmp;
    OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr),
                                false, false, 0);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(2, MVT::i64));
    OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
                                MachinePointerInfo(TrmpAddr, 2),
                                false, false, 2);

    // Load the 'nest' parameter value into R10.
    // R10 is specified in X86CallingConv.td
    OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(10, MVT::i64));
    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 10),
                                false, false, 0);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(12, MVT::i64));
    OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
                                MachinePointerInfo(TrmpAddr, 12),
                                false, false, 2);

    // Jump to the nested function.
    OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(20, MVT::i64));
    OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 20),
                                false, false, 0);

    unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(22, MVT::i64));
    OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
                                MachinePointerInfo(TrmpAddr, 22),
                                false, false, 0);

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6);
  } else {
    const Function *Func =
      cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
    CallingConv::ID CC = Func->getCallingConv();
    unsigned NestReg;

    switch (CC) {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::X86_StdCall: {
      // Pass 'nest' parameter in ECX.
      // Must be kept in sync with X86CallingConv.td
      NestReg = X86::ECX;

      // Check that ECX wasn't needed by an 'inreg' parameter.
      FunctionType *FTy = Func->getFunctionType();
      const AttrListPtr &Attrs = Func->getAttributes();

      if (!Attrs.isEmpty() && !Func->isVarArg()) {
        unsigned InRegCount = 0;
        unsigned Idx = 1;

        for (FunctionType::param_iterator I = FTy->param_begin(),
             E = FTy->param_end(); I != E; ++I, ++Idx)
          if (Attrs.paramHasAttr(Idx, Attribute::InReg))
            // FIXME: should only count parameters that are lowered to integers.
            InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;

        if (InRegCount > 2) {
          report_fatal_error("Nest register in use - reduce number of inreg"
                             " parameters!");
        }
      }
      break;
    }
    case CallingConv::X86_FastCall:
    case CallingConv::X86_ThisCall:
    case CallingConv::Fast:
      // Pass 'nest' parameter in EAX.
      // Must be kept in sync with X86CallingConv.td
      NestReg = X86::EAX;
      break;
    }

    SDValue OutChains[4];
    SDValue Addr, Disp;

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(10, MVT::i32));
    Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);

    // This is storing the opcode for MOV32ri.
    const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
    const unsigned char N86Reg = X86_MC::getX86RegNum(NestReg);
    OutChains[0] = DAG.getStore(Root, dl,
                                DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
                                Trmp, MachinePointerInfo(TrmpAddr),
                                false, false, 0);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(1, MVT::i32));
    OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
                                MachinePointerInfo(TrmpAddr, 1),
                                false, false, 1);

    const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(5, MVT::i32));
    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
                                MachinePointerInfo(TrmpAddr, 5),
                                false, false, 1);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(6, MVT::i32));
    OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
                                MachinePointerInfo(TrmpAddr, 6),
                                false, false, 1);

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4);
  }
}

SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  /*
   The rounding mode is in bits 11:10 of the FP control word (the register
   FNSTCW stores below), and has the following settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

   FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

   To perform the conversion, we do:
     (((((FPCW & 0x800) >> 11) | ((FPCW & 0x400) >> 9)) + 1) & 3)
  */

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetMachine &TM = MF.getTarget();
  const TargetFrameLowering &TFI = *TM.getFrameLowering();
  unsigned StackAlignment = TFI.getStackAlignment();
  EVT VT = Op.getValueType();
  DebugLoc DL = Op.getDebugLoc();

  // Save the FP control word to a stack slot.
  int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                            MachineMemOperand::MOStore, 2, 2);

  SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
  SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
                                          DAG.getVTList(MVT::Other),
                                          Ops, 2, MVT::i16, MMO);

  // Load the FP control word from the stack slot.
  SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
                            MachinePointerInfo(), false, false, false, 0);

  // Transform as necessary.
  SDValue CWD1 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x800, MVT::i16)),
                DAG.getConstant(11, MVT::i8));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x400, MVT::i16)),
                DAG.getConstant(9, MVT::i8));

  SDValue RetVal =
    DAG.getNode(ISD::AND, DL, MVT::i16,
                DAG.getNode(ISD::ADD, DL, MVT::i16,
                            DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
                            DAG.getConstant(1, MVT::i16)),
                DAG.getConstant(3, MVT::i16));

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
}

static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  EVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  DebugLoc dl = Op.getDebugLoc();

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }

  // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);

  // If src is zero (i.e. bsr sets ZF), return NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits+NumBits-1, OpVT),
    DAG.getConstant(X86::COND_E, MVT::i8),
    Op.getValue(1)
  };
  Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops));

  // Finally xor with NumBits-1.
  Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));

  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}

static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  EVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  DebugLoc dl = Op.getDebugLoc();

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }

  // Issue a bsr (scan bits in reverse).
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);

  // And xor with NumBits-1.
  Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));

  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}

static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  unsigned NumBits = VT.getSizeInBits();
  DebugLoc dl = Op.getDebugLoc();
  Op = Op.getOperand(0);

  // Issue a bsf (scan bits forward) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);

  // If src is zero (i.e. bsf sets ZF), return NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits, VT),
    DAG.getConstant(X86::COND_E, MVT::i8),
    Op.getValue(1)
  };
  return DAG.getNode(X86ISD::CMOV, dl, VT, Ops, array_lengthof(Ops));
}

// Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
// ones and then concatenate the result back.
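// For example, on AVX1 an (add v8i32 %a, %b) becomes two v4i32 adds on the
// extracted 128-bit halves, glued back together with a CONCAT_VECTORS, since
// 256-bit integer arithmetic is not available until AVX2.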
10707static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) { 10708 EVT VT = Op.getValueType(); 10709 10710 assert(VT.is256BitVector() && VT.isInteger() && 10711 "Unsupported value type for operation"); 10712 10713 unsigned NumElems = VT.getVectorNumElements(); 10714 DebugLoc dl = Op.getDebugLoc(); 10715 10716 // Extract the LHS vectors 10717 SDValue LHS = Op.getOperand(0); 10718 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 10719 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 10720 10721 // Extract the RHS vectors 10722 SDValue RHS = Op.getOperand(1); 10723 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl); 10724 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl); 10725 10726 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 10727 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 10728 10729 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 10730 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1), 10731 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2)); 10732} 10733 10734static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) { 10735 assert(Op.getValueType().is256BitVector() && 10736 Op.getValueType().isInteger() && 10737 "Only handle AVX 256-bit vector integer operation"); 10738 return Lower256IntArith(Op, DAG); 10739} 10740 10741static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) { 10742 assert(Op.getValueType().is256BitVector() && 10743 Op.getValueType().isInteger() && 10744 "Only handle AVX 256-bit vector integer operation"); 10745 return Lower256IntArith(Op, DAG); 10746} 10747 10748static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget, 10749 SelectionDAG &DAG) { 10750 EVT VT = Op.getValueType(); 10751 10752 // Decompose 256-bit ops into smaller 128-bit ops. 10753 if (VT.is256BitVector() && !Subtarget->hasAVX2()) 10754 return Lower256IntArith(Op, DAG); 10755 10756 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && 10757 "Only know how to lower V2I64/V4I64 multiply"); 10758 10759 DebugLoc dl = Op.getDebugLoc(); 10760 10761 // Ahi = psrlqi(a, 32); 10762 // Bhi = psrlqi(b, 32); 10763 // 10764 // AloBlo = pmuludq(a, b); 10765 // AloBhi = pmuludq(a, Bhi); 10766 // AhiBlo = pmuludq(Ahi, b); 10767 10768 // AloBhi = psllqi(AloBhi, 32); 10769 // AhiBlo = psllqi(AhiBlo, 32); 10770 // return AloBlo + AloBhi + AhiBlo; 10771 10772 SDValue A = Op.getOperand(0); 10773 SDValue B = Op.getOperand(1); 10774 10775 SDValue ShAmt = DAG.getConstant(32, MVT::i32); 10776 10777 SDValue Ahi = DAG.getNode(X86ISD::VSRLI, dl, VT, A, ShAmt); 10778 SDValue Bhi = DAG.getNode(X86ISD::VSRLI, dl, VT, B, ShAmt); 10779 10780 // Bit cast to 32-bit vectors for MULUDQ 10781 EVT MulVT = (VT == MVT::v2i64) ? 
MVT::v4i32 : MVT::v8i32; 10782 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A); 10783 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B); 10784 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi); 10785 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi); 10786 10787 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B); 10788 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi); 10789 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B); 10790 10791 AloBhi = DAG.getNode(X86ISD::VSHLI, dl, VT, AloBhi, ShAmt); 10792 AhiBlo = DAG.getNode(X86ISD::VSHLI, dl, VT, AhiBlo, ShAmt); 10793 10794 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi); 10795 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo); 10796} 10797 10798SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { 10799 10800 EVT VT = Op.getValueType(); 10801 DebugLoc dl = Op.getDebugLoc(); 10802 SDValue R = Op.getOperand(0); 10803 SDValue Amt = Op.getOperand(1); 10804 LLVMContext *Context = DAG.getContext(); 10805 10806 if (!Subtarget->hasSSE2()) 10807 return SDValue(); 10808 10809 // Optimize shl/srl/sra with constant shift amount. 10810 if (isSplatVector(Amt.getNode())) { 10811 SDValue SclrAmt = Amt->getOperand(0); 10812 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) { 10813 uint64_t ShiftAmt = C->getZExtValue(); 10814 10815 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 || 10816 (Subtarget->hasAVX2() && 10817 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16))) { 10818 if (Op.getOpcode() == ISD::SHL) 10819 return DAG.getNode(X86ISD::VSHLI, dl, VT, R, 10820 DAG.getConstant(ShiftAmt, MVT::i32)); 10821 if (Op.getOpcode() == ISD::SRL) 10822 return DAG.getNode(X86ISD::VSRLI, dl, VT, R, 10823 DAG.getConstant(ShiftAmt, MVT::i32)); 10824 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64) 10825 return DAG.getNode(X86ISD::VSRAI, dl, VT, R, 10826 DAG.getConstant(ShiftAmt, MVT::i32)); 10827 } 10828 10829 if (VT == MVT::v16i8) { 10830 if (Op.getOpcode() == ISD::SHL) { 10831 // Make a large shift. 10832 SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v8i16, R, 10833 DAG.getConstant(ShiftAmt, MVT::i32)); 10834 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); 10835 // Zero out the rightmost bits. 10836 SmallVector<SDValue, 16> V(16, 10837 DAG.getConstant(uint8_t(-1U << ShiftAmt), 10838 MVT::i8)); 10839 return DAG.getNode(ISD::AND, dl, VT, SHL, 10840 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 10841 } 10842 if (Op.getOpcode() == ISD::SRL) { 10843 // Make a large shift. 10844 SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v8i16, R, 10845 DAG.getConstant(ShiftAmt, MVT::i32)); 10846 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); 10847 // Zero out the leftmost bits. 
10848 SmallVector<SDValue, 16> V(16, 10849 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 10850 MVT::i8)); 10851 return DAG.getNode(ISD::AND, dl, VT, SRL, 10852 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 10853 } 10854 if (Op.getOpcode() == ISD::SRA) { 10855 if (ShiftAmt == 7) { 10856 // R s>> 7 === R s< 0 10857 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 10858 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); 10859 } 10860 10861 // R s>> a === ((R u>> a) ^ m) - m 10862 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 10863 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt, 10864 MVT::i8)); 10865 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16); 10866 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 10867 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 10868 return Res; 10869 } 10870 llvm_unreachable("Unknown shift opcode."); 10871 } 10872 10873 if (Subtarget->hasAVX2() && VT == MVT::v32i8) { 10874 if (Op.getOpcode() == ISD::SHL) { 10875 // Make a large shift. 10876 SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v16i16, R, 10877 DAG.getConstant(ShiftAmt, MVT::i32)); 10878 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); 10879 // Zero out the rightmost bits. 10880 SmallVector<SDValue, 32> V(32, 10881 DAG.getConstant(uint8_t(-1U << ShiftAmt), 10882 MVT::i8)); 10883 return DAG.getNode(ISD::AND, dl, VT, SHL, 10884 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 10885 } 10886 if (Op.getOpcode() == ISD::SRL) { 10887 // Make a large shift. 10888 SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v16i16, R, 10889 DAG.getConstant(ShiftAmt, MVT::i32)); 10890 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); 10891 // Zero out the leftmost bits. 10892 SmallVector<SDValue, 32> V(32, 10893 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 10894 MVT::i8)); 10895 return DAG.getNode(ISD::AND, dl, VT, SRL, 10896 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 10897 } 10898 if (Op.getOpcode() == ISD::SRA) { 10899 if (ShiftAmt == 7) { 10900 // R s>> 7 === R s< 0 10901 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 10902 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); 10903 } 10904 10905 // R s>> a === ((R u>> a) ^ m) - m 10906 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 10907 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt, 10908 MVT::i8)); 10909 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32); 10910 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 10911 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 10912 return Res; 10913 } 10914 llvm_unreachable("Unknown shift opcode."); 10915 } 10916 } 10917 } 10918 10919 // Lower SHL with variable shift amount. 
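// For v4i32 the variable shift below uses a float trick to build (1 << amt)
// per lane: shifting the amount left by 23 places it in the IEEE-754 exponent
// field, and adding 0x3f800000 (the bit pattern of 1.0f) supplies the
// exponent bias, so each lane holds the float value 2^amt. Converting back to
// integer then yields 2^amt lane-wise, reducing the shift to a vector
// multiply. In effect:
//
//   pow2 = sint(bitcast<v4f32>((amt << 23) + 0x3f800000))  // 2^amt per lane
//   res  = r * pow2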
10920 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) { 10921 Op = DAG.getNode(X86ISD::VSHLI, dl, VT, Op.getOperand(1), 10922 DAG.getConstant(23, MVT::i32)); 10923 10924 const uint32_t CV[] = { 0x3f800000U, 0x3f800000U, 0x3f800000U, 0x3f800000U}; 10925 Constant *C = ConstantDataVector::get(*Context, CV); 10926 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 10927 SDValue Addend = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 10928 MachinePointerInfo::getConstantPool(), 10929 false, false, false, 16); 10930 10931 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend); 10932 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op); 10933 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op); 10934 return DAG.getNode(ISD::MUL, dl, VT, Op, R); 10935 } 10936 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) { 10937 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq."); 10938 10939 // a = a << 5; 10940 Op = DAG.getNode(X86ISD::VSHLI, dl, MVT::v8i16, Op.getOperand(1), 10941 DAG.getConstant(5, MVT::i32)); 10942 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op); 10943 10944 // Turn 'a' into a mask suitable for VSELECT 10945 SDValue VSelM = DAG.getConstant(0x80, VT); 10946 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 10947 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 10948 10949 SDValue CM1 = DAG.getConstant(0x0f, VT); 10950 SDValue CM2 = DAG.getConstant(0x3f, VT); 10951 10952 // r = VSELECT(r, psllw(r & (char16)15, 4), a); 10953 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1); 10954 M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 10955 DAG.getConstant(4, MVT::i32), DAG); 10956 M = DAG.getNode(ISD::BITCAST, dl, VT, M); 10957 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); 10958 10959 // a += a 10960 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 10961 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 10962 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 10963 10964 // r = VSELECT(r, psllw(r & (char16)63, 2), a); 10965 M = DAG.getNode(ISD::AND, dl, VT, R, CM2); 10966 M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 10967 DAG.getConstant(2, MVT::i32), DAG); 10968 M = DAG.getNode(ISD::BITCAST, dl, VT, M); 10969 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); 10970 10971 // a += a 10972 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 10973 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 10974 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 10975 10976 // return VSELECT(r, r+r, a); 10977 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, 10978 DAG.getNode(ISD::ADD, dl, VT, R, R), R); 10979 return R; 10980 } 10981 10982 // Decompose 256-bit shifts into smaller 128-bit shifts. 
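// Each half of the value and of the shift amount is extracted as a 128-bit
// vector (a constant BUILD_VECTOR amount is simply split into two smaller
// BUILD_VECTORs), the halves are shifted independently, and the results are
// concatenated back together.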
10983 if (VT.is256BitVector()) {
10984 unsigned NumElems = VT.getVectorNumElements();
10985 MVT EltVT = VT.getVectorElementType().getSimpleVT();
10986 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
10987
10988 // Extract the two vectors
10989 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
10990 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
10991
10992 // Recreate the shift amount vectors
10993 SDValue Amt1, Amt2;
10994 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
10995 // Constant shift amount
10996 SmallVector<SDValue, 4> Amt1Csts;
10997 SmallVector<SDValue, 4> Amt2Csts;
10998 for (unsigned i = 0; i != NumElems/2; ++i)
10999 Amt1Csts.push_back(Amt->getOperand(i));
11000 for (unsigned i = NumElems/2; i != NumElems; ++i)
11001 Amt2Csts.push_back(Amt->getOperand(i));
11002
11003 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT,
11004 &Amt1Csts[0], NumElems/2);
11005 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT,
11006 &Amt2Csts[0], NumElems/2);
11007 } else {
11008 // Variable shift amount
11009 Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
11010 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
11011 }
11012
11013 // Issue new vector shifts for the smaller types
11014 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
11015 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
11016
11017 // Concatenate the result back
11018 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
11019 }
11020
11021 return SDValue();
11022 }
11023
11024 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
11025 // Lower the "add/sub/mul with overflow" instruction into a regular
11026 // instruction plus a "setcc" instruction that checks the overflow flag.
11027 // The "brcond" lowering looks for this combo and may remove the "setcc"
11028 // instruction if the "setcc" has only one use.
11029 SDNode *N = Op.getNode();
11030 SDValue LHS = N->getOperand(0);
11031 SDValue RHS = N->getOperand(1);
11032 unsigned BaseOp = 0;
11033 unsigned Cond = 0;
11034 DebugLoc DL = Op.getDebugLoc();
11035 switch (Op.getOpcode()) {
11036 default: llvm_unreachable("Unknown ovf instruction!");
11037 case ISD::SADDO:
11038 // An add of one will be selected as an INC. Note that INC doesn't
11039 // set CF, so we can't do this for UADDO.
11040 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
11041 if (C->isOne()) {
11042 BaseOp = X86ISD::INC;
11043 Cond = X86::COND_O;
11044 break;
11045 }
11046 BaseOp = X86ISD::ADD;
11047 Cond = X86::COND_O;
11048 break;
11049 case ISD::UADDO:
11050 BaseOp = X86ISD::ADD;
11051 Cond = X86::COND_B;
11052 break;
11053 case ISD::SSUBO:
11054 // A subtract of one will be selected as a DEC. Note that DEC doesn't
11055 // set CF, so we can't do this for USUBO.
11056 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 11057 if (C->isOne()) { 11058 BaseOp = X86ISD::DEC; 11059 Cond = X86::COND_O; 11060 break; 11061 } 11062 BaseOp = X86ISD::SUB; 11063 Cond = X86::COND_O; 11064 break; 11065 case ISD::USUBO: 11066 BaseOp = X86ISD::SUB; 11067 Cond = X86::COND_B; 11068 break; 11069 case ISD::SMULO: 11070 BaseOp = X86ISD::SMUL; 11071 Cond = X86::COND_O; 11072 break; 11073 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs 11074 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0), 11075 MVT::i32); 11076 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS); 11077 11078 SDValue SetCC = 11079 DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 11080 DAG.getConstant(X86::COND_O, MVT::i32), 11081 SDValue(Sum.getNode(), 2)); 11082 11083 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 11084 } 11085 } 11086 11087 // Also sets EFLAGS. 11088 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32); 11089 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS); 11090 11091 SDValue SetCC = 11092 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1), 11093 DAG.getConstant(Cond, MVT::i32), 11094 SDValue(Sum.getNode(), 1)); 11095 11096 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 11097} 11098 11099SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 11100 SelectionDAG &DAG) const { 11101 DebugLoc dl = Op.getDebugLoc(); 11102 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 11103 EVT VT = Op.getValueType(); 11104 11105 if (!Subtarget->hasSSE2() || !VT.isVector()) 11106 return SDValue(); 11107 11108 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 11109 ExtraVT.getScalarType().getSizeInBits(); 11110 SDValue ShAmt = DAG.getConstant(BitsDiff, MVT::i32); 11111 11112 switch (VT.getSimpleVT().SimpleTy) { 11113 default: return SDValue(); 11114 case MVT::v8i32: 11115 case MVT::v16i16: 11116 if (!Subtarget->hasAVX()) 11117 return SDValue(); 11118 if (!Subtarget->hasAVX2()) { 11119 // needs to be split 11120 unsigned NumElems = VT.getVectorNumElements(); 11121 11122 // Extract the LHS vectors 11123 SDValue LHS = Op.getOperand(0); 11124 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 11125 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 11126 11127 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 11128 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 11129 11130 EVT ExtraEltVT = ExtraVT.getVectorElementType(); 11131 unsigned ExtraNumElems = ExtraVT.getVectorNumElements(); 11132 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT, 11133 ExtraNumElems/2); 11134 SDValue Extra = DAG.getValueType(ExtraVT); 11135 11136 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra); 11137 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra); 11138 11139 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2); 11140 } 11141 // fall through 11142 case MVT::v4i32: 11143 case MVT::v8i16: { 11144 SDValue Tmp1 = getTargetVShiftNode(X86ISD::VSHLI, dl, VT, 11145 Op.getOperand(0), ShAmt, DAG); 11146 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, Tmp1, ShAmt, DAG); 11147 } 11148 } 11149} 11150 11151 11152static SDValue LowerMEMBARRIER(SDValue Op, const X86Subtarget *Subtarget, 11153 SelectionDAG &DAG) { 11154 DebugLoc dl = Op.getDebugLoc(); 11155 11156 // Go ahead and emit the fence on x86-64 even if we asked for no-sse2. 11157 // There isn't any reason to disable it if the target processor supports it. 
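// Without SSE2 on a 32-bit target there is no mfence. Instead, emit a locked
// OR of zero to the top of the stack (in effect "lock orl $0, (%esp)"): any
// locked read-modify-write instruction acts as a full memory barrier.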
11158 if (!Subtarget->hasSSE2() && !Subtarget->is64Bit()) { 11159 SDValue Chain = Op.getOperand(0); 11160 SDValue Zero = DAG.getConstant(0, MVT::i32); 11161 SDValue Ops[] = { 11162 DAG.getRegister(X86::ESP, MVT::i32), // Base 11163 DAG.getTargetConstant(1, MVT::i8), // Scale 11164 DAG.getRegister(0, MVT::i32), // Index 11165 DAG.getTargetConstant(0, MVT::i32), // Disp 11166 DAG.getRegister(0, MVT::i32), // Segment. 11167 Zero, 11168 Chain 11169 }; 11170 SDNode *Res = 11171 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, 11172 array_lengthof(Ops)); 11173 return SDValue(Res, 0); 11174 } 11175 11176 unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); 11177 if (!isDev) 11178 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 11179 11180 unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 11181 unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 11182 unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); 11183 unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 11184 11185 // def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>; 11186 if (!Op1 && !Op2 && !Op3 && Op4) 11187 return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0)); 11188 11189 // def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>; 11190 if (Op1 && !Op2 && !Op3 && !Op4) 11191 return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0)); 11192 11193 // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)), 11194 // (MFENCE)>; 11195 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 11196} 11197 11198static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget, 11199 SelectionDAG &DAG) { 11200 DebugLoc dl = Op.getDebugLoc(); 11201 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( 11202 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); 11203 SynchronizationScope FenceScope = static_cast<SynchronizationScope>( 11204 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); 11205 11206 // The only fence that needs an instruction is a sequentially-consistent 11207 // cross-thread fence. 11208 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) { 11209 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for 11210 // no-sse2). There isn't any reason to disable it if the target processor 11211 // supports it. 11212 if (Subtarget->hasSSE2() || Subtarget->is64Bit()) 11213 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 11214 11215 SDValue Chain = Op.getOperand(0); 11216 SDValue Zero = DAG.getConstant(0, MVT::i32); 11217 SDValue Ops[] = { 11218 DAG.getRegister(X86::ESP, MVT::i32), // Base 11219 DAG.getTargetConstant(1, MVT::i8), // Scale 11220 DAG.getRegister(0, MVT::i32), // Index 11221 DAG.getTargetConstant(0, MVT::i32), // Disp 11222 DAG.getRegister(0, MVT::i32), // Segment. 11223 Zero, 11224 Chain 11225 }; 11226 SDNode *Res = 11227 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, 11228 array_lengthof(Ops)); 11229 return SDValue(Res, 0); 11230 } 11231 11232 // MEMBARRIER is a compiler barrier; it codegens to a no-op. 
11233 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 11234} 11235 11236 11237static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget, 11238 SelectionDAG &DAG) { 11239 EVT T = Op.getValueType(); 11240 DebugLoc DL = Op.getDebugLoc(); 11241 unsigned Reg = 0; 11242 unsigned size = 0; 11243 switch(T.getSimpleVT().SimpleTy) { 11244 default: llvm_unreachable("Invalid value type!"); 11245 case MVT::i8: Reg = X86::AL; size = 1; break; 11246 case MVT::i16: Reg = X86::AX; size = 2; break; 11247 case MVT::i32: Reg = X86::EAX; size = 4; break; 11248 case MVT::i64: 11249 assert(Subtarget->is64Bit() && "Node not type legal!"); 11250 Reg = X86::RAX; size = 8; 11251 break; 11252 } 11253 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg, 11254 Op.getOperand(2), SDValue()); 11255 SDValue Ops[] = { cpIn.getValue(0), 11256 Op.getOperand(1), 11257 Op.getOperand(3), 11258 DAG.getTargetConstant(size, MVT::i8), 11259 cpIn.getValue(1) }; 11260 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 11261 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand(); 11262 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys, 11263 Ops, 5, T, MMO); 11264 SDValue cpOut = 11265 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1)); 11266 return cpOut; 11267} 11268 11269static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget, 11270 SelectionDAG &DAG) { 11271 assert(Subtarget->is64Bit() && "Result not type legalized?"); 11272 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 11273 SDValue TheChain = Op.getOperand(0); 11274 DebugLoc dl = Op.getDebugLoc(); 11275 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 11276 SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1)); 11277 SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64, 11278 rax.getValue(2)); 11279 SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx, 11280 DAG.getConstant(32, MVT::i8)); 11281 SDValue Ops[] = { 11282 DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp), 11283 rdx.getValue(1) 11284 }; 11285 return DAG.getMergeValues(Ops, 2, dl); 11286} 11287 11288SDValue X86TargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { 11289 EVT SrcVT = Op.getOperand(0).getValueType(); 11290 EVT DstVT = Op.getValueType(); 11291 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() && 11292 Subtarget->hasMMX() && "Unexpected custom BITCAST"); 11293 assert((DstVT == MVT::i64 || 11294 (DstVT.isVector() && DstVT.getSizeInBits()==64)) && 11295 "Unexpected custom BITCAST"); 11296 // i64 <=> MMX conversions are Legal. 11297 if (SrcVT==MVT::i64 && DstVT.isVector()) 11298 return Op; 11299 if (DstVT==MVT::i64 && SrcVT.isVector()) 11300 return Op; 11301 // MMX <=> MMX conversions are Legal. 11302 if (SrcVT.isVector() && DstVT.isVector()) 11303 return Op; 11304 // All other conversions need to be expanded. 
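// Returning an empty SDValue signals that we did not lower the node, so the
// legalizer falls back to its default expansion.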
11305 return SDValue(); 11306} 11307 11308static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) { 11309 SDNode *Node = Op.getNode(); 11310 DebugLoc dl = Node->getDebugLoc(); 11311 EVT T = Node->getValueType(0); 11312 SDValue negOp = DAG.getNode(ISD::SUB, dl, T, 11313 DAG.getConstant(0, T), Node->getOperand(2)); 11314 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, 11315 cast<AtomicSDNode>(Node)->getMemoryVT(), 11316 Node->getOperand(0), 11317 Node->getOperand(1), negOp, 11318 cast<AtomicSDNode>(Node)->getSrcValue(), 11319 cast<AtomicSDNode>(Node)->getAlignment(), 11320 cast<AtomicSDNode>(Node)->getOrdering(), 11321 cast<AtomicSDNode>(Node)->getSynchScope()); 11322} 11323 11324static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) { 11325 SDNode *Node = Op.getNode(); 11326 DebugLoc dl = Node->getDebugLoc(); 11327 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 11328 11329 // Convert seq_cst store -> xchg 11330 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b) 11331 // FIXME: On 32-bit, store -> fist or movq would be more efficient 11332 // (The only way to get a 16-byte store is cmpxchg16b) 11333 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment. 11334 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent || 11335 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 11336 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, 11337 cast<AtomicSDNode>(Node)->getMemoryVT(), 11338 Node->getOperand(0), 11339 Node->getOperand(1), Node->getOperand(2), 11340 cast<AtomicSDNode>(Node)->getMemOperand(), 11341 cast<AtomicSDNode>(Node)->getOrdering(), 11342 cast<AtomicSDNode>(Node)->getSynchScope()); 11343 return Swap.getValue(1); 11344 } 11345 // Other atomic stores have a simple pattern. 11346 return Op; 11347} 11348 11349static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 11350 EVT VT = Op.getNode()->getValueType(0); 11351 11352 // Let legalize expand this if it isn't a legal type yet. 11353 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 11354 return SDValue(); 11355 11356 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 11357 11358 unsigned Opc; 11359 bool ExtraOp = false; 11360 switch (Op.getOpcode()) { 11361 default: llvm_unreachable("Invalid code"); 11362 case ISD::ADDC: Opc = X86ISD::ADD; break; 11363 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break; 11364 case ISD::SUBC: Opc = X86ISD::SUB; break; 11365 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break; 11366 } 11367 11368 if (!ExtraOp) 11369 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 11370 Op.getOperand(1)); 11371 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 11372 Op.getOperand(1), Op.getOperand(2)); 11373} 11374 11375/// LowerOperation - Provide custom lowering hooks for some operations. 
11376/// 11377SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 11378 switch (Op.getOpcode()) { 11379 default: llvm_unreachable("Should not custom lower this!"); 11380 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG); 11381 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, Subtarget, DAG); 11382 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG); 11383 case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op, Subtarget, DAG); 11384 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG); 11385 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG); 11386 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 11387 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 11388 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 11389 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 11390 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 11391 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG); 11392 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG); 11393 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 11394 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 11395 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 11396 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 11397 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 11398 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 11399 case ISD::SHL_PARTS: 11400 case ISD::SRA_PARTS: 11401 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG); 11402 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 11403 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG); 11404 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 11405 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG); 11406 case ISD::FABS: return LowerFABS(Op, DAG); 11407 case ISD::FNEG: return LowerFNEG(Op, DAG); 11408 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 11409 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG); 11410 case ISD::SETCC: return LowerSETCC(Op, DAG); 11411 case ISD::SELECT: return LowerSELECT(Op, DAG); 11412 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 11413 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 11414 case ISD::VASTART: return LowerVASTART(Op, DAG); 11415 case ISD::VAARG: return LowerVAARG(Op, DAG); 11416 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG); 11417 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 11418 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 11419 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 11420 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 11421 case ISD::FRAME_TO_ARGS_OFFSET: 11422 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 11423 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 11424 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 11425 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 11426 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 11427 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 11428 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 11429 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG); 11430 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 11431 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG); 11432 case ISD::SRA: 11433 case ISD::SRL: 11434 case ISD::SHL: return LowerShift(Op, DAG); 11435 case ISD::SADDO: 
11436 case ISD::UADDO: 11437 case ISD::SSUBO: 11438 case ISD::USUBO: 11439 case ISD::SMULO: 11440 case ISD::UMULO: return LowerXALUO(Op, DAG); 11441 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG); 11442 case ISD::BITCAST: return LowerBITCAST(Op, DAG); 11443 case ISD::ADDC: 11444 case ISD::ADDE: 11445 case ISD::SUBC: 11446 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 11447 case ISD::ADD: return LowerADD(Op, DAG); 11448 case ISD::SUB: return LowerSUB(Op, DAG); 11449 } 11450} 11451 11452static void ReplaceATOMIC_LOAD(SDNode *Node, 11453 SmallVectorImpl<SDValue> &Results, 11454 SelectionDAG &DAG) { 11455 DebugLoc dl = Node->getDebugLoc(); 11456 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 11457 11458 // Convert wide load -> cmpxchg8b/cmpxchg16b 11459 // FIXME: On 32-bit, load -> fild or movq would be more efficient 11460 // (The only way to get a 16-byte load is cmpxchg16b) 11461 // FIXME: 16-byte ATOMIC_CMP_SWAP isn't actually hooked up at the moment. 11462 SDValue Zero = DAG.getConstant(0, VT); 11463 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, VT, 11464 Node->getOperand(0), 11465 Node->getOperand(1), Zero, Zero, 11466 cast<AtomicSDNode>(Node)->getMemOperand(), 11467 cast<AtomicSDNode>(Node)->getOrdering(), 11468 cast<AtomicSDNode>(Node)->getSynchScope()); 11469 Results.push_back(Swap.getValue(0)); 11470 Results.push_back(Swap.getValue(1)); 11471} 11472 11473static void 11474ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results, 11475 SelectionDAG &DAG, unsigned NewOp) { 11476 DebugLoc dl = Node->getDebugLoc(); 11477 assert (Node->getValueType(0) == MVT::i64 && 11478 "Only know how to expand i64 atomics"); 11479 11480 SDValue Chain = Node->getOperand(0); 11481 SDValue In1 = Node->getOperand(1); 11482 SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 11483 Node->getOperand(2), DAG.getIntPtrConstant(0)); 11484 SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 11485 Node->getOperand(2), DAG.getIntPtrConstant(1)); 11486 SDValue Ops[] = { Chain, In1, In2L, In2H }; 11487 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 11488 SDValue Result = 11489 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, 4, MVT::i64, 11490 cast<MemSDNode>(Node)->getMemOperand()); 11491 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)}; 11492 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 11493 Results.push_back(Result.getValue(2)); 11494} 11495 11496/// ReplaceNodeResults - Replace a node with an illegal result type 11497/// with a new node built out of custom code. 11498void X86TargetLowering::ReplaceNodeResults(SDNode *N, 11499 SmallVectorImpl<SDValue>&Results, 11500 SelectionDAG &DAG) const { 11501 DebugLoc dl = N->getDebugLoc(); 11502 switch (N->getOpcode()) { 11503 default: 11504 llvm_unreachable("Do not know how to custom type legalize this operation!"); 11505 case ISD::SIGN_EXTEND_INREG: 11506 case ISD::ADDC: 11507 case ISD::ADDE: 11508 case ISD::SUBC: 11509 case ISD::SUBE: 11510 // We don't want to expand or promote these. 
11511 return; 11512 case ISD::FP_TO_SINT: 11513 case ISD::FP_TO_UINT: { 11514 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT; 11515 11516 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType())) 11517 return; 11518 11519 std::pair<SDValue,SDValue> Vals = 11520 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true); 11521 SDValue FIST = Vals.first, StackSlot = Vals.second; 11522 if (FIST.getNode() != 0) { 11523 EVT VT = N->getValueType(0); 11524 // Return a load from the stack slot. 11525 if (StackSlot.getNode() != 0) 11526 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, 11527 MachinePointerInfo(), 11528 false, false, false, 0)); 11529 else 11530 Results.push_back(FIST); 11531 } 11532 return; 11533 } 11534 case ISD::READCYCLECOUNTER: { 11535 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 11536 SDValue TheChain = N->getOperand(0); 11537 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 11538 SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32, 11539 rd.getValue(1)); 11540 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32, 11541 eax.getValue(2)); 11542 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 11543 SDValue Ops[] = { eax, edx }; 11544 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 2)); 11545 Results.push_back(edx.getValue(1)); 11546 return; 11547 } 11548 case ISD::ATOMIC_CMP_SWAP: { 11549 EVT T = N->getValueType(0); 11550 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair"); 11551 bool Regs64bit = T == MVT::i128; 11552 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32; 11553 SDValue cpInL, cpInH; 11554 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), 11555 DAG.getConstant(0, HalfT)); 11556 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), 11557 DAG.getConstant(1, HalfT)); 11558 cpInL = DAG.getCopyToReg(N->getOperand(0), dl, 11559 Regs64bit ? X86::RAX : X86::EAX, 11560 cpInL, SDValue()); 11561 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl, 11562 Regs64bit ? X86::RDX : X86::EDX, 11563 cpInH, cpInL.getValue(1)); 11564 SDValue swapInL, swapInH; 11565 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), 11566 DAG.getConstant(0, HalfT)); 11567 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), 11568 DAG.getConstant(1, HalfT)); 11569 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl, 11570 Regs64bit ? X86::RBX : X86::EBX, 11571 swapInL, cpInH.getValue(1)); 11572 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl, 11573 Regs64bit ? X86::RCX : X86::ECX, 11574 swapInH, swapInL.getValue(1)); 11575 SDValue Ops[] = { swapInH.getValue(0), 11576 N->getOperand(1), 11577 swapInH.getValue(1) }; 11578 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 11579 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 11580 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG : 11581 X86ISD::LCMPXCHG8_DAG; 11582 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, 11583 Ops, 3, T, MMO); 11584 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, 11585 Regs64bit ? X86::RAX : X86::EAX, 11586 HalfT, Result.getValue(1)); 11587 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, 11588 Regs64bit ? 
X86::RDX : X86::EDX, 11589 HalfT, cpOutL.getValue(2)); 11590 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 11591 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF, 2)); 11592 Results.push_back(cpOutH.getValue(1)); 11593 return; 11594 } 11595 case ISD::ATOMIC_LOAD_ADD: 11596 case ISD::ATOMIC_LOAD_AND: 11597 case ISD::ATOMIC_LOAD_NAND: 11598 case ISD::ATOMIC_LOAD_OR: 11599 case ISD::ATOMIC_LOAD_SUB: 11600 case ISD::ATOMIC_LOAD_XOR: 11601 case ISD::ATOMIC_LOAD_MAX: 11602 case ISD::ATOMIC_LOAD_MIN: 11603 case ISD::ATOMIC_LOAD_UMAX: 11604 case ISD::ATOMIC_LOAD_UMIN: 11605 case ISD::ATOMIC_SWAP: { 11606 unsigned Opc; 11607 switch (N->getOpcode()) { 11608 default: llvm_unreachable("Unexpected opcode"); 11609 case ISD::ATOMIC_LOAD_ADD: 11610 Opc = X86ISD::ATOMADD64_DAG; 11611 break; 11612 case ISD::ATOMIC_LOAD_AND: 11613 Opc = X86ISD::ATOMAND64_DAG; 11614 break; 11615 case ISD::ATOMIC_LOAD_NAND: 11616 Opc = X86ISD::ATOMNAND64_DAG; 11617 break; 11618 case ISD::ATOMIC_LOAD_OR: 11619 Opc = X86ISD::ATOMOR64_DAG; 11620 break; 11621 case ISD::ATOMIC_LOAD_SUB: 11622 Opc = X86ISD::ATOMSUB64_DAG; 11623 break; 11624 case ISD::ATOMIC_LOAD_XOR: 11625 Opc = X86ISD::ATOMXOR64_DAG; 11626 break; 11627 case ISD::ATOMIC_LOAD_MAX: 11628 Opc = X86ISD::ATOMMAX64_DAG; 11629 break; 11630 case ISD::ATOMIC_LOAD_MIN: 11631 Opc = X86ISD::ATOMMIN64_DAG; 11632 break; 11633 case ISD::ATOMIC_LOAD_UMAX: 11634 Opc = X86ISD::ATOMUMAX64_DAG; 11635 break; 11636 case ISD::ATOMIC_LOAD_UMIN: 11637 Opc = X86ISD::ATOMUMIN64_DAG; 11638 break; 11639 case ISD::ATOMIC_SWAP: 11640 Opc = X86ISD::ATOMSWAP64_DAG; 11641 break; 11642 } 11643 ReplaceATOMIC_BINARY_64(N, Results, DAG, Opc); 11644 return; 11645 } 11646 case ISD::ATOMIC_LOAD: 11647 ReplaceATOMIC_LOAD(N, Results, DAG); 11648 } 11649} 11650 11651const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 11652 switch (Opcode) { 11653 default: return NULL; 11654 case X86ISD::BSF: return "X86ISD::BSF"; 11655 case X86ISD::BSR: return "X86ISD::BSR"; 11656 case X86ISD::SHLD: return "X86ISD::SHLD"; 11657 case X86ISD::SHRD: return "X86ISD::SHRD"; 11658 case X86ISD::FAND: return "X86ISD::FAND"; 11659 case X86ISD::FOR: return "X86ISD::FOR"; 11660 case X86ISD::FXOR: return "X86ISD::FXOR"; 11661 case X86ISD::FSRL: return "X86ISD::FSRL"; 11662 case X86ISD::FILD: return "X86ISD::FILD"; 11663 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 11664 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 11665 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 11666 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 11667 case X86ISD::FLD: return "X86ISD::FLD"; 11668 case X86ISD::FST: return "X86ISD::FST"; 11669 case X86ISD::CALL: return "X86ISD::CALL"; 11670 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 11671 case X86ISD::BT: return "X86ISD::BT"; 11672 case X86ISD::CMP: return "X86ISD::CMP"; 11673 case X86ISD::COMI: return "X86ISD::COMI"; 11674 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 11675 case X86ISD::SETCC: return "X86ISD::SETCC"; 11676 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY"; 11677 case X86ISD::FSETCCsd: return "X86ISD::FSETCCsd"; 11678 case X86ISD::FSETCCss: return "X86ISD::FSETCCss"; 11679 case X86ISD::CMOV: return "X86ISD::CMOV"; 11680 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 11681 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 11682 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 11683 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 11684 case X86ISD::GlobalBaseReg: 
return "X86ISD::GlobalBaseReg"; 11685 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 11686 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP"; 11687 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 11688 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 11689 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 11690 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 11691 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 11692 case X86ISD::PSHUFB: return "X86ISD::PSHUFB"; 11693 case X86ISD::ANDNP: return "X86ISD::ANDNP"; 11694 case X86ISD::PSIGN: return "X86ISD::PSIGN"; 11695 case X86ISD::BLENDV: return "X86ISD::BLENDV"; 11696 case X86ISD::BLENDPW: return "X86ISD::BLENDPW"; 11697 case X86ISD::BLENDPS: return "X86ISD::BLENDPS"; 11698 case X86ISD::BLENDPD: return "X86ISD::BLENDPD"; 11699 case X86ISD::HADD: return "X86ISD::HADD"; 11700 case X86ISD::HSUB: return "X86ISD::HSUB"; 11701 case X86ISD::FHADD: return "X86ISD::FHADD"; 11702 case X86ISD::FHSUB: return "X86ISD::FHSUB"; 11703 case X86ISD::FMAX: return "X86ISD::FMAX"; 11704 case X86ISD::FMIN: return "X86ISD::FMIN"; 11705 case X86ISD::FMAXC: return "X86ISD::FMAXC"; 11706 case X86ISD::FMINC: return "X86ISD::FMINC"; 11707 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 11708 case X86ISD::FRCP: return "X86ISD::FRCP"; 11709 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 11710 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR"; 11711 case X86ISD::TLSCALL: return "X86ISD::TLSCALL"; 11712 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 11713 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 11714 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 11715 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r"; 11716 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG"; 11717 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG"; 11718 case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG"; 11719 case X86ISD::ATOMSUB64_DAG: return "X86ISD::ATOMSUB64_DAG"; 11720 case X86ISD::ATOMOR64_DAG: return "X86ISD::ATOMOR64_DAG"; 11721 case X86ISD::ATOMXOR64_DAG: return "X86ISD::ATOMXOR64_DAG"; 11722 case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG"; 11723 case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG"; 11724 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL"; 11725 case X86ISD::VSEXT_MOVL: return "X86ISD::VSEXT_MOVL"; 11726 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD"; 11727 case X86ISD::VFPEXT: return "X86ISD::VFPEXT"; 11728 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ"; 11729 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ"; 11730 case X86ISD::VSHL: return "X86ISD::VSHL"; 11731 case X86ISD::VSRL: return "X86ISD::VSRL"; 11732 case X86ISD::VSRA: return "X86ISD::VSRA"; 11733 case X86ISD::VSHLI: return "X86ISD::VSHLI"; 11734 case X86ISD::VSRLI: return "X86ISD::VSRLI"; 11735 case X86ISD::VSRAI: return "X86ISD::VSRAI"; 11736 case X86ISD::CMPP: return "X86ISD::CMPP"; 11737 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ"; 11738 case X86ISD::PCMPGT: return "X86ISD::PCMPGT"; 11739 case X86ISD::ADD: return "X86ISD::ADD"; 11740 case X86ISD::SUB: return "X86ISD::SUB"; 11741 case X86ISD::ADC: return "X86ISD::ADC"; 11742 case X86ISD::SBB: return "X86ISD::SBB"; 11743 case X86ISD::SMUL: return "X86ISD::SMUL"; 11744 case X86ISD::UMUL: return "X86ISD::UMUL"; 11745 case X86ISD::INC: return "X86ISD::INC"; 11746 case X86ISD::DEC: return "X86ISD::DEC"; 11747 case X86ISD::OR: return "X86ISD::OR"; 11748 case X86ISD::XOR: return "X86ISD::XOR"; 11749 case X86ISD::AND: return "X86ISD::AND"; 11750 case X86ISD::ANDN: return "X86ISD::ANDN"; 11751 case 
X86ISD::BLSI: return "X86ISD::BLSI"; 11752 case X86ISD::BLSMSK: return "X86ISD::BLSMSK"; 11753 case X86ISD::BLSR: return "X86ISD::BLSR"; 11754 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM"; 11755 case X86ISD::PTEST: return "X86ISD::PTEST"; 11756 case X86ISD::TESTP: return "X86ISD::TESTP"; 11757 case X86ISD::PALIGN: return "X86ISD::PALIGN"; 11758 case X86ISD::PSHUFD: return "X86ISD::PSHUFD"; 11759 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW"; 11760 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW"; 11761 case X86ISD::SHUFP: return "X86ISD::SHUFP"; 11762 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS"; 11763 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD"; 11764 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS"; 11765 case X86ISD::MOVLPS: return "X86ISD::MOVLPS"; 11766 case X86ISD::MOVLPD: return "X86ISD::MOVLPD"; 11767 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP"; 11768 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP"; 11769 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP"; 11770 case X86ISD::MOVSD: return "X86ISD::MOVSD"; 11771 case X86ISD::MOVSS: return "X86ISD::MOVSS"; 11772 case X86ISD::UNPCKL: return "X86ISD::UNPCKL"; 11773 case X86ISD::UNPCKH: return "X86ISD::UNPCKH"; 11774 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST"; 11775 case X86ISD::VPERMILP: return "X86ISD::VPERMILP"; 11776 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128"; 11777 case X86ISD::VPERMV: return "X86ISD::VPERMV"; 11778 case X86ISD::VPERMI: return "X86ISD::VPERMI"; 11779 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ"; 11780 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS"; 11781 case X86ISD::VAARG_64: return "X86ISD::VAARG_64"; 11782 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA"; 11783 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER"; 11784 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA"; 11785 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL"; 11786 case X86ISD::SAHF: return "X86ISD::SAHF"; 11787 case X86ISD::RDRAND: return "X86ISD::RDRAND"; 11788 case X86ISD::FMADD: return "X86ISD::FMADD"; 11789 case X86ISD::FMSUB: return "X86ISD::FMSUB"; 11790 case X86ISD::FNMADD: return "X86ISD::FNMADD"; 11791 case X86ISD::FNMSUB: return "X86ISD::FNMSUB"; 11792 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB"; 11793 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD"; 11794 } 11795} 11796 11797// isLegalAddressingMode - Return true if the addressing mode represented 11798// by AM is legal for this target, for a load/store of the specified type. 11799bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 11800 Type *Ty) const { 11801 // X86 supports extremely general addressing modes. 11802 CodeModel::Model M = getTargetMachine().getCodeModel(); 11803 Reloc::Model R = getTargetMachine().getRelocationModel(); 11804 11805 // X86 allows a sign-extended 32-bit immediate field as a displacement. 11806 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL)) 11807 return false; 11808 11809 if (AM.BaseGV) { 11810 unsigned GVFlags = 11811 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine()); 11812 11813 // If a reference to this global requires an extra load, we can't fold it. 11814 if (isGlobalStubReference(GVFlags)) 11815 return false; 11816 11817 // If BaseGV requires a register for the PIC base, we cannot also have a 11818 // BaseReg specified. 11819 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags)) 11820 return false; 11821 11822 // If lower 4G is not available, then we must use rip-relative addressing. 
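// RIP-relative addressing only encodes "symbol(%rip)" with no index
// register, so a global combined with a scaled index cannot be folded into a
// single address, and a nonzero base offset is rejected here as well.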
11823 if ((M != CodeModel::Small || R != Reloc::Static) && 11824 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1)) 11825 return false; 11826 } 11827 11828 switch (AM.Scale) { 11829 case 0: 11830 case 1: 11831 case 2: 11832 case 4: 11833 case 8: 11834 // These scales always work. 11835 break; 11836 case 3: 11837 case 5: 11838 case 9: 11839 // These scales are formed with basereg+scalereg. Only accept if there is 11840 // no basereg yet. 11841 if (AM.HasBaseReg) 11842 return false; 11843 break; 11844 default: // Other stuff never works. 11845 return false; 11846 } 11847 11848 return true; 11849} 11850 11851 11852bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 11853 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 11854 return false; 11855 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 11856 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 11857 if (NumBits1 <= NumBits2) 11858 return false; 11859 return true; 11860} 11861 11862bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const { 11863 return Imm == (int32_t)Imm; 11864} 11865 11866bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const { 11867 // Can also use sub to handle negated immediates. 11868 return Imm == (int32_t)Imm; 11869} 11870 11871bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 11872 if (!VT1.isInteger() || !VT2.isInteger()) 11873 return false; 11874 unsigned NumBits1 = VT1.getSizeInBits(); 11875 unsigned NumBits2 = VT2.getSizeInBits(); 11876 if (NumBits1 <= NumBits2) 11877 return false; 11878 return true; 11879} 11880 11881bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const { 11882 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 11883 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit(); 11884} 11885 11886bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const { 11887 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 11888 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit(); 11889} 11890 11891bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const { 11892 // i16 instructions are longer (0x66 prefix) and potentially slower. 11893 return !(VT1 == MVT::i32 && VT2 == MVT::i16); 11894} 11895 11896/// isShuffleMaskLegal - Targets can use this to indicate that they only 11897/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 11898/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 11899/// are assumed to be legal. 11900bool 11901X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 11902 EVT VT) const { 11903 // Very little shuffling can be done for 64-bit vectors right now. 11904 if (VT.getSizeInBits() == 64) 11905 return false; 11906 11907 // FIXME: pshufb, blends, shifts. 
11908 return (VT.getVectorNumElements() == 2 || 11909 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 11910 isMOVLMask(M, VT) || 11911 isSHUFPMask(M, VT, Subtarget->hasAVX()) || 11912 isPSHUFDMask(M, VT) || 11913 isPSHUFHWMask(M, VT, Subtarget->hasAVX2()) || 11914 isPSHUFLWMask(M, VT, Subtarget->hasAVX2()) || 11915 isPALIGNRMask(M, VT, Subtarget) || 11916 isUNPCKLMask(M, VT, Subtarget->hasAVX2()) || 11917 isUNPCKHMask(M, VT, Subtarget->hasAVX2()) || 11918 isUNPCKL_v_undef_Mask(M, VT, Subtarget->hasAVX2()) || 11919 isUNPCKH_v_undef_Mask(M, VT, Subtarget->hasAVX2())); 11920} 11921 11922bool 11923X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, 11924 EVT VT) const { 11925 unsigned NumElts = VT.getVectorNumElements(); 11926 // FIXME: This collection of masks seems suspect. 11927 if (NumElts == 2) 11928 return true; 11929 if (NumElts == 4 && VT.is128BitVector()) { 11930 return (isMOVLMask(Mask, VT) || 11931 isCommutedMOVLMask(Mask, VT, true) || 11932 isSHUFPMask(Mask, VT, Subtarget->hasAVX()) || 11933 isSHUFPMask(Mask, VT, Subtarget->hasAVX(), /* Commuted */ true)); 11934 } 11935 return false; 11936} 11937 11938//===----------------------------------------------------------------------===// 11939// X86 Scheduler Hooks 11940//===----------------------------------------------------------------------===// 11941 11942// private utility function 11943 11944// Get CMPXCHG opcode for the specified data type. 11945static unsigned getCmpXChgOpcode(EVT VT) { 11946 switch (VT.getSimpleVT().SimpleTy) { 11947 case MVT::i8: return X86::LCMPXCHG8; 11948 case MVT::i16: return X86::LCMPXCHG16; 11949 case MVT::i32: return X86::LCMPXCHG32; 11950 case MVT::i64: return X86::LCMPXCHG64; 11951 default: 11952 break; 11953 } 11954 llvm_unreachable("Invalid operand size!"); 11955} 11956 11957// Get LOAD opcode for the specified data type. 11958static unsigned getLoadOpcode(EVT VT) { 11959 switch (VT.getSimpleVT().SimpleTy) { 11960 case MVT::i8: return X86::MOV8rm; 11961 case MVT::i16: return X86::MOV16rm; 11962 case MVT::i32: return X86::MOV32rm; 11963 case MVT::i64: return X86::MOV64rm; 11964 default: 11965 break; 11966 } 11967 llvm_unreachable("Invalid operand size!"); 11968} 11969 11970// Get opcode of the non-atomic one from the specified atomic instruction. 11971static unsigned getNonAtomicOpcode(unsigned Opc) { 11972 switch (Opc) { 11973 case X86::ATOMAND8: return X86::AND8rr; 11974 case X86::ATOMAND16: return X86::AND16rr; 11975 case X86::ATOMAND32: return X86::AND32rr; 11976 case X86::ATOMAND64: return X86::AND64rr; 11977 case X86::ATOMOR8: return X86::OR8rr; 11978 case X86::ATOMOR16: return X86::OR16rr; 11979 case X86::ATOMOR32: return X86::OR32rr; 11980 case X86::ATOMOR64: return X86::OR64rr; 11981 case X86::ATOMXOR8: return X86::XOR8rr; 11982 case X86::ATOMXOR16: return X86::XOR16rr; 11983 case X86::ATOMXOR32: return X86::XOR32rr; 11984 case X86::ATOMXOR64: return X86::XOR64rr; 11985 } 11986 llvm_unreachable("Unhandled atomic-load-op opcode!"); 11987} 11988 11989// Get opcode of the non-atomic one from the specified atomic instruction with 11990// extra opcode. 
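// For NAND the extra opcode is the NOT applied to the AND result; for the
// min/max variants it is the CMP whose flags the returned CMOV consumes.
// Note the i8 cases return a 32-bit CMOV: there is no 8-bit CMOV, so the
// operands are first promoted to i32 (see EmitAtomicLoadArith below).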
11991static unsigned getNonAtomicOpcodeWithExtraOpc(unsigned Opc, 11992 unsigned &ExtraOpc) { 11993 switch (Opc) { 11994 case X86::ATOMNAND8: ExtraOpc = X86::NOT8r; return X86::AND8rr; 11995 case X86::ATOMNAND16: ExtraOpc = X86::NOT16r; return X86::AND16rr; 11996 case X86::ATOMNAND32: ExtraOpc = X86::NOT32r; return X86::AND32rr; 11997 case X86::ATOMNAND64: ExtraOpc = X86::NOT64r; return X86::AND64rr; 11998 case X86::ATOMMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVL32rr; 11999 case X86::ATOMMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVL16rr; 12000 case X86::ATOMMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVL32rr; 12001 case X86::ATOMMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVL64rr; 12002 case X86::ATOMMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVG32rr; 12003 case X86::ATOMMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVG16rr; 12004 case X86::ATOMMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVG32rr; 12005 case X86::ATOMMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVG64rr; 12006 case X86::ATOMUMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVB32rr; 12007 case X86::ATOMUMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVB16rr; 12008 case X86::ATOMUMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVB32rr; 12009 case X86::ATOMUMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVB64rr; 12010 case X86::ATOMUMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVA32rr; 12011 case X86::ATOMUMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVA16rr; 12012 case X86::ATOMUMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVA32rr; 12013 case X86::ATOMUMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVA64rr; 12014 } 12015 llvm_unreachable("Unhandled atomic-load-op opcode!"); 12016} 12017 12018// Get opcode of the non-atomic one from the specified atomic instruction for 12019// 64-bit data type on 32-bit target. 12020static unsigned getNonAtomic6432Opcode(unsigned Opc, unsigned &HiOpc) { 12021 switch (Opc) { 12022 case X86::ATOMAND6432: HiOpc = X86::AND32rr; return X86::AND32rr; 12023 case X86::ATOMOR6432: HiOpc = X86::OR32rr; return X86::OR32rr; 12024 case X86::ATOMXOR6432: HiOpc = X86::XOR32rr; return X86::XOR32rr; 12025 case X86::ATOMADD6432: HiOpc = X86::ADC32rr; return X86::ADD32rr; 12026 case X86::ATOMSUB6432: HiOpc = X86::SBB32rr; return X86::SUB32rr; 12027 case X86::ATOMSWAP6432: HiOpc = X86::MOV32rr; return X86::MOV32rr; 12028 case X86::ATOMMAX6432: HiOpc = X86::SETLr; return X86::SETLr; 12029 case X86::ATOMMIN6432: HiOpc = X86::SETGr; return X86::SETGr; 12030 case X86::ATOMUMAX6432: HiOpc = X86::SETBr; return X86::SETBr; 12031 case X86::ATOMUMIN6432: HiOpc = X86::SETAr; return X86::SETAr; 12032 } 12033 llvm_unreachable("Unhandled atomic-load-op opcode!"); 12034} 12035 12036// Get opcode of the non-atomic one from the specified atomic instruction for 12037// 64-bit data type on 32-bit target with extra opcode. 12038static unsigned getNonAtomic6432OpcodeWithExtraOpc(unsigned Opc, 12039 unsigned &HiOpc, 12040 unsigned &ExtraOpc) { 12041 switch (Opc) { 12042 case X86::ATOMNAND6432: 12043 ExtraOpc = X86::NOT32r; 12044 HiOpc = X86::AND32rr; 12045 return X86::AND32rr; 12046 } 12047 llvm_unreachable("Unhandled atomic-load-op opcode!"); 12048} 12049 12050// Get pseudo CMOV opcode from the specified data type. 
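// These pseudos are used when the subtarget has no native CMOV; they are
// later lowered to explicit control flow by EmitLoweredSelect (see the
// !hasCMov path in EmitAtomicLoadArith below).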
12051static unsigned getPseudoCMOVOpc(EVT VT) { 12052 switch (VT.getSimpleVT().SimpleTy) { 12053 case MVT::i8: return X86::CMOV_GR8; 12054 case MVT::i16: return X86::CMOV_GR16; 12055 case MVT::i32: return X86::CMOV_GR32; 12056 default: 12057 break; 12058 } 12059 llvm_unreachable("Unknown CMOV opcode!"); 12060} 12061 12062// EmitAtomicLoadArith - emit the code sequence for pseudo atomic instructions. 12063// They will be translated into a spin-loop or compare-exchange loop from 12064// 12065// ... 12066// dst = atomic-fetch-op MI.addr, MI.val 12067// ... 12068// 12069// to 12070// 12071// ... 12072// EAX = LOAD MI.addr 12073// loop: 12074// t1 = OP MI.val, EAX 12075// LCMPXCHG [MI.addr], t1, [EAX is implicitly used & defined] 12076// JNE loop 12077// sink: 12078// dst = EAX 12079// ... 12080MachineBasicBlock * 12081X86TargetLowering::EmitAtomicLoadArith(MachineInstr *MI, 12082 MachineBasicBlock *MBB) const { 12083 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12084 DebugLoc DL = MI->getDebugLoc(); 12085 12086 MachineFunction *MF = MBB->getParent(); 12087 MachineRegisterInfo &MRI = MF->getRegInfo(); 12088 12089 const BasicBlock *BB = MBB->getBasicBlock(); 12090 MachineFunction::iterator I = MBB; 12091 ++I; 12092 12093 assert(MI->getNumOperands() <= X86::AddrNumOperands + 3 && 12094 "Unexpected number of operands"); 12095 12096 assert(MI->hasOneMemOperand() && 12097 "Expected atomic-load-op to have one memoperand"); 12098 12099 // Memory Reference 12100 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 12101 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 12102 12103 unsigned DstReg, SrcReg; 12104 unsigned MemOpndSlot; 12105 12106 unsigned CurOp = 0; 12107 12108 DstReg = MI->getOperand(CurOp++).getReg(); 12109 MemOpndSlot = CurOp; 12110 CurOp += X86::AddrNumOperands; 12111 SrcReg = MI->getOperand(CurOp++).getReg(); 12112 12113 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 12114 MVT::SimpleValueType VT = *RC->vt_begin(); 12115 unsigned AccPhyReg = getX86SubSuperRegister(X86::EAX, VT); 12116 12117 unsigned LCMPXCHGOpc = getCmpXChgOpcode(VT); 12118 unsigned LOADOpc = getLoadOpcode(VT); 12119 12120 // For the atomic load-arith operator, we generate 12121 // 12122 // thisMBB: 12123 // EAX = LOAD [MI.addr] 12124 // mainMBB: 12125 // t1 = OP MI.val, EAX 12126 // LCMPXCHG [MI.addr], t1, [EAX is implicitly used & defined] 12127 // JNE mainMBB 12128 // sinkMBB: 12129 12130 MachineBasicBlock *thisMBB = MBB; 12131 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 12132 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 12133 MF->insert(I, mainMBB); 12134 MF->insert(I, sinkMBB); 12135 12136 MachineInstrBuilder MIB; 12137 12138 // Transfer the remainder of BB and its successor edges to sinkMBB. 12139 sinkMBB->splice(sinkMBB->begin(), MBB, 12140 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 12141 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 12142 12143 // thisMBB: 12144 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), AccPhyReg); 12145 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 12146 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 12147 MIB.setMemRefs(MMOBegin, MMOEnd); 12148 12149 thisMBB->addSuccessor(mainMBB); 12150 12151 // mainMBB: 12152 MachineBasicBlock *origMainMBB = mainMBB; 12153 mainMBB->addLiveIn(AccPhyReg); 12154 12155 // Copy AccPhyReg as it is used more than once. 
12156 unsigned AccReg = MRI.createVirtualRegister(RC); 12157 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), AccReg) 12158 .addReg(AccPhyReg); 12159 12160 unsigned t1 = MRI.createVirtualRegister(RC); 12161 unsigned Opc = MI->getOpcode(); 12162 switch (Opc) { 12163 default: 12164 llvm_unreachable("Unhandled atomic-load-op opcode!"); 12165 case X86::ATOMAND8: 12166 case X86::ATOMAND16: 12167 case X86::ATOMAND32: 12168 case X86::ATOMAND64: 12169 case X86::ATOMOR8: 12170 case X86::ATOMOR16: 12171 case X86::ATOMOR32: 12172 case X86::ATOMOR64: 12173 case X86::ATOMXOR8: 12174 case X86::ATOMXOR16: 12175 case X86::ATOMXOR32: 12176 case X86::ATOMXOR64: { 12177 unsigned ARITHOpc = getNonAtomicOpcode(Opc); 12178 BuildMI(mainMBB, DL, TII->get(ARITHOpc), t1).addReg(SrcReg) 12179 .addReg(AccReg); 12180 break; 12181 } 12182 case X86::ATOMNAND8: 12183 case X86::ATOMNAND16: 12184 case X86::ATOMNAND32: 12185 case X86::ATOMNAND64: { 12186 unsigned t2 = MRI.createVirtualRegister(RC); 12187 unsigned NOTOpc; 12188 unsigned ANDOpc = getNonAtomicOpcodeWithExtraOpc(Opc, NOTOpc); 12189 BuildMI(mainMBB, DL, TII->get(ANDOpc), t2).addReg(SrcReg) 12190 .addReg(AccReg); 12191 BuildMI(mainMBB, DL, TII->get(NOTOpc), t1).addReg(t2); 12192 break; 12193 } 12194 case X86::ATOMMAX8: 12195 case X86::ATOMMAX16: 12196 case X86::ATOMMAX32: 12197 case X86::ATOMMAX64: 12198 case X86::ATOMMIN8: 12199 case X86::ATOMMIN16: 12200 case X86::ATOMMIN32: 12201 case X86::ATOMMIN64: 12202 case X86::ATOMUMAX8: 12203 case X86::ATOMUMAX16: 12204 case X86::ATOMUMAX32: 12205 case X86::ATOMUMAX64: 12206 case X86::ATOMUMIN8: 12207 case X86::ATOMUMIN16: 12208 case X86::ATOMUMIN32: 12209 case X86::ATOMUMIN64: { 12210 unsigned CMPOpc; 12211 unsigned CMOVOpc = getNonAtomicOpcodeWithExtraOpc(Opc, CMPOpc); 12212 12213 BuildMI(mainMBB, DL, TII->get(CMPOpc)) 12214 .addReg(SrcReg) 12215 .addReg(AccReg); 12216 12217 if (Subtarget->hasCMov()) { 12218 if (VT != MVT::i8) { 12219 // Native support 12220 BuildMI(mainMBB, DL, TII->get(CMOVOpc), t1) 12221 .addReg(SrcReg) 12222 .addReg(AccReg); 12223 } else { 12224 // Promote i8 to i32 to use CMOV32 12225 const TargetRegisterClass *RC32 = getRegClassFor(MVT::i32); 12226 unsigned SrcReg32 = MRI.createVirtualRegister(RC32); 12227 unsigned AccReg32 = MRI.createVirtualRegister(RC32); 12228 unsigned t2 = MRI.createVirtualRegister(RC32); 12229 12230 unsigned Undef = MRI.createVirtualRegister(RC32); 12231 BuildMI(mainMBB, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Undef); 12232 12233 BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), SrcReg32) 12234 .addReg(Undef) 12235 .addReg(SrcReg) 12236 .addImm(X86::sub_8bit); 12237 BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), AccReg32) 12238 .addReg(Undef) 12239 .addReg(AccReg) 12240 .addImm(X86::sub_8bit); 12241 12242 BuildMI(mainMBB, DL, TII->get(CMOVOpc), t2) 12243 .addReg(SrcReg32) 12244 .addReg(AccReg32); 12245 12246 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t1) 12247 .addReg(t2, 0, X86::sub_8bit); 12248 } 12249 } else { 12250 // Use pseudo select and lower them. 
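      // For example (a sketch; names invented), ATOMMAX32 on a CMOV-less
      // target becomes
      //
      //   CMP32rr %src, %acc                  // EFLAGS from the compare above
      //   %t1 = CMOV_GR32 %src, %acc, COND_L  // t1 = (src < acc) ? acc : src
      //
      // and EmitLoweredSelect then expands the CMOV_GR32 pseudo into a
      // branch-and-PHI diamond keyed on those flags.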
12251 assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) && 12252 "Invalid atomic-load-op transformation!"); 12253 unsigned SelOpc = getPseudoCMOVOpc(VT); 12254 X86::CondCode CC = X86::getCondFromCMovOpc(CMOVOpc); 12255 assert(CC != X86::COND_INVALID && "Invalid atomic-load-op transformation!"); 12256 MIB = BuildMI(mainMBB, DL, TII->get(SelOpc), t1) 12257 .addReg(SrcReg).addReg(AccReg) 12258 .addImm(CC); 12259 mainMBB = EmitLoweredSelect(MIB, mainMBB); 12260 } 12261 break; 12262 } 12263 } 12264 12265 // Copy AccPhyReg back from virtual register. 12266 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), AccPhyReg) 12267 .addReg(AccReg); 12268 12269 MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc)); 12270 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 12271 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 12272 MIB.addReg(t1); 12273 MIB.setMemRefs(MMOBegin, MMOEnd); 12274 12275 BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB); 12276 12277 mainMBB->addSuccessor(origMainMBB); 12278 mainMBB->addSuccessor(sinkMBB); 12279 12280 // sinkMBB: 12281 sinkMBB->addLiveIn(AccPhyReg); 12282 12283 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 12284 TII->get(TargetOpcode::COPY), DstReg) 12285 .addReg(AccPhyReg); 12286 12287 MI->eraseFromParent(); 12288 return sinkMBB; 12289} 12290 12291// EmitAtomicLoadArith6432 - emit the code sequence for pseudo atomic 12292// instructions. They will be translated into a spin-loop or compare-exchange 12293// loop from 12294// 12295// ... 12296// dst = atomic-fetch-op MI.addr, MI.val 12297// ... 12298// 12299// to 12300// 12301// ... 12302// EAX = LOAD [MI.addr + 0] 12303// EDX = LOAD [MI.addr + 4] 12304// loop: 12305// EBX = OP MI.val.lo, EAX 12306// ECX = OP MI.val.hi, EDX 12307// LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined] 12308// JNE loop 12309// sink: 12310// dst = EDX:EAX 12311// ... 
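//
// For example, ATOMADD6432 pairs ADD32rr with ADC32rr so the carry from the
// low half flows into the high half (illustrative):
//
//   EBX = ADD32rr MI.val.lo, EAX   // sets CF
//   ECX = ADC32rr MI.val.hi, EDX   // consumes CF
//   LCMPXCHG8B [MI.addr]           // succeeds only if EDX:EAX still matches
//   JNE loop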
12312MachineBasicBlock * 12313X86TargetLowering::EmitAtomicLoadArith6432(MachineInstr *MI, 12314 MachineBasicBlock *MBB) const { 12315 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12316 DebugLoc DL = MI->getDebugLoc(); 12317 12318 MachineFunction *MF = MBB->getParent(); 12319 MachineRegisterInfo &MRI = MF->getRegInfo(); 12320 12321 const BasicBlock *BB = MBB->getBasicBlock(); 12322 MachineFunction::iterator I = MBB; 12323 ++I; 12324 12325 assert(MI->getNumOperands() <= X86::AddrNumOperands + 4 && 12326 "Unexpected number of operands"); 12327 12328 assert(MI->hasOneMemOperand() && 12329 "Expected atomic-load-op32 to have one memoperand"); 12330 12331 // Memory Reference 12332 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 12333 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 12334 12335 unsigned DstLoReg, DstHiReg; 12336 unsigned SrcLoReg, SrcHiReg; 12337 unsigned MemOpndSlot; 12338 12339 unsigned CurOp = 0; 12340 12341 DstLoReg = MI->getOperand(CurOp++).getReg(); 12342 DstHiReg = MI->getOperand(CurOp++).getReg(); 12343 MemOpndSlot = CurOp; 12344 CurOp += X86::AddrNumOperands; 12345 SrcLoReg = MI->getOperand(CurOp++).getReg(); 12346 SrcHiReg = MI->getOperand(CurOp++).getReg(); 12347 12348 const TargetRegisterClass *RC = &X86::GR32RegClass; 12349 const TargetRegisterClass *RC8 = &X86::GR8RegClass; 12350 12351 unsigned LCMPXCHGOpc = X86::LCMPXCHG8B; 12352 unsigned LOADOpc = X86::MOV32rm; 12353 12354 // For the atomic load-arith operator, we generate 12355 // 12356 // thisMBB: 12357 // EAX = LOAD [MI.addr + 0] 12358 // EDX = LOAD [MI.addr + 4] 12359 // mainMBB: 12360 // EBX = OP MI.vallo, EAX 12361 // ECX = OP MI.valhi, EDX 12362 // LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined] 12363 // JNE mainMBB 12364 // sinkMBB: 12365 12366 MachineBasicBlock *thisMBB = MBB; 12367 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 12368 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 12369 MF->insert(I, mainMBB); 12370 MF->insert(I, sinkMBB); 12371 12372 MachineInstrBuilder MIB; 12373 12374 // Transfer the remainder of BB and its successor edges to sinkMBB. 12375 sinkMBB->splice(sinkMBB->begin(), MBB, 12376 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 12377 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 12378 12379 // thisMBB: 12380 // Lo 12381 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), X86::EAX); 12382 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 12383 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 12384 MIB.setMemRefs(MMOBegin, MMOEnd); 12385 // Hi 12386 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), X86::EDX); 12387 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 12388 if (i == X86::AddrDisp) 12389 MIB.addDisp(MI->getOperand(MemOpndSlot + i), 4); // 4 == sizeof(i32) 12390 else 12391 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 12392 } 12393 MIB.setMemRefs(MMOBegin, MMOEnd); 12394 12395 thisMBB->addSuccessor(mainMBB); 12396 12397 // mainMBB: 12398 MachineBasicBlock *origMainMBB = mainMBB; 12399 mainMBB->addLiveIn(X86::EAX); 12400 mainMBB->addLiveIn(X86::EDX); 12401 12402 // Copy EDX:EAX as they are used more than once. 
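  // (The MIN/MAX cases in the switch below build a lexicographic 64-bit
  // compare out of two 32-bit compares; e.g. for ATOMMAX6432, sketched:
  //
  //   cl = (src.lo < lo)             // SETL after CMP32rr src.lo, lo
  //   ch = (src.hi < hi)             // SETL after CMP32rr src.hi, hi
  //   cc = (src.hi == hi) ? cl : ch  // CMOVE on the hi compare's ZF
  //
  // and cc != 0 keeps the current value, otherwise src is stored.)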
12403 unsigned LoReg = MRI.createVirtualRegister(RC); 12404 unsigned HiReg = MRI.createVirtualRegister(RC); 12405 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), LoReg).addReg(X86::EAX); 12406 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), HiReg).addReg(X86::EDX); 12407 12408 unsigned t1L = MRI.createVirtualRegister(RC); 12409 unsigned t1H = MRI.createVirtualRegister(RC); 12410 12411 unsigned Opc = MI->getOpcode(); 12412 switch (Opc) { 12413 default: 12414 llvm_unreachable("Unhandled atomic-load-op6432 opcode!"); 12415 case X86::ATOMAND6432: 12416 case X86::ATOMOR6432: 12417 case X86::ATOMXOR6432: 12418 case X86::ATOMADD6432: 12419 case X86::ATOMSUB6432: { 12420 unsigned HiOpc; 12421 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 12422 BuildMI(mainMBB, DL, TII->get(LoOpc), t1L).addReg(SrcLoReg).addReg(LoReg); 12423 BuildMI(mainMBB, DL, TII->get(HiOpc), t1H).addReg(SrcHiReg).addReg(HiReg); 12424 break; 12425 } 12426 case X86::ATOMNAND6432: { 12427 unsigned HiOpc, NOTOpc; 12428 unsigned LoOpc = getNonAtomic6432OpcodeWithExtraOpc(Opc, HiOpc, NOTOpc); 12429 unsigned t2L = MRI.createVirtualRegister(RC); 12430 unsigned t2H = MRI.createVirtualRegister(RC); 12431 BuildMI(mainMBB, DL, TII->get(LoOpc), t2L).addReg(SrcLoReg).addReg(LoReg); 12432 BuildMI(mainMBB, DL, TII->get(HiOpc), t2H).addReg(SrcHiReg).addReg(HiReg); 12433 BuildMI(mainMBB, DL, TII->get(NOTOpc), t1L).addReg(t2L); 12434 BuildMI(mainMBB, DL, TII->get(NOTOpc), t1H).addReg(t2H); 12435 break; 12436 } 12437 case X86::ATOMMAX6432: 12438 case X86::ATOMMIN6432: 12439 case X86::ATOMUMAX6432: 12440 case X86::ATOMUMIN6432: { 12441 unsigned HiOpc; 12442 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 12443 unsigned cL = MRI.createVirtualRegister(RC8); 12444 unsigned cH = MRI.createVirtualRegister(RC8); 12445 unsigned cL32 = MRI.createVirtualRegister(RC); 12446 unsigned cH32 = MRI.createVirtualRegister(RC); 12447 unsigned cc = MRI.createVirtualRegister(RC); 12448 // cl := cmp src_lo, lo 12449 BuildMI(mainMBB, DL, TII->get(X86::CMP32rr)) 12450 .addReg(SrcLoReg).addReg(LoReg); 12451 BuildMI(mainMBB, DL, TII->get(LoOpc), cL); 12452 BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cL32).addReg(cL); 12453 // ch := cmp src_hi, hi 12454 BuildMI(mainMBB, DL, TII->get(X86::CMP32rr)) 12455 .addReg(SrcHiReg).addReg(HiReg); 12456 BuildMI(mainMBB, DL, TII->get(HiOpc), cH); 12457 BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cH32).addReg(cH); 12458 // cc := if (src_hi == hi) ? 
cl : ch; 12459 if (Subtarget->hasCMov()) { 12460 BuildMI(mainMBB, DL, TII->get(X86::CMOVE32rr), cc) 12461 .addReg(cH32).addReg(cL32); 12462 } else { 12463 MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), cc) 12464 .addReg(cH32).addReg(cL32) 12465 .addImm(X86::COND_E); 12466 mainMBB = EmitLoweredSelect(MIB, mainMBB); 12467 } 12468 BuildMI(mainMBB, DL, TII->get(X86::TEST32rr)).addReg(cc).addReg(cc); 12469 if (Subtarget->hasCMov()) { 12470 BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t1L) 12471 .addReg(SrcLoReg).addReg(LoReg); 12472 BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t1H) 12473 .addReg(SrcHiReg).addReg(HiReg); 12474 } else { 12475 MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t1L) 12476 .addReg(SrcLoReg).addReg(LoReg) 12477 .addImm(X86::COND_NE); 12478 mainMBB = EmitLoweredSelect(MIB, mainMBB); 12479 MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t1H) 12480 .addReg(SrcHiReg).addReg(HiReg) 12481 .addImm(X86::COND_NE); 12482 mainMBB = EmitLoweredSelect(MIB, mainMBB); 12483 } 12484 break; 12485 } 12486 case X86::ATOMSWAP6432: { 12487 unsigned HiOpc; 12488 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 12489 BuildMI(mainMBB, DL, TII->get(LoOpc), t1L).addReg(SrcLoReg); 12490 BuildMI(mainMBB, DL, TII->get(HiOpc), t1H).addReg(SrcHiReg); 12491 break; 12492 } 12493 } 12494 12495 // Copy EDX:EAX back from HiReg:LoReg 12496 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EAX).addReg(LoReg); 12497 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EDX).addReg(HiReg); 12498 // Copy ECX:EBX from t1H:t1L 12499 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EBX).addReg(t1L); 12500 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::ECX).addReg(t1H); 12501 12502 MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc)); 12503 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 12504 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 12505 MIB.setMemRefs(MMOBegin, MMOEnd); 12506 12507 BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB); 12508 12509 mainMBB->addSuccessor(origMainMBB); 12510 mainMBB->addSuccessor(sinkMBB); 12511 12512 // sinkMBB: 12513 sinkMBB->addLiveIn(X86::EAX); 12514 sinkMBB->addLiveIn(X86::EDX); 12515 12516 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 12517 TII->get(TargetOpcode::COPY), DstLoReg) 12518 .addReg(X86::EAX); 12519 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 12520 TII->get(TargetOpcode::COPY), DstHiReg) 12521 .addReg(X86::EDX); 12522 12523 MI->eraseFromParent(); 12524 return sinkMBB; 12525} 12526 12527// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8 12528// or XMM0_V32I8 in AVX all of this code can be replaced with that 12529// in the .td file. 12530MachineBasicBlock * 12531X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB, 12532 unsigned numArgs, bool memArg) const { 12533 assert(Subtarget->hasSSE42() && 12534 "Target must have SSE4.2 or AVX features enabled"); 12535 12536 DebugLoc dl = MI->getDebugLoc(); 12537 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12538 unsigned Opc; 12539 if (!Subtarget->hasAVX()) { 12540 if (memArg) 12541 Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm; 12542 else 12543 Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr; 12544 } else { 12545 if (memArg) 12546 Opc = numArgs == 3 ? X86::VPCMPISTRM128rm : X86::VPCMPESTRM128rm; 12547 else 12548 Opc = numArgs == 3 ? 
X86::VPCMPISTRM128rr : X86::VPCMPESTRM128rr; 12549 } 12550 12551 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc)); 12552 for (unsigned i = 0; i < numArgs; ++i) { 12553 MachineOperand &Op = MI->getOperand(i+1); 12554 if (!(Op.isReg() && Op.isImplicit())) 12555 MIB.addOperand(Op); 12556 } 12557 BuildMI(*BB, MI, dl, 12558 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg()) 12559 .addReg(X86::XMM0); 12560 12561 MI->eraseFromParent(); 12562 return BB; 12563} 12564 12565MachineBasicBlock * 12566X86TargetLowering::EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB) const { 12567 DebugLoc dl = MI->getDebugLoc(); 12568 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12569 12570 // Address into RAX/EAX, other two args into ECX, EDX. 12571 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r; 12572 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 12573 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg); 12574 for (int i = 0; i < X86::AddrNumOperands; ++i) 12575 MIB.addOperand(MI->getOperand(i)); 12576 12577 unsigned ValOps = X86::AddrNumOperands; 12578 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX) 12579 .addReg(MI->getOperand(ValOps).getReg()); 12580 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX) 12581 .addReg(MI->getOperand(ValOps+1).getReg()); 12582 12583 // The instruction doesn't actually take any operands though. 12584 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr)); 12585 12586 MI->eraseFromParent(); // The pseudo is gone now. 12587 return BB; 12588} 12589 12590MachineBasicBlock * 12591X86TargetLowering::EmitVAARG64WithCustomInserter( 12592 MachineInstr *MI, 12593 MachineBasicBlock *MBB) const { 12594 // Emit va_arg instruction on X86-64. 12595 12596 // Operands to this pseudo-instruction: 12597 // 0 ) Output : destination address (reg) 12598 // 1-5) Input : va_list address (addr, i64mem) 12599 // 6 ) ArgSize : Size (in bytes) of vararg type 12600 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset 12601 // 8 ) Align : Alignment of type 12602 // 9 ) EFLAGS (implicit-def) 12603 12604 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!"); 12605 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands"); 12606 12607 unsigned DestReg = MI->getOperand(0).getReg(); 12608 MachineOperand &Base = MI->getOperand(1); 12609 MachineOperand &Scale = MI->getOperand(2); 12610 MachineOperand &Index = MI->getOperand(3); 12611 MachineOperand &Disp = MI->getOperand(4); 12612 MachineOperand &Segment = MI->getOperand(5); 12613 unsigned ArgSize = MI->getOperand(6).getImm(); 12614 unsigned ArgMode = MI->getOperand(7).getImm(); 12615 unsigned Align = MI->getOperand(8).getImm(); 12616 12617 // Memory Reference 12618 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand"); 12619 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 12620 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 12621 12622 // Machine Information 12623 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12624 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 12625 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64); 12626 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32); 12627 DebugLoc DL = MI->getDebugLoc(); 12628 12629 // struct va_list { 12630 // i32 gp_offset 12631 // i32 fp_offset 12632 // i64 overflow_area (address) 12633 // i64 reg_save_area (address) 12634 // } 12635 // 
sizeof(va_list) = 24
  // alignment(va_list) = 8

  unsigned TotalNumIntRegs = 6;
  unsigned TotalNumXMMRegs = 8;
  bool UseGPOffset = (ArgMode == 1);
  bool UseFPOffset = (ArgMode == 2);
  unsigned MaxOffset = TotalNumIntRegs * 8 +
                       (UseFPOffset ? TotalNumXMMRegs * 16 : 0);

  /* Align ArgSize to a multiple of 8 */
  unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
  bool NeedsAlign = (Align > 8);

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *overflowMBB;
  MachineBasicBlock *offsetMBB;
  MachineBasicBlock *endMBB;

  unsigned OffsetDestReg = 0;    // Argument address computed by offsetMBB
  unsigned OverflowDestReg = 0;  // Argument address computed by overflowMBB
  unsigned OffsetReg = 0;

  if (!UseGPOffset && !UseFPOffset) {
    // If we only pull from the overflow region, we don't create a branch.
    // We don't need to alter control flow.
    OffsetDestReg = 0; // unused
    OverflowDestReg = DestReg;

    offsetMBB = NULL;
    overflowMBB = thisMBB;
    endMBB = thisMBB;
  } else {
    // First emit code to check if gp_offset (or fp_offset) is below the bound.
    // If so, pull the argument from reg_save_area. (branch to offsetMBB)
    // If not, pull from overflow_area. (branch to overflowMBB)
    //
    //       thisMBB
    //          |     .
    //          |        .
    //     offsetMBB   overflowMBB
    //          |        .
    //          |     .
    //        endMBB

    // Registers for the PHI in endMBB
    OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
    OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);

    const BasicBlock *LLVM_BB = MBB->getBasicBlock();
    MachineFunction *MF = MBB->getParent();
    overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    endMBB = MF->CreateMachineBasicBlock(LLVM_BB);

    MachineFunction::iterator MBBIter = MBB;
    ++MBBIter;

    // Insert the new basic blocks
    MF->insert(MBBIter, offsetMBB);
    MF->insert(MBBIter, overflowMBB);
    MF->insert(MBBIter, endMBB);

    // Transfer the remainder of MBB and its successor edges to endMBB.
    endMBB->splice(endMBB->begin(), thisMBB,
                   llvm::next(MachineBasicBlock::iterator(MI)),
                   thisMBB->end());
    endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

    // Make offsetMBB and overflowMBB successors of thisMBB
    thisMBB->addSuccessor(offsetMBB);
    thisMBB->addSuccessor(overflowMBB);

    // endMBB is a successor of both offsetMBB and overflowMBB
    offsetMBB->addSuccessor(endMBB);
    overflowMBB->addSuccessor(endMBB);

    // Load the offset value into a register
    OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, UseFPOffset ? 4 : 0)
      .addOperand(Segment)
      .setMemRefs(MMOBegin, MMOEnd);

    // Check if there is enough room left to pull this argument.
    BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
      .addReg(OffsetReg)
      .addImm(MaxOffset + 8 - ArgSizeA8);

    // Branch to "overflowMBB" if offset >= max
    // Fall through to "offsetMBB" otherwise
    BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
      .addMBB(overflowMBB);
  }

  // In offsetMBB, emit code to use the reg_save_area.
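  // Worked example (assuming an 8-byte integer argument, i.e. ArgMode == 1):
  // MaxOffset = 6 * 8 = 48 and ArgSizeA8 = 8, so the CMP above tests
  // gp_offset against 48 + 8 - 8 = 48. While gp_offset < 48 one of the six
  // GP register slots is still unused, the argument is read from
  // reg_save_area + gp_offset, and gp_offset is bumped by 8.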
12734 if (offsetMBB) { 12735 assert(OffsetReg != 0); 12736 12737 // Read the reg_save_area address. 12738 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass); 12739 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg) 12740 .addOperand(Base) 12741 .addOperand(Scale) 12742 .addOperand(Index) 12743 .addDisp(Disp, 16) 12744 .addOperand(Segment) 12745 .setMemRefs(MMOBegin, MMOEnd); 12746 12747 // Zero-extend the offset 12748 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass); 12749 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64) 12750 .addImm(0) 12751 .addReg(OffsetReg) 12752 .addImm(X86::sub_32bit); 12753 12754 // Add the offset to the reg_save_area to get the final address. 12755 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg) 12756 .addReg(OffsetReg64) 12757 .addReg(RegSaveReg); 12758 12759 // Compute the offset for the next argument 12760 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass); 12761 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg) 12762 .addReg(OffsetReg) 12763 .addImm(UseFPOffset ? 16 : 8); 12764 12765 // Store it back into the va_list. 12766 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr)) 12767 .addOperand(Base) 12768 .addOperand(Scale) 12769 .addOperand(Index) 12770 .addDisp(Disp, UseFPOffset ? 4 : 0) 12771 .addOperand(Segment) 12772 .addReg(NextOffsetReg) 12773 .setMemRefs(MMOBegin, MMOEnd); 12774 12775 // Jump to endMBB 12776 BuildMI(offsetMBB, DL, TII->get(X86::JMP_4)) 12777 .addMBB(endMBB); 12778 } 12779 12780 // 12781 // Emit code to use overflow area 12782 // 12783 12784 // Load the overflow_area address into a register. 12785 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass); 12786 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg) 12787 .addOperand(Base) 12788 .addOperand(Scale) 12789 .addOperand(Index) 12790 .addDisp(Disp, 8) 12791 .addOperand(Segment) 12792 .setMemRefs(MMOBegin, MMOEnd); 12793 12794 // If we need to align it, do so. Otherwise, just copy the address 12795 // to OverflowDestReg. 12796 if (NeedsAlign) { 12797 // Align the overflow address 12798 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2"); 12799 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass); 12800 12801 // aligned_addr = (addr + (align-1)) & ~(align-1) 12802 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg) 12803 .addReg(OverflowAddrReg) 12804 .addImm(Align-1); 12805 12806 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg) 12807 .addReg(TmpReg) 12808 .addImm(~(uint64_t)(Align-1)); 12809 } else { 12810 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg) 12811 .addReg(OverflowAddrReg); 12812 } 12813 12814 // Compute the next overflow address after this argument. 12815 // (the overflow address should be kept 8-byte aligned) 12816 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass); 12817 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg) 12818 .addReg(OverflowDestReg) 12819 .addImm(ArgSizeA8); 12820 12821 // Store the new overflow address. 12822 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr)) 12823 .addOperand(Base) 12824 .addOperand(Scale) 12825 .addOperand(Index) 12826 .addDisp(Disp, 8) 12827 .addOperand(Segment) 12828 .addReg(NextAddrReg) 12829 .setMemRefs(MMOBegin, MMOEnd); 12830 12831 // If we branched, emit the PHI to the front of endMBB. 
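  // E.g. the merge below is
  //
  //   %dest = PHI [ %OffsetDestReg, offsetMBB ],
  //               [ %OverflowDestReg, overflowMBB ]
  //
  // so later users of the VAARG_64 result see a single register no matter
  // which path computed the argument's address.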
12832 if (offsetMBB) { 12833 BuildMI(*endMBB, endMBB->begin(), DL, 12834 TII->get(X86::PHI), DestReg) 12835 .addReg(OffsetDestReg).addMBB(offsetMBB) 12836 .addReg(OverflowDestReg).addMBB(overflowMBB); 12837 } 12838 12839 // Erase the pseudo instruction 12840 MI->eraseFromParent(); 12841 12842 return endMBB; 12843} 12844 12845MachineBasicBlock * 12846X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter( 12847 MachineInstr *MI, 12848 MachineBasicBlock *MBB) const { 12849 // Emit code to save XMM registers to the stack. The ABI says that the 12850 // number of registers to save is given in %al, so it's theoretically 12851 // possible to do an indirect jump trick to avoid saving all of them, 12852 // however this code takes a simpler approach and just executes all 12853 // of the stores if %al is non-zero. It's less code, and it's probably 12854 // easier on the hardware branch predictor, and stores aren't all that 12855 // expensive anyway. 12856 12857 // Create the new basic blocks. One block contains all the XMM stores, 12858 // and one block is the final destination regardless of whether any 12859 // stores were performed. 12860 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 12861 MachineFunction *F = MBB->getParent(); 12862 MachineFunction::iterator MBBIter = MBB; 12863 ++MBBIter; 12864 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB); 12865 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB); 12866 F->insert(MBBIter, XMMSaveMBB); 12867 F->insert(MBBIter, EndMBB); 12868 12869 // Transfer the remainder of MBB and its successor edges to EndMBB. 12870 EndMBB->splice(EndMBB->begin(), MBB, 12871 llvm::next(MachineBasicBlock::iterator(MI)), 12872 MBB->end()); 12873 EndMBB->transferSuccessorsAndUpdatePHIs(MBB); 12874 12875 // The original block will now fall through to the XMM save block. 12876 MBB->addSuccessor(XMMSaveMBB); 12877 // The XMMSaveMBB will fall through to the end block. 12878 XMMSaveMBB->addSuccessor(EndMBB); 12879 12880 // Now add the instructions. 12881 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12882 DebugLoc DL = MI->getDebugLoc(); 12883 12884 unsigned CountReg = MI->getOperand(0).getReg(); 12885 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm(); 12886 int64_t VarArgsFPOffset = MI->getOperand(2).getImm(); 12887 12888 if (!Subtarget->isTargetWin64()) { 12889 // If %al is 0, branch around the XMM save block. 12890 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg); 12891 BuildMI(MBB, DL, TII->get(X86::JE_4)).addMBB(EndMBB); 12892 MBB->addSuccessor(EndMBB); 12893 } 12894 12895 unsigned MOVOpc = Subtarget->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr; 12896 // In the XMM save block, save all the XMM argument registers. 12897 for (int i = 3, e = MI->getNumOperands(); i != e; ++i) { 12898 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset; 12899 MachineMemOperand *MMO = 12900 F->getMachineMemOperand( 12901 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset), 12902 MachineMemOperand::MOStore, 12903 /*Size=*/16, /*Align=*/16); 12904 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc)) 12905 .addFrameIndex(RegSaveFrameIndex) 12906 .addImm(/*Scale=*/1) 12907 .addReg(/*IndexReg=*/0) 12908 .addImm(/*Disp=*/Offset) 12909 .addReg(/*Segment=*/0) 12910 .addReg(MI->getOperand(i).getReg()) 12911 .addMemOperand(MMO); 12912 } 12913 12914 MI->eraseFromParent(); // The pseudo instruction is gone now. 
12915 12916 return EndMBB; 12917} 12918 12919// The EFLAGS operand of SelectItr might be missing a kill marker 12920// because there were multiple uses of EFLAGS, and ISel didn't know 12921// which to mark. Figure out whether SelectItr should have had a 12922// kill marker, and set it if it should. Returns the correct kill 12923// marker value. 12924static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr, 12925 MachineBasicBlock* BB, 12926 const TargetRegisterInfo* TRI) { 12927 // Scan forward through BB for a use/def of EFLAGS. 12928 MachineBasicBlock::iterator miI(llvm::next(SelectItr)); 12929 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { 12930 const MachineInstr& mi = *miI; 12931 if (mi.readsRegister(X86::EFLAGS)) 12932 return false; 12933 if (mi.definesRegister(X86::EFLAGS)) 12934 break; // Should have kill-flag - update below. 12935 } 12936 12937 // If we hit the end of the block, check whether EFLAGS is live into a 12938 // successor. 12939 if (miI == BB->end()) { 12940 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(), 12941 sEnd = BB->succ_end(); 12942 sItr != sEnd; ++sItr) { 12943 MachineBasicBlock* succ = *sItr; 12944 if (succ->isLiveIn(X86::EFLAGS)) 12945 return false; 12946 } 12947 } 12948 12949 // We found a def, or hit the end of the basic block and EFLAGS wasn't live 12950 // out. SelectMI should have a kill flag on EFLAGS. 12951 SelectItr->addRegisterKilled(X86::EFLAGS, TRI); 12952 return true; 12953} 12954 12955MachineBasicBlock * 12956X86TargetLowering::EmitLoweredSelect(MachineInstr *MI, 12957 MachineBasicBlock *BB) const { 12958 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12959 DebugLoc DL = MI->getDebugLoc(); 12960 12961 // To "insert" a SELECT_CC instruction, we actually have to insert the 12962 // diamond control-flow pattern. The incoming instruction knows the 12963 // destination vreg to set, the condition code register to branch on, the 12964 // true/false values to select between, and a branch opcode to use. 12965 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 12966 MachineFunction::iterator It = BB; 12967 ++It; 12968 12969 // thisMBB: 12970 // ... 12971 // TrueVal = ... 12972 // cmpTY ccX, r1, r2 12973 // bCC copy1MBB 12974 // fallthrough --> copy0MBB 12975 MachineBasicBlock *thisMBB = BB; 12976 MachineFunction *F = BB->getParent(); 12977 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 12978 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 12979 F->insert(It, copy0MBB); 12980 F->insert(It, sinkMBB); 12981 12982 // If the EFLAGS register isn't dead in the terminator, then claim that it's 12983 // live into the sink and copy blocks. 12984 const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); 12985 if (!MI->killsRegister(X86::EFLAGS) && 12986 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) { 12987 copy0MBB->addLiveIn(X86::EFLAGS); 12988 sinkMBB->addLiveIn(X86::EFLAGS); 12989 } 12990 12991 // Transfer the remainder of BB and its successor edges to sinkMBB. 12992 sinkMBB->splice(sinkMBB->begin(), BB, 12993 llvm::next(MachineBasicBlock::iterator(MI)), 12994 BB->end()); 12995 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 12996 12997 // Add the true and fallthrough blocks as its successors. 12998 BB->addSuccessor(copy0MBB); 12999 BB->addSuccessor(sinkMBB); 13000 13001 // Create the conditional branch instruction. 
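  // For instance (names invented), lowering "%res = CMOV_GR32 %f, %t, COND_E"
  // emits a JE_4 to sinkMBB here; the fallthrough path is copy0MBB, and the
  // PHI built below yields %res = PHI [ %f, copy0MBB ], [ %t, thisMBB ].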
13002 unsigned Opc = 13003 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 13004 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB); 13005 13006 // copy0MBB: 13007 // %FalseValue = ... 13008 // # fallthrough to sinkMBB 13009 copy0MBB->addSuccessor(sinkMBB); 13010 13011 // sinkMBB: 13012 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 13013 // ... 13014 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 13015 TII->get(X86::PHI), MI->getOperand(0).getReg()) 13016 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 13017 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 13018 13019 MI->eraseFromParent(); // The pseudo instruction is gone now. 13020 return sinkMBB; 13021} 13022 13023MachineBasicBlock * 13024X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB, 13025 bool Is64Bit) const { 13026 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13027 DebugLoc DL = MI->getDebugLoc(); 13028 MachineFunction *MF = BB->getParent(); 13029 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 13030 13031 assert(getTargetMachine().Options.EnableSegmentedStacks); 13032 13033 unsigned TlsReg = Is64Bit ? X86::FS : X86::GS; 13034 unsigned TlsOffset = Is64Bit ? 0x70 : 0x30; 13035 13036 // BB: 13037 // ... [Till the alloca] 13038 // If stacklet is not large enough, jump to mallocMBB 13039 // 13040 // bumpMBB: 13041 // Allocate by subtracting from RSP 13042 // Jump to continueMBB 13043 // 13044 // mallocMBB: 13045 // Allocate by call to runtime 13046 // 13047 // continueMBB: 13048 // ... 13049 // [rest of original BB] 13050 // 13051 13052 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB); 13053 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB); 13054 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB); 13055 13056 MachineRegisterInfo &MRI = MF->getRegInfo(); 13057 const TargetRegisterClass *AddrRegClass = 13058 getRegClassFor(Is64Bit ? MVT::i64:MVT::i32); 13059 13060 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass), 13061 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass), 13062 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass), 13063 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass), 13064 sizeVReg = MI->getOperand(1).getReg(), 13065 physSPReg = Is64Bit ? X86::RSP : X86::ESP; 13066 13067 MachineFunction::iterator MBBIter = BB; 13068 ++MBBIter; 13069 13070 MF->insert(MBBIter, bumpMBB); 13071 MF->insert(MBBIter, mallocMBB); 13072 MF->insert(MBBIter, continueMBB); 13073 13074 continueMBB->splice(continueMBB->begin(), BB, llvm::next 13075 (MachineBasicBlock::iterator(MI)), BB->end()); 13076 continueMBB->transferSuccessorsAndUpdatePHIs(BB); 13077 13078 // Add code to the main basic block to check if the stack limit has been hit, 13079 // and if so, jump to mallocMBB otherwise to bumpMBB. 13080 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg); 13081 BuildMI(BB, DL, TII->get(Is64Bit ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg) 13082 .addReg(tmpSPVReg).addReg(sizeVReg); 13083 BuildMI(BB, DL, TII->get(Is64Bit ? X86::CMP64mr:X86::CMP32mr)) 13084 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg) 13085 .addReg(SPLimitVReg); 13086 BuildMI(BB, DL, TII->get(X86::JG_4)).addMBB(mallocMBB); 13087 13088 // bumpMBB simply decreases the stack pointer, since we know the current 13089 // stacklet has enough space. 
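  // To recap the check just emitted (x86-64 flavor; names illustrative):
  //
  //   %tmp = COPY %rsp
  //   %new = SUB64rr %tmp, %size   // candidate stack pointer
  //   CMP64mr %fs:0x70, %new       // stack limit vs. candidate
  //   JG_4 mallocMBB               // limit above candidate: not enough room
  //
  // bumpMBB then simply installs the candidate as the stack pointer.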
  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
    .addReg(SPLimitVReg);
  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
    .addReg(SPLimitVReg);
  BuildMI(bumpMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB);

  // Calls into a routine in libgcc to allocate more space from the heap.
  const uint32_t *RegMask =
    getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C);
  if (Is64Bit) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
      .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::RDI, RegState::Implicit)
      .addReg(X86::RAX, RegState::ImplicitDefine);
  } else {
    BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
      .addImm(12);
    BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::EAX, RegState::ImplicitDefine);
  }

  if (!Is64Bit)
    BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
      .addImm(16);

  BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
    .addReg(Is64Bit ? X86::RAX : X86::EAX);
  BuildMI(mallocMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB);

  // Set up the CFG correctly.
  BB->addSuccessor(bumpMBB);
  BB->addSuccessor(mallocMBB);
  mallocMBB->addSuccessor(continueMBB);
  bumpMBB->addSuccessor(continueMBB);

  // Take care of the PHI nodes.
  BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
          MI->getOperand(0).getReg())
    .addReg(mallocPtrVReg).addMBB(mallocMBB)
    .addReg(bumpSPPtrVReg).addMBB(bumpMBB);

  // Delete the original pseudo instruction.
  MI->eraseFromParent();

  // And we're done.
  return continueMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
                                        MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  assert(!Subtarget->isTargetEnvMacho());

  // The lowering is pretty easy: we're just emitting the call to _alloca. The
  // non-trivial part is impdef of ESP.

  if (Subtarget->isTargetWin64()) {
    if (Subtarget->isTargetCygMing()) {
      // ___chkstk(Mingw64):
      // Clobbers R10, R11, RAX and EFLAGS.
      // Updates RSP.
      BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
        .addExternalSymbol("___chkstk")
        .addReg(X86::RAX, RegState::Implicit)
        .addReg(X86::RSP, RegState::Implicit)
        .addReg(X86::RAX, RegState::Define | RegState::Implicit)
        .addReg(X86::RSP, RegState::Define | RegState::Implicit)
        .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
    } else {
      // __chkstk(MSVCRT): does not update stack pointer.
      // Clobbers R10, R11 and EFLAGS.
      // FIXME: RAX(allocated size) might be reused and not killed.
      BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
        .addExternalSymbol("__chkstk")
        .addReg(X86::RAX, RegState::Implicit)
        .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
      // RAX has the offset to be subtracted from RSP.
13176 BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP) 13177 .addReg(X86::RSP) 13178 .addReg(X86::RAX); 13179 } 13180 } else { 13181 const char *StackProbeSymbol = 13182 Subtarget->isTargetWindows() ? "_chkstk" : "_alloca"; 13183 13184 BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32)) 13185 .addExternalSymbol(StackProbeSymbol) 13186 .addReg(X86::EAX, RegState::Implicit) 13187 .addReg(X86::ESP, RegState::Implicit) 13188 .addReg(X86::EAX, RegState::Define | RegState::Implicit) 13189 .addReg(X86::ESP, RegState::Define | RegState::Implicit) 13190 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 13191 } 13192 13193 MI->eraseFromParent(); // The pseudo instruction is gone now. 13194 return BB; 13195} 13196 13197MachineBasicBlock * 13198X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI, 13199 MachineBasicBlock *BB) const { 13200 // This is pretty easy. We're taking the value that we received from 13201 // our load from the relocation, sticking it in either RDI (x86-64) 13202 // or EAX and doing an indirect call. The return value will then 13203 // be in the normal return register. 13204 const X86InstrInfo *TII 13205 = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo()); 13206 DebugLoc DL = MI->getDebugLoc(); 13207 MachineFunction *F = BB->getParent(); 13208 13209 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?"); 13210 assert(MI->getOperand(3).isGlobal() && "This should be a global"); 13211 13212 // Get a register mask for the lowered call. 13213 // FIXME: The 32-bit calls have non-standard calling conventions. Use a 13214 // proper register mask. 13215 const uint32_t *RegMask = 13216 getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C); 13217 if (Subtarget->is64Bit()) { 13218 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 13219 TII->get(X86::MOV64rm), X86::RDI) 13220 .addReg(X86::RIP) 13221 .addImm(0).addReg(0) 13222 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 13223 MI->getOperand(3).getTargetFlags()) 13224 .addReg(0); 13225 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m)); 13226 addDirectMem(MIB, X86::RDI); 13227 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask); 13228 } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { 13229 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 13230 TII->get(X86::MOV32rm), X86::EAX) 13231 .addReg(0) 13232 .addImm(0).addReg(0) 13233 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 13234 MI->getOperand(3).getTargetFlags()) 13235 .addReg(0); 13236 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 13237 addDirectMem(MIB, X86::EAX); 13238 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); 13239 } else { 13240 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 13241 TII->get(X86::MOV32rm), X86::EAX) 13242 .addReg(TII->getGlobalBaseReg(F)) 13243 .addImm(0).addReg(0) 13244 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 13245 MI->getOperand(3).getTargetFlags()) 13246 .addReg(0); 13247 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 13248 addDirectMem(MIB, X86::EAX); 13249 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); 13250 } 13251 13252 MI->eraseFromParent(); // The pseudo instruction is gone now. 
13253 return BB; 13254} 13255 13256MachineBasicBlock * 13257X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 13258 MachineBasicBlock *BB) const { 13259 switch (MI->getOpcode()) { 13260 default: llvm_unreachable("Unexpected instr type to insert"); 13261 case X86::TAILJMPd64: 13262 case X86::TAILJMPr64: 13263 case X86::TAILJMPm64: 13264 llvm_unreachable("TAILJMP64 would not be touched here."); 13265 case X86::TCRETURNdi64: 13266 case X86::TCRETURNri64: 13267 case X86::TCRETURNmi64: 13268 return BB; 13269 case X86::WIN_ALLOCA: 13270 return EmitLoweredWinAlloca(MI, BB); 13271 case X86::SEG_ALLOCA_32: 13272 return EmitLoweredSegAlloca(MI, BB, false); 13273 case X86::SEG_ALLOCA_64: 13274 return EmitLoweredSegAlloca(MI, BB, true); 13275 case X86::TLSCall_32: 13276 case X86::TLSCall_64: 13277 return EmitLoweredTLSCall(MI, BB); 13278 case X86::CMOV_GR8: 13279 case X86::CMOV_FR32: 13280 case X86::CMOV_FR64: 13281 case X86::CMOV_V4F32: 13282 case X86::CMOV_V2F64: 13283 case X86::CMOV_V2I64: 13284 case X86::CMOV_V8F32: 13285 case X86::CMOV_V4F64: 13286 case X86::CMOV_V4I64: 13287 case X86::CMOV_GR16: 13288 case X86::CMOV_GR32: 13289 case X86::CMOV_RFP32: 13290 case X86::CMOV_RFP64: 13291 case X86::CMOV_RFP80: 13292 return EmitLoweredSelect(MI, BB); 13293 13294 case X86::FP32_TO_INT16_IN_MEM: 13295 case X86::FP32_TO_INT32_IN_MEM: 13296 case X86::FP32_TO_INT64_IN_MEM: 13297 case X86::FP64_TO_INT16_IN_MEM: 13298 case X86::FP64_TO_INT32_IN_MEM: 13299 case X86::FP64_TO_INT64_IN_MEM: 13300 case X86::FP80_TO_INT16_IN_MEM: 13301 case X86::FP80_TO_INT32_IN_MEM: 13302 case X86::FP80_TO_INT64_IN_MEM: { 13303 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13304 DebugLoc DL = MI->getDebugLoc(); 13305 13306 // Change the floating point control register to use "round towards zero" 13307 // mode when truncating to an integer value. 13308 MachineFunction *F = BB->getParent(); 13309 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false); 13310 addFrameReference(BuildMI(*BB, MI, DL, 13311 TII->get(X86::FNSTCW16m)), CWFrameIdx); 13312 13313 // Load the old value of the high byte of the control word... 13314 unsigned OldCW = 13315 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass); 13316 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW), 13317 CWFrameIdx); 13318 13319 // Set the high part to be round to zero... 13320 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx) 13321 .addImm(0xC7F); 13322 13323 // Reload the modified control word now... 13324 addFrameReference(BuildMI(*BB, MI, DL, 13325 TII->get(X86::FLDCW16m)), CWFrameIdx); 13326 13327 // Restore the memory image of control word to original value 13328 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx) 13329 .addReg(OldCW); 13330 13331 // Get the X86 opcode to use. 
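    // (A note on the control-word value above: 0xC7F sets the x87
    // rounding-control field, bits 10-11, to 11b, "round toward zero", and
    // keeps all FP exceptions masked, which matches C's truncating
    // float-to-int semantics; the saved word is reloaded afterwards.)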
13332 unsigned Opc; 13333 switch (MI->getOpcode()) { 13334 default: llvm_unreachable("illegal opcode!"); 13335 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 13336 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 13337 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 13338 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 13339 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 13340 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 13341 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 13342 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 13343 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 13344 } 13345 13346 X86AddressMode AM; 13347 MachineOperand &Op = MI->getOperand(0); 13348 if (Op.isReg()) { 13349 AM.BaseType = X86AddressMode::RegBase; 13350 AM.Base.Reg = Op.getReg(); 13351 } else { 13352 AM.BaseType = X86AddressMode::FrameIndexBase; 13353 AM.Base.FrameIndex = Op.getIndex(); 13354 } 13355 Op = MI->getOperand(1); 13356 if (Op.isImm()) 13357 AM.Scale = Op.getImm(); 13358 Op = MI->getOperand(2); 13359 if (Op.isImm()) 13360 AM.IndexReg = Op.getImm(); 13361 Op = MI->getOperand(3); 13362 if (Op.isGlobal()) { 13363 AM.GV = Op.getGlobal(); 13364 } else { 13365 AM.Disp = Op.getImm(); 13366 } 13367 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM) 13368 .addReg(MI->getOperand(X86::AddrNumOperands).getReg()); 13369 13370 // Reload the original control word now. 13371 addFrameReference(BuildMI(*BB, MI, DL, 13372 TII->get(X86::FLDCW16m)), CWFrameIdx); 13373 13374 MI->eraseFromParent(); // The pseudo instruction is gone now. 13375 return BB; 13376 } 13377 // String/text processing lowering. 13378 case X86::PCMPISTRM128REG: 13379 case X86::VPCMPISTRM128REG: 13380 case X86::PCMPISTRM128MEM: 13381 case X86::VPCMPISTRM128MEM: 13382 case X86::PCMPESTRM128REG: 13383 case X86::VPCMPESTRM128REG: 13384 case X86::PCMPESTRM128MEM: 13385 case X86::VPCMPESTRM128MEM: { 13386 unsigned NumArgs; 13387 bool MemArg; 13388 switch (MI->getOpcode()) { 13389 default: llvm_unreachable("illegal opcode!"); 13390 case X86::PCMPISTRM128REG: 13391 case X86::VPCMPISTRM128REG: 13392 NumArgs = 3; MemArg = false; break; 13393 case X86::PCMPISTRM128MEM: 13394 case X86::VPCMPISTRM128MEM: 13395 NumArgs = 3; MemArg = true; break; 13396 case X86::PCMPESTRM128REG: 13397 case X86::VPCMPESTRM128REG: 13398 NumArgs = 5; MemArg = false; break; 13399 case X86::PCMPESTRM128MEM: 13400 case X86::VPCMPESTRM128MEM: 13401 NumArgs = 5; MemArg = true; break; 13402 } 13403 return EmitPCMP(MI, BB, NumArgs, MemArg); 13404 } 13405 13406 // Thread synchronization. 13407 case X86::MONITOR: 13408 return EmitMonitor(MI, BB); 13409 13410 // Atomic Lowering. 
  case X86::ATOMAND8:
  case X86::ATOMAND16:
  case X86::ATOMAND32:
  case X86::ATOMAND64:
    // Fall through
  case X86::ATOMOR8:
  case X86::ATOMOR16:
  case X86::ATOMOR32:
  case X86::ATOMOR64:
    // Fall through
  case X86::ATOMXOR8:
  case X86::ATOMXOR16:
  case X86::ATOMXOR32:
  case X86::ATOMXOR64:
    // Fall through
  case X86::ATOMNAND8:
  case X86::ATOMNAND16:
  case X86::ATOMNAND32:
  case X86::ATOMNAND64:
    // Fall through
  case X86::ATOMMAX8:
  case X86::ATOMMAX16:
  case X86::ATOMMAX32:
  case X86::ATOMMAX64:
    // Fall through
  case X86::ATOMMIN8:
  case X86::ATOMMIN16:
  case X86::ATOMMIN32:
  case X86::ATOMMIN64:
    // Fall through
  case X86::ATOMUMAX8:
  case X86::ATOMUMAX16:
  case X86::ATOMUMAX32:
  case X86::ATOMUMAX64:
    // Fall through
  case X86::ATOMUMIN8:
  case X86::ATOMUMIN16:
  case X86::ATOMUMIN32:
  case X86::ATOMUMIN64:
    return EmitAtomicLoadArith(MI, BB);

  // This group does 64-bit operations on a 32-bit host.
  case X86::ATOMAND6432:
  case X86::ATOMOR6432:
  case X86::ATOMXOR6432:
  case X86::ATOMNAND6432:
  case X86::ATOMADD6432:
  case X86::ATOMSUB6432:
  case X86::ATOMMAX6432:
  case X86::ATOMMIN6432:
  case X86::ATOMUMAX6432:
  case X86::ATOMUMIN6432:
  case X86::ATOMSWAP6432:
    return EmitAtomicLoadArith6432(MI, BB);

  case X86::VASTART_SAVE_XMM_REGS:
    return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);

  case X86::VAARG_64:
    return EmitVAARG64WithCustomInserter(MI, BB);
  }
}

//===----------------------------------------------------------------------===//
//                           X86 Optimization Hooks
//===----------------------------------------------------------------------===//

void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  unsigned BitWidth = KnownZero.getBitWidth();
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
  switch (Opc) {
  default: break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::ADC:
  case X86ISD::SBB:
  case X86ISD::SMUL:
  case X86ISD::UMUL:
  case X86ISD::INC:
  case X86ISD::DEC:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    // These nodes' second result is a boolean.
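    // E.g. these boolean results are always 0 or 1, so all bits above bit 0
    // are known zero; getHighBitsSet(BitWidth, BitWidth - 1) below records
    // exactly that.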
13507 if (Op.getResNo() == 0) 13508 break; 13509 // Fallthrough 13510 case X86ISD::SETCC: 13511 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); 13512 break; 13513 case ISD::INTRINSIC_WO_CHAIN: { 13514 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 13515 unsigned NumLoBits = 0; 13516 switch (IntId) { 13517 default: break; 13518 case Intrinsic::x86_sse_movmsk_ps: 13519 case Intrinsic::x86_avx_movmsk_ps_256: 13520 case Intrinsic::x86_sse2_movmsk_pd: 13521 case Intrinsic::x86_avx_movmsk_pd_256: 13522 case Intrinsic::x86_mmx_pmovmskb: 13523 case Intrinsic::x86_sse2_pmovmskb_128: 13524 case Intrinsic::x86_avx2_pmovmskb: { 13525 // High bits of movmskp{s|d}, pmovmskb are known zero. 13526 switch (IntId) { 13527 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 13528 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break; 13529 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break; 13530 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break; 13531 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break; 13532 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break; 13533 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break; 13534 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break; 13535 } 13536 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits); 13537 break; 13538 } 13539 } 13540 break; 13541 } 13542 } 13543} 13544 13545unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 13546 unsigned Depth) const { 13547 // SETCC_CARRY sets the dest to ~0 for true or 0 for false. 13548 if (Op.getOpcode() == X86ISD::SETCC_CARRY) 13549 return Op.getValueType().getScalarType().getSizeInBits(); 13550 13551 // Fallback case. 13552 return 1; 13553} 13554 13555/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 13556/// node is a GlobalAddress + offset. 
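/// For example, given (X86ISD::Wrapper (TargetGlobalAddress @g + 4)) this
/// returns true with GA = @g and Offset = 4.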
13557bool X86TargetLowering::isGAPlusOffset(SDNode *N, 13558 const GlobalValue* &GA, 13559 int64_t &Offset) const { 13560 if (N->getOpcode() == X86ISD::Wrapper) { 13561 if (isa<GlobalAddressSDNode>(N->getOperand(0))) { 13562 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 13563 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset(); 13564 return true; 13565 } 13566 } 13567 return TargetLowering::isGAPlusOffset(N, GA, Offset); 13568} 13569 13570/// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the 13571/// same as extracting the high 128-bit part of 256-bit vector and then 13572/// inserting the result into the low part of a new 256-bit vector 13573static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) { 13574 EVT VT = SVOp->getValueType(0); 13575 unsigned NumElems = VT.getVectorNumElements(); 13576 13577 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 13578 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j) 13579 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 13580 SVOp->getMaskElt(j) >= 0) 13581 return false; 13582 13583 return true; 13584} 13585 13586/// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the 13587/// same as extracting the low 128-bit part of 256-bit vector and then 13588/// inserting the result into the high part of a new 256-bit vector 13589static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) { 13590 EVT VT = SVOp->getValueType(0); 13591 unsigned NumElems = VT.getVectorNumElements(); 13592 13593 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 13594 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j) 13595 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 13596 SVOp->getMaskElt(j) >= 0) 13597 return false; 13598 13599 return true; 13600} 13601 13602/// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors. 13603static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, 13604 TargetLowering::DAGCombinerInfo &DCI, 13605 const X86Subtarget* Subtarget) { 13606 DebugLoc dl = N->getDebugLoc(); 13607 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 13608 SDValue V1 = SVOp->getOperand(0); 13609 SDValue V2 = SVOp->getOperand(1); 13610 EVT VT = SVOp->getValueType(0); 13611 unsigned NumElems = VT.getVectorNumElements(); 13612 13613 if (V1.getOpcode() == ISD::CONCAT_VECTORS && 13614 V2.getOpcode() == ISD::CONCAT_VECTORS) { 13615 // 13616 // 0,0,0,... 13617 // | 13618 // V UNDEF BUILD_VECTOR UNDEF 13619 // \ / \ / 13620 // CONCAT_VECTOR CONCAT_VECTOR 13621 // \ / 13622 // \ / 13623 // RESULT: V + zero extended 13624 // 13625 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR || 13626 V2.getOperand(1).getOpcode() != ISD::UNDEF || 13627 V1.getOperand(1).getOpcode() != ISD::UNDEF) 13628 return SDValue(); 13629 13630 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode())) 13631 return SDValue(); 13632 13633 // To match the shuffle mask, the first half of the mask should 13634 // be exactly the first vector, and all the rest a splat with the 13635 // first element of the second one. 13636 for (unsigned i = 0; i != NumElems/2; ++i) 13637 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) || 13638 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems)) 13639 return SDValue(); 13640 13641 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD. 
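      // E.g. for v8i32, mask <0,1,2,3,8,8,8,8> with
      //   V1 = concat_vectors (load %p), undef
      //   V2 = concat_vectors (build_vector 0,0,0,0), undef
      // yields the four loaded elements followed by four zeros, i.e. a
      // 128-bit VZEXT_LOAD of %p widened with zeros.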
13642 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) { 13643 if (Ld->hasNUsesOfValue(1, 0)) { 13644 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other); 13645 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() }; 13646 SDValue ResNode = 13647 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2, 13648 Ld->getMemoryVT(), 13649 Ld->getPointerInfo(), 13650 Ld->getAlignment(), 13651 false/*isVolatile*/, true/*ReadMem*/, 13652 false/*WriteMem*/); 13653 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode); 13654 } 13655 } 13656 13657 // Emit a zeroed vector and insert the desired subvector on its 13658 // first half. 13659 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 13660 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl); 13661 return DCI.CombineTo(N, InsV); 13662 } 13663 13664 //===--------------------------------------------------------------------===// 13665 // Combine some shuffles into subvector extracts and inserts: 13666 // 13667 13668 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 13669 if (isShuffleHigh128VectorInsertLow(SVOp)) { 13670 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl); 13671 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl); 13672 return DCI.CombineTo(N, InsV); 13673 } 13674 13675 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 13676 if (isShuffleLow128VectorInsertHigh(SVOp)) { 13677 SDValue V = Extract128BitVector(V1, 0, DAG, dl); 13678 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl); 13679 return DCI.CombineTo(N, InsV); 13680 } 13681 13682 return SDValue(); 13683} 13684 13685/// PerformShuffleCombine - Performs several different shuffle combines. 13686static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 13687 TargetLowering::DAGCombinerInfo &DCI, 13688 const X86Subtarget *Subtarget) { 13689 DebugLoc dl = N->getDebugLoc(); 13690 EVT VT = N->getValueType(0); 13691 13692 // Don't create instructions with illegal types after legalize types has run. 13693 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 13694 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType())) 13695 return SDValue(); 13696 13697 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode 13698 if (Subtarget->hasAVX() && VT.is256BitVector() && 13699 N->getOpcode() == ISD::VECTOR_SHUFFLE) 13700 return PerformShuffleCombine256(N, DAG, DCI, Subtarget); 13701 13702 // Only handle 128 wide vector from here on. 13703 if (!VT.is128BitVector()) 13704 return SDValue(); 13705 13706 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3, 13707 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are 13708 // consecutive, non-overlapping, and in the right order. 13709 SmallVector<SDValue, 16> Elts; 13710 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) 13711 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0)); 13712 13713 return EltsFromConsecutiveLoads(VT, Elts, dl, DAG); 13714} 13715 13716 13717/// PerformTruncateCombine - Converts truncate operation to 13718/// a sequence of vector shuffle operations. 
13719/// It is possible when we truncate 256-bit vector to 128-bit vector 13720static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, 13721 TargetLowering::DAGCombinerInfo &DCI, 13722 const X86Subtarget *Subtarget) { 13723 if (!DCI.isBeforeLegalizeOps()) 13724 return SDValue(); 13725 13726 if (!Subtarget->hasAVX()) 13727 return SDValue(); 13728 13729 EVT VT = N->getValueType(0); 13730 SDValue Op = N->getOperand(0); 13731 EVT OpVT = Op.getValueType(); 13732 DebugLoc dl = N->getDebugLoc(); 13733 13734 if ((VT == MVT::v4i32) && (OpVT == MVT::v4i64)) { 13735 13736 if (Subtarget->hasAVX2()) { 13737 // AVX2: v4i64 -> v4i32 13738 13739 // VPERMD 13740 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1}; 13741 13742 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v8i32, Op); 13743 Op = DAG.getVectorShuffle(MVT::v8i32, dl, Op, DAG.getUNDEF(MVT::v8i32), 13744 ShufMask); 13745 13746 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Op, 13747 DAG.getIntPtrConstant(0)); 13748 } 13749 13750 // AVX: v4i64 -> v4i32 13751 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op, 13752 DAG.getIntPtrConstant(0)); 13753 13754 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op, 13755 DAG.getIntPtrConstant(2)); 13756 13757 OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpLo); 13758 OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi); 13759 13760 // PSHUFD 13761 static const int ShufMask1[] = {0, 2, 0, 0}; 13762 13763 SDValue Undef = DAG.getUNDEF(VT); 13764 OpLo = DAG.getVectorShuffle(VT, dl, OpLo, Undef, ShufMask1); 13765 OpHi = DAG.getVectorShuffle(VT, dl, OpHi, Undef, ShufMask1); 13766 13767 // MOVLHPS 13768 static const int ShufMask2[] = {0, 1, 4, 5}; 13769 13770 return DAG.getVectorShuffle(VT, dl, OpLo, OpHi, ShufMask2); 13771 } 13772 13773 if ((VT == MVT::v8i16) && (OpVT == MVT::v8i32)) { 13774 13775 if (Subtarget->hasAVX2()) { 13776 // AVX2: v8i32 -> v8i16 13777 13778 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v32i8, Op); 13779 13780 // PSHUFB 13781 SmallVector<SDValue,32> pshufbMask; 13782 for (unsigned i = 0; i < 2; ++i) { 13783 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8)); 13784 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8)); 13785 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8)); 13786 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8)); 13787 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8)); 13788 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8)); 13789 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8)); 13790 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8)); 13791 for (unsigned j = 0; j < 8; ++j) 13792 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 13793 } 13794 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v32i8, 13795 &pshufbMask[0], 32); 13796 Op = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v32i8, Op, BV); 13797 13798 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i64, Op); 13799 13800 static const int ShufMask[] = {0, 2, -1, -1}; 13801 Op = DAG.getVectorShuffle(MVT::v4i64, dl, Op, DAG.getUNDEF(MVT::v4i64), 13802 &ShufMask[0]); 13803 13804 Op = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op, 13805 DAG.getIntPtrConstant(0)); 13806 13807 return DAG.getNode(ISD::BITCAST, dl, VT, Op); 13808 } 13809 13810 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op, 13811 DAG.getIntPtrConstant(0)); 13812 13813 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op, 13814 DAG.getIntPtrConstant(4)); 13815 13816 OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLo); 13817 OpHi = 
DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpHi);

    // PSHUFB
    static const int ShufMask1[] = {0,  1,  4,  5,  8,  9, 12, 13,
                                   -1, -1, -1, -1, -1, -1, -1, -1};

    SDValue Undef = DAG.getUNDEF(MVT::v16i8);
    OpLo = DAG.getVectorShuffle(MVT::v16i8, dl, OpLo, Undef, ShufMask1);
    OpHi = DAG.getVectorShuffle(MVT::v16i8, dl, OpHi, Undef, ShufMask1);

    OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpLo);
    OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi);

    // MOVLHPS
    static const int ShufMask2[] = {0, 1, 4, 5};

    SDValue res = DAG.getVectorShuffle(MVT::v4i32, dl, OpLo, OpHi, ShufMask2);
    return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, res);
  }

  return SDValue();
}

/// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
/// specific shuffle of a load can be folded into a single element load.
/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
/// shuffles have been custom lowered so we need to handle those here.
static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
                                         TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue InVec = N->getOperand(0);
  SDValue EltNo = N->getOperand(1);

  if (!isa<ConstantSDNode>(EltNo))
    return SDValue();

  EVT VT = InVec.getValueType();

  bool HasShuffleIntoBitcast = false;
  if (InVec.getOpcode() == ISD::BITCAST) {
    // Don't duplicate a load with other uses.
    if (!InVec.hasOneUse())
      return SDValue();
    EVT BCVT = InVec.getOperand(0).getValueType();
    if (BCVT.getVectorNumElements() != VT.getVectorNumElements())
      return SDValue();
    InVec = InVec.getOperand(0);
    HasShuffleIntoBitcast = true;
  }

  if (!isTargetShuffle(InVec.getOpcode()))
    return SDValue();

  // Don't duplicate a load with other uses.
  if (!InVec.hasOneUse())
    return SDValue();

  SmallVector<int, 16> ShuffleMask;
  bool UnaryShuffle;
  if (!getTargetShuffleMask(InVec.getNode(), VT.getSimpleVT(), ShuffleMask,
                            UnaryShuffle))
    return SDValue();

  // Select the input vector, guarding against an out-of-range extract index.
  unsigned NumElems = VT.getVectorNumElements();
  int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
  int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
  SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
                                         : InVec.getOperand(1);

  // If the inputs to the shuffle are the same for both operands, allow 2 uses.
  unsigned AllowedUses = InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;

  if (LdNode.getOpcode() == ISD::BITCAST) {
    // Don't duplicate a load with other uses.
    if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
      return SDValue();

    AllowedUses = 1; // only allow 1 load use if we have a bitcast
    LdNode = LdNode.getOperand(0);
  }

  if (!ISD::isNormalLoad(LdNode.getNode()))
    return SDValue();

  LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);

  if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
    return SDValue();

  if (HasShuffleIntoBitcast) {
    // If there's a bitcast before the shuffle, check if the load type and
    // alignment are valid.
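    // For example (illustrative): if the original load was v2i64 but the
    // shuffle result is consumed as v4f32, re-reading an element through the
    // bitcast type may impose a different ABI alignment, so the load's
    // alignment and the legality of loading VT are re-checked below before
    // committing to the transform.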
    unsigned Align = LN0->getAlignment();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    unsigned NewAlign = TLI.getTargetData()->
      getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));

    if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT))
      return SDValue();
  }

  // All checks match, so transform back to vector_shuffle so that the DAG
  // combiner can finish the job.
  DebugLoc dl = N->getDebugLoc();

  // Create a shuffle node, taking into account the case that it's a unary
  // shuffle.
  SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(VT) : InVec.getOperand(1);
  Shuffle = DAG.getVectorShuffle(InVec.getValueType(), dl,
                                 InVec.getOperand(0), Shuffle,
                                 &ShuffleMask[0]);
  Shuffle = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
                     EltNo);
}

/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
/// generation and convert it from being a bunch of shuffles and extracts
/// to a simple store and scalar loads to extract the elements.
static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
                                         TargetLowering::DAGCombinerInfo &DCI) {
  SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
  if (NewOp.getNode())
    return NewOp;

  SDValue InputVector = N->getOperand(0);

  // Only operate on vectors of 4 elements, where the alternative shuffling
  // gets to be more expensive.
  if (InputVector.getValueType() != MVT::v4i32)
    return SDValue();

  // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
  // single use which is a sign-extend or zero-extend, and all elements are
  // used.
  SmallVector<SDNode *, 4> Uses;
  unsigned ExtractedElements = 0;
  for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
       UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
    if (UI.getUse().getResNo() != InputVector.getResNo())
      return SDValue();

    SDNode *Extract = *UI;
    if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();

    if (Extract->getValueType(0) != MVT::i32)
      return SDValue();
    if (!Extract->hasOneUse())
      return SDValue();
    if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
        Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
      return SDValue();
    if (!isa<ConstantSDNode>(Extract->getOperand(1)))
      return SDValue();

    // Record which element was extracted.
    ExtractedElements |=
      1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();

    Uses.push_back(Extract);
  }

  // If not all the elements were used, this may not be worthwhile.
  if (ExtractedElements != 15)
    return SDValue();

  // Ok, we've now decided to do the transformation.
  DebugLoc dl = InputVector.getDebugLoc();

  // Store the value to a temporary stack slot.
  SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
  SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
                            MachinePointerInfo(), false, false, 0);

  // Replace each use (extract) with a load of the appropriate element.
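  // A minimal sketch of what the loop below emits for the v4i32 case
  // (names illustrative): with a 16-byte stack slot S,
  //   store v4i32 %vec -> S
  //   %e0 = load i32 S+0,  %e1 = load i32 S+4,
  //   %e2 = load i32 S+8,  %e3 = load i32 S+12
  // and each extract_vector_elt is then replaced by its scalar load.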
  for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
       UE = Uses.end(); UI != UE; ++UI) {
    SDNode *Extract = *UI;

    // Compute the element's address.
    SDValue Idx = Extract->getOperand(1);
    unsigned EltSize =
        InputVector.getValueType().getVectorElementType().getSizeInBits()/8;
    uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());

    SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
                                     StackPtr, OffsetVal);

    // Load the scalar.
    SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch,
                                     ScalarAddr, MachinePointerInfo(),
                                     false, false, false, 0);

    // Replace the extract with the load.
    DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar);
  }

  // The replacement was made in place; don't return anything.
  return SDValue();
}

/// PerformSELECTCombine - Do target-specific dag combines on SELECT and
/// VSELECT nodes.
static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget *Subtarget) {
  DebugLoc DL = N->getDebugLoc();
  SDValue Cond = N->getOperand(0);
  // Get the LHS/RHS of the select.
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  EVT VT = LHS.getValueType();

  // If we have SSE[12] support, try to form min/max nodes. SSE min/max
  // instructions match the semantics of the common C idiom x<y?x:y but not
  // x<=y?x:y, because of how they handle negative zero (which can be
  // ignored in unsafe-math mode).
  if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
      VT != MVT::f80 && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
      (Subtarget->hasSSE2() ||
       (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

    unsigned Opcode = 0;
    // Check for x CC y ? x : y.
    if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
        DAG.isEqualTo(RHS, Cond.getOperand(1))) {
      switch (CC) {
      default: break;
      case ISD::SETULT:
        // Converting this to a min would handle NaNs incorrectly, and swapping
        // the operands would cause it to handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
          if (!DAG.getTarget().Options.UnsafeFPMath &&
              !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETOLE:
        // Converting this to a min would handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.getTarget().Options.UnsafeFPMath &&
            !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
          break;
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETULE:
        // Converting this to a min would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
14074 std::swap(LHS, RHS); 14075 case ISD::SETOLT: 14076 case ISD::SETLT: 14077 case ISD::SETLE: 14078 Opcode = X86ISD::FMIN; 14079 break; 14080 14081 case ISD::SETOGE: 14082 // Converting this to a max would handle comparisons between positive 14083 // and negative zero incorrectly. 14084 if (!DAG.getTarget().Options.UnsafeFPMath && 14085 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 14086 break; 14087 Opcode = X86ISD::FMAX; 14088 break; 14089 case ISD::SETUGT: 14090 // Converting this to a max would handle NaNs incorrectly, and swapping 14091 // the operands would cause it to handle comparisons between positive 14092 // and negative zero incorrectly. 14093 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 14094 if (!DAG.getTarget().Options.UnsafeFPMath && 14095 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 14096 break; 14097 std::swap(LHS, RHS); 14098 } 14099 Opcode = X86ISD::FMAX; 14100 break; 14101 case ISD::SETUGE: 14102 // Converting this to a max would handle both negative zeros and NaNs 14103 // incorrectly, but we can swap the operands to fix both. 14104 std::swap(LHS, RHS); 14105 case ISD::SETOGT: 14106 case ISD::SETGT: 14107 case ISD::SETGE: 14108 Opcode = X86ISD::FMAX; 14109 break; 14110 } 14111 // Check for x CC y ? y : x -- a min/max with reversed arms. 14112 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) && 14113 DAG.isEqualTo(RHS, Cond.getOperand(0))) { 14114 switch (CC) { 14115 default: break; 14116 case ISD::SETOGE: 14117 // Converting this to a min would handle comparisons between positive 14118 // and negative zero incorrectly, and swapping the operands would 14119 // cause it to handle NaNs incorrectly. 14120 if (!DAG.getTarget().Options.UnsafeFPMath && 14121 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) { 14122 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 14123 break; 14124 std::swap(LHS, RHS); 14125 } 14126 Opcode = X86ISD::FMIN; 14127 break; 14128 case ISD::SETUGT: 14129 // Converting this to a min would handle NaNs incorrectly. 14130 if (!DAG.getTarget().Options.UnsafeFPMath && 14131 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) 14132 break; 14133 Opcode = X86ISD::FMIN; 14134 break; 14135 case ISD::SETUGE: 14136 // Converting this to a min would handle both negative zeros and NaNs 14137 // incorrectly, but we can swap the operands to fix both. 14138 std::swap(LHS, RHS); 14139 case ISD::SETOGT: 14140 case ISD::SETGT: 14141 case ISD::SETGE: 14142 Opcode = X86ISD::FMIN; 14143 break; 14144 14145 case ISD::SETULT: 14146 // Converting this to a max would handle NaNs incorrectly. 14147 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 14148 break; 14149 Opcode = X86ISD::FMAX; 14150 break; 14151 case ISD::SETOLE: 14152 // Converting this to a max would handle comparisons between positive 14153 // and negative zero incorrectly, and swapping the operands would 14154 // cause it to handle NaNs incorrectly. 14155 if (!DAG.getTarget().Options.UnsafeFPMath && 14156 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) { 14157 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 14158 break; 14159 std::swap(LHS, RHS); 14160 } 14161 Opcode = X86ISD::FMAX; 14162 break; 14163 case ISD::SETULE: 14164 // Converting this to a max would handle both negative zeros and NaNs 14165 // incorrectly, but we can swap the operands to fix both. 
14166 std::swap(LHS, RHS); 14167 case ISD::SETOLT: 14168 case ISD::SETLT: 14169 case ISD::SETLE: 14170 Opcode = X86ISD::FMAX; 14171 break; 14172 } 14173 } 14174 14175 if (Opcode) 14176 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS); 14177 } 14178 14179 // If this is a select between two integer constants, try to do some 14180 // optimizations. 14181 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) { 14182 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS)) 14183 // Don't do this for crazy integer types. 14184 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) { 14185 // If this is efficiently invertible, canonicalize the LHSC/RHSC values 14186 // so that TrueC (the true value) is larger than FalseC. 14187 bool NeedsCondInvert = false; 14188 14189 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) && 14190 // Efficiently invertible. 14191 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible. 14192 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible. 14193 isa<ConstantSDNode>(Cond.getOperand(1))))) { 14194 NeedsCondInvert = true; 14195 std::swap(TrueC, FalseC); 14196 } 14197 14198 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0. 14199 if (FalseC->getAPIntValue() == 0 && 14200 TrueC->getAPIntValue().isPowerOf2()) { 14201 if (NeedsCondInvert) // Invert the condition if needed. 14202 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 14203 DAG.getConstant(1, Cond.getValueType())); 14204 14205 // Zero extend the condition if needed. 14206 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond); 14207 14208 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 14209 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond, 14210 DAG.getConstant(ShAmt, MVT::i8)); 14211 } 14212 14213 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. 14214 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 14215 if (NeedsCondInvert) // Invert the condition if needed. 14216 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 14217 DAG.getConstant(1, Cond.getValueType())); 14218 14219 // Zero extend the condition if needed. 14220 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 14221 FalseC->getValueType(0), Cond); 14222 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 14223 SDValue(FalseC, 0)); 14224 } 14225 14226 // Optimize cases that will turn into an LEA instruction. This requires 14227 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 14228 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 14229 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 14230 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 14231 14232 bool isFastMultiplier = false; 14233 if (Diff < 10) { 14234 switch ((unsigned char)Diff) { 14235 default: break; 14236 case 1: // result = add base, cond 14237 case 2: // result = lea base( , cond*2) 14238 case 3: // result = lea base(cond, cond*2) 14239 case 4: // result = lea base( , cond*4) 14240 case 5: // result = lea base(cond, cond*4) 14241 case 8: // result = lea base( , cond*8) 14242 case 9: // result = lea base(cond, cond*8) 14243 isFastMultiplier = true; 14244 break; 14245 } 14246 } 14247 14248 if (isFastMultiplier) { 14249 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 14250 if (NeedsCondInvert) // Invert the condition if needed. 14251 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 14252 DAG.getConstant(1, Cond.getValueType())); 14253 14254 // Zero extend the condition if needed. 
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
                             Cond);
          // Scale the condition by the difference.
          if (Diff != 1)
            Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(Diff, Cond.getValueType()));

          // Add the base if non-zero.
          if (FalseC->getAPIntValue() != 0)
            Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                               SDValue(FalseC, 0));
          return Cond;
        }
      }
    }
  }

  // Canonicalize max and min:
  // (x > y) ? x : y -> (x >= y) ? x : y
  // (x < y) ? x : y -> (x <= y) ? x : y
  // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
  // the need for an extra compare against zero, e.g.
  // ((x - y) > 0 ? (x - y) : 0) -> ((x - y) >= 0 ? (x - y) : 0)
  //   subl   %esi, %edi
  //   testl  %edi, %edi
  //   movl   $0, %eax
  //   cmovgl %edi, %eax
  // =>
  //   xorl   %eax, %eax
  //   subl   %esi, %edi
  //   cmovsl %eax, %edi
  if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
      DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
      DAG.isEqualTo(RHS, Cond.getOperand(1))) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
    switch (CC) {
    default: break;
    case ISD::SETLT:
    case ISD::SETGT: {
      ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
      Cond = DAG.getSetCC(Cond.getDebugLoc(), Cond.getValueType(),
                          Cond.getOperand(0), Cond.getOperand(1), NewCC);
      return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
    }
    }
  }

  // If we know that this node is legal then we know that it is going to be
  // matched by one of the SSE/AVX BLEND instructions. These instructions only
  // depend on the highest bit in each word. Try to use SimplifyDemandedBits
  // to simplify previous instructions.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
      !DCI.isBeforeLegalize() && TLI.isOperationLegal(ISD::VSELECT, VT)) {
    unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();

    // Don't optimize vector selects that map to mask-registers.
    if (BitWidth == 1)
      return SDValue();

    assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
    APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);

    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
                                          DCI.isBeforeLegalizeOps());
    if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
        TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne, TLO))
      DCI.CommitTargetLoweringOpt(TLO);
  }

  return SDValue();
}

// Check whether a boolean test is testing a boolean value generated by
// X86ISD::SETCC. If so, return the operand of that SETCC and the proper
// condition code.
//
// Simplify the following patterns:
// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
// to (Op EFLAGS Cond)
//
// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
// to (Op EFLAGS !Cond)
//
// where Op could be BRCOND or CMOV.
//
static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
  // Quit unless this is a CMP, or a SUB whose value result is unused.
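  // (An X86ISD::SUB whose integer result is unused defines only EFLAGS, so
  // it behaves exactly like a CMP here; e.g. "subl %ecx, %eax" used solely
  // for its flags is equivalent to "cmpl %ecx, %eax".)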
  if (Cmp.getOpcode() != X86ISD::CMP &&
      (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
    return SDValue();

  // Quit if not used as a boolean value.
  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();

  // Check CMP operands. One of them should be 0 or 1 and the other should be
  // a SETCC or a value extended from it.
  SDValue Op1 = Cmp.getOperand(0);
  SDValue Op2 = Cmp.getOperand(1);

  SDValue SetCC;
  const ConstantSDNode* C = 0;
  bool needOppositeCond = (CC == X86::COND_E);

  if ((C = dyn_cast<ConstantSDNode>(Op1)))
    SetCC = Op2;
  else if ((C = dyn_cast<ConstantSDNode>(Op2)))
    SetCC = Op1;
  else // Quit if neither operand is a constant.
    return SDValue();

  if (C->getZExtValue() == 1)
    needOppositeCond = !needOppositeCond;
  else if (C->getZExtValue() != 0)
    // Quit if the constant is neither 0 nor 1.
    return SDValue();

  // Skip a 'zext' node.
  if (SetCC.getOpcode() == ISD::ZERO_EXTEND)
    SetCC = SetCC.getOperand(0);

  switch (SetCC.getOpcode()) {
  case X86ISD::SETCC:
    // Set the condition code or the opposite one if necessary.
    CC = X86::CondCode(SetCC.getConstantOperandVal(0));
    if (needOppositeCond)
      CC = X86::GetOppositeBranchCondition(CC);
    return SetCC.getOperand(1);
  case X86ISD::CMOV: {
    // Check whether the false/true values are canonical, i.e. 0 or 1.
    ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
    ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
    // Quit if the true value is not a constant.
    if (!TVal)
      return SDValue();
    // Quit if the false value is not a constant.
    if (!FVal) {
      // A special case for rdrand, where 0 is used when the condition is
      // false.
      SDValue Op = SetCC.getOperand(0);
      if (Op.getOpcode() != X86ISD::RDRAND)
        return SDValue();
    }
    // Quit if the false value is not the constant 0 or 1.
    bool FValIsFalse = true;
    if (FVal && FVal->getZExtValue() != 0) {
      if (FVal->getZExtValue() != 1)
        return SDValue();
      // If FVal is 1, the opposite condition is needed.
      needOppositeCond = !needOppositeCond;
      FValIsFalse = false;
    }
    // Quit if TVal is not the constant opposite of FVal.
    if (FValIsFalse && TVal->getZExtValue() != 1)
      return SDValue();
    if (!FValIsFalse && TVal->getZExtValue() != 0)
      return SDValue();
    CC = X86::CondCode(SetCC.getConstantOperandVal(2));
    if (needOppositeCond)
      CC = X86::GetOppositeBranchCondition(CC);
    return SetCC.getOperand(3);
  }
  }

  return SDValue();
}

/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  DebugLoc DL = N->getDebugLoc();

  // If the flag operand isn't dead, don't touch this CMOV.
14433 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty()) 14434 return SDValue(); 14435 14436 SDValue FalseOp = N->getOperand(0); 14437 SDValue TrueOp = N->getOperand(1); 14438 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2); 14439 SDValue Cond = N->getOperand(3); 14440 14441 if (CC == X86::COND_E || CC == X86::COND_NE) { 14442 switch (Cond.getOpcode()) { 14443 default: break; 14444 case X86ISD::BSR: 14445 case X86ISD::BSF: 14446 // If operand of BSR / BSF are proven never zero, then ZF cannot be set. 14447 if (DAG.isKnownNeverZero(Cond.getOperand(0))) 14448 return (CC == X86::COND_E) ? FalseOp : TrueOp; 14449 } 14450 } 14451 14452 SDValue Flags; 14453 14454 Flags = checkBoolTestSetCCCombine(Cond, CC); 14455 if (Flags.getNode() && 14456 // Extra check as FCMOV only supports a subset of X86 cond. 14457 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) { 14458 SDValue Ops[] = { FalseOp, TrueOp, 14459 DAG.getConstant(CC, MVT::i8), Flags }; 14460 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), 14461 Ops, array_lengthof(Ops)); 14462 } 14463 14464 // If this is a select between two integer constants, try to do some 14465 // optimizations. Note that the operands are ordered the opposite of SELECT 14466 // operands. 14467 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) { 14468 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) { 14469 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is 14470 // larger than FalseC (the false value). 14471 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) { 14472 CC = X86::GetOppositeBranchCondition(CC); 14473 std::swap(TrueC, FalseC); 14474 std::swap(TrueOp, FalseOp); 14475 } 14476 14477 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0. 14478 // This is efficient for any integer data type (including i8/i16) and 14479 // shift amount. 14480 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) { 14481 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 14482 DAG.getConstant(CC, MVT::i8), Cond); 14483 14484 // Zero extend the condition if needed. 14485 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond); 14486 14487 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 14488 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond, 14489 DAG.getConstant(ShAmt, MVT::i8)); 14490 if (N->getNumValues() == 2) // Dead flag value? 14491 return DCI.CombineTo(N, Cond, SDValue()); 14492 return Cond; 14493 } 14494 14495 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. This is efficient 14496 // for any integer data type, including i8/i16. 14497 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 14498 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 14499 DAG.getConstant(CC, MVT::i8), Cond); 14500 14501 // Zero extend the condition if needed. 14502 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 14503 FalseC->getValueType(0), Cond); 14504 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 14505 SDValue(FalseC, 0)); 14506 14507 if (N->getNumValues() == 2) // Dead flag value? 14508 return DCI.CombineTo(N, Cond, SDValue()); 14509 return Cond; 14510 } 14511 14512 // Optimize cases that will turn into an LEA instruction. This requires 14513 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 
14514 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 14515 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 14516 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 14517 14518 bool isFastMultiplier = false; 14519 if (Diff < 10) { 14520 switch ((unsigned char)Diff) { 14521 default: break; 14522 case 1: // result = add base, cond 14523 case 2: // result = lea base( , cond*2) 14524 case 3: // result = lea base(cond, cond*2) 14525 case 4: // result = lea base( , cond*4) 14526 case 5: // result = lea base(cond, cond*4) 14527 case 8: // result = lea base( , cond*8) 14528 case 9: // result = lea base(cond, cond*8) 14529 isFastMultiplier = true; 14530 break; 14531 } 14532 } 14533 14534 if (isFastMultiplier) { 14535 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 14536 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 14537 DAG.getConstant(CC, MVT::i8), Cond); 14538 // Zero extend the condition if needed. 14539 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 14540 Cond); 14541 // Scale the condition by the difference. 14542 if (Diff != 1) 14543 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 14544 DAG.getConstant(Diff, Cond.getValueType())); 14545 14546 // Add the base if non-zero. 14547 if (FalseC->getAPIntValue() != 0) 14548 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 14549 SDValue(FalseC, 0)); 14550 if (N->getNumValues() == 2) // Dead flag value? 14551 return DCI.CombineTo(N, Cond, SDValue()); 14552 return Cond; 14553 } 14554 } 14555 } 14556 } 14557 14558 // Handle these cases: 14559 // (select (x != c), e, c) -> select (x != c), e, x), 14560 // (select (x == c), c, e) -> select (x == c), x, e) 14561 // where the c is an integer constant, and the "select" is the combination 14562 // of CMOV and CMP. 14563 // 14564 // The rationale for this change is that the conditional-move from a constant 14565 // needs two instructions, however, conditional-move from a register needs 14566 // only one instruction. 14567 // 14568 // CAVEAT: By replacing a constant with a symbolic value, it may obscure 14569 // some instruction-combining opportunities. This opt needs to be 14570 // postponed as late as possible. 14571 // 14572 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) { 14573 // the DCI.xxxx conditions are provided to postpone the optimization as 14574 // late as possible. 14575 14576 ConstantSDNode *CmpAgainst = 0; 14577 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) && 14578 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) && 14579 dyn_cast<ConstantSDNode>(Cond.getOperand(0)) == 0) { 14580 14581 if (CC == X86::COND_NE && 14582 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) { 14583 CC = X86::GetOppositeBranchCondition(CC); 14584 std::swap(TrueOp, FalseOp); 14585 } 14586 14587 if (CC == X86::COND_E && 14588 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) { 14589 SDValue Ops[] = { FalseOp, Cond.getOperand(0), 14590 DAG.getConstant(CC, MVT::i8), Cond }; 14591 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops, 14592 array_lengthof(Ops)); 14593 } 14594 } 14595 } 14596 14597 return SDValue(); 14598} 14599 14600 14601/// PerformMulCombine - Optimize a single multiply with constant into two 14602/// in order to implement it with two cheaper instructions, e.g. 14603/// LEA + SHL, LEA + LEA. 
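/// For example (illustrative): x*45 = (x*9)*5 can become two LEAs, and
/// x*40 = (x*5)<<3 can become LEA + SHL, avoiding a more expensive IMUL.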
static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i64)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();
  uint64_t MulAmt = C->getZExtValue();
  if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
    return SDValue();

  uint64_t MulAmt1 = 0;
  uint64_t MulAmt2 = 0;
  if ((MulAmt % 9) == 0) {
    MulAmt1 = 9;
    MulAmt2 = MulAmt / 9;
  } else if ((MulAmt % 5) == 0) {
    MulAmt1 = 5;
    MulAmt2 = MulAmt / 5;
  } else if ((MulAmt % 3) == 0) {
    MulAmt1 = 3;
    MulAmt2 = MulAmt / 3;
  }
  if (MulAmt2 &&
      (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)) {
    DebugLoc DL = N->getDebugLoc();

    if (isPowerOf2_64(MulAmt2) &&
        !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
      // If the second multiplier is a power of 2, issue it first. We want the
      // multiply by 3, 5, or 9 to be folded into the addressing mode unless
      // the lone use is an add.
      std::swap(MulAmt1, MulAmt2);

    SDValue NewMul;
    if (isPowerOf2_64(MulAmt1))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                           DAG.getConstant(MulAmt1, VT));

    if (isPowerOf2_64(MulAmt2))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
                           DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
                           DAG.getConstant(MulAmt2, VT));

    // Do not add new nodes to the DAG combiner worklist.
    DCI.CombineTo(N, NewMul, false);
  }
  return SDValue();
}

static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
  // since the result of setcc_c is all zero's or all ones.
  if (VT.isInteger() && !VT.isVector() &&
      N1C && N0.getOpcode() == ISD::AND &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
        ((N00.getOpcode() == ISD::ANY_EXTEND ||
          N00.getOpcode() == ISD::ZERO_EXTEND) &&
         N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
      APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
      APInt ShAmt = N1C->getAPIntValue();
      Mask = Mask.shl(ShAmt);
      if (Mask != 0)
        return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
                           N00, DAG.getConstant(Mask, VT));
    }
  }

  // Hardware support for vector shifts is sparse, which makes us scalarize
  // the vector operations in many cases. Also, on Sandy Bridge ADD is faster
  // than SHL:
  // (shl V, 1) -> add V,V
  if (isSplatVector(N1.getNode())) {
    assert(N0.getValueType().isVector() && "Invalid vector shift type");
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(0));
    // We shift all of the values by one. In many cases we do not have
    // hardware support for this operation. This is better expressed as an ADD
    // of two values.
    if (N1C && (1 == N1C->getZExtValue())) {
      return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, N0);
    }
  }

  return SDValue();
}

/// PerformShiftCombine - Transforms vector shift nodes to use vector shifts
/// when possible.
static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  if (N->getOpcode() == ISD::SHL) {
    SDValue V = PerformSHLCombine(N, DAG);
    if (V.getNode()) return V;
  }

  // On X86 with SSE2 support, we can transform this to a vector shift if
  // all elements are shifted by the same amount. We can't do this in legalize
  // because a constant vector is typically transformed to a constant pool
  // load, so we have no knowledge of the shift amount.
  if (!Subtarget->hasSSE2())
    return SDValue();

  if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
      (!Subtarget->hasAVX2() ||
       (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
    return SDValue();

  SDValue ShAmtOp = N->getOperand(1);
  EVT EltVT = VT.getVectorElementType();
  DebugLoc DL = N->getDebugLoc();
  SDValue BaseShAmt = SDValue();
  if (ShAmtOp.getOpcode() == ISD::BUILD_VECTOR) {
    unsigned NumElts = VT.getVectorNumElements();
    unsigned i = 0;
    for (; i != NumElts; ++i) {
      SDValue Arg = ShAmtOp.getOperand(i);
      if (Arg.getOpcode() == ISD::UNDEF) continue;
      BaseShAmt = Arg;
      break;
    }
    // Handle the case where the build_vector is all undef.
    // FIXME: Should DAG allow this?
    if (i == NumElts)
      return SDValue();

    for (; i != NumElts; ++i) {
      SDValue Arg = ShAmtOp.getOperand(i);
      if (Arg.getOpcode() == ISD::UNDEF) continue;
      if (Arg != BaseShAmt) {
        return SDValue();
      }
    }
  } else if (ShAmtOp.getOpcode() == ISD::VECTOR_SHUFFLE &&
             cast<ShuffleVectorSDNode>(ShAmtOp)->isSplat()) {
    SDValue InVec = ShAmtOp.getOperand(0);
    if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
      unsigned NumElts = InVec.getValueType().getVectorNumElements();
      unsigned i = 0;
      for (; i != NumElts; ++i) {
        SDValue Arg = InVec.getOperand(i);
        if (Arg.getOpcode() == ISD::UNDEF) continue;
        BaseShAmt = Arg;
        break;
      }
    } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
        unsigned SplatIdx = cast<ShuffleVectorSDNode>(ShAmtOp)->getSplatIndex();
        if (C->getZExtValue() == SplatIdx)
          BaseShAmt = InVec.getOperand(1);
      }
    }
    if (BaseShAmt.getNode() == 0) {
      // Don't create instructions with illegal types after legalize
      // types has run.
      if (!DAG.getTargetLoweringInfo().isTypeLegal(EltVT) &&
          !DCI.isBeforeLegalize())
        return SDValue();

      BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, ShAmtOp,
                              DAG.getIntPtrConstant(0));
    }
  } else
    return SDValue();

  // The shift amount is an i32.
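  // For example, a v8i16 splat amount gets zero-extended from i16 to i32 and
  // a v2i64 amount gets truncated below; the target shift nodes built later
  // (VSHLI/VSRAI/VSRLI) take their count as an i32.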
14789 if (EltVT.bitsGT(MVT::i32)) 14790 BaseShAmt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, BaseShAmt); 14791 else if (EltVT.bitsLT(MVT::i32)) 14792 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, BaseShAmt); 14793 14794 // The shift amount is identical so we can do a vector shift. 14795 SDValue ValOp = N->getOperand(0); 14796 switch (N->getOpcode()) { 14797 default: 14798 llvm_unreachable("Unknown shift opcode!"); 14799 case ISD::SHL: 14800 switch (VT.getSimpleVT().SimpleTy) { 14801 default: return SDValue(); 14802 case MVT::v2i64: 14803 case MVT::v4i32: 14804 case MVT::v8i16: 14805 case MVT::v4i64: 14806 case MVT::v8i32: 14807 case MVT::v16i16: 14808 return getTargetVShiftNode(X86ISD::VSHLI, DL, VT, ValOp, BaseShAmt, DAG); 14809 } 14810 case ISD::SRA: 14811 switch (VT.getSimpleVT().SimpleTy) { 14812 default: return SDValue(); 14813 case MVT::v4i32: 14814 case MVT::v8i16: 14815 case MVT::v8i32: 14816 case MVT::v16i16: 14817 return getTargetVShiftNode(X86ISD::VSRAI, DL, VT, ValOp, BaseShAmt, DAG); 14818 } 14819 case ISD::SRL: 14820 switch (VT.getSimpleVT().SimpleTy) { 14821 default: return SDValue(); 14822 case MVT::v2i64: 14823 case MVT::v4i32: 14824 case MVT::v8i16: 14825 case MVT::v4i64: 14826 case MVT::v8i32: 14827 case MVT::v16i16: 14828 return getTargetVShiftNode(X86ISD::VSRLI, DL, VT, ValOp, BaseShAmt, DAG); 14829 } 14830 } 14831} 14832 14833 14834// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..)) 14835// where both setccs reference the same FP CMP, and rewrite for CMPEQSS 14836// and friends. Likewise for OR -> CMPNEQSS. 14837static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG, 14838 TargetLowering::DAGCombinerInfo &DCI, 14839 const X86Subtarget *Subtarget) { 14840 unsigned opcode; 14841 14842 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but 14843 // we're requiring SSE2 for both. 14844 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) { 14845 SDValue N0 = N->getOperand(0); 14846 SDValue N1 = N->getOperand(1); 14847 SDValue CMP0 = N0->getOperand(1); 14848 SDValue CMP1 = N1->getOperand(1); 14849 DebugLoc DL = N->getDebugLoc(); 14850 14851 // The SETCCs should both refer to the same CMP. 14852 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1) 14853 return SDValue(); 14854 14855 SDValue CMP00 = CMP0->getOperand(0); 14856 SDValue CMP01 = CMP0->getOperand(1); 14857 EVT VT = CMP00.getValueType(); 14858 14859 if (VT == MVT::f32 || VT == MVT::f64) { 14860 bool ExpectingFlags = false; 14861 // Check for any users that want flags: 14862 for (SDNode::use_iterator UI = N->use_begin(), 14863 UE = N->use_end(); 14864 !ExpectingFlags && UI != UE; ++UI) 14865 switch (UI->getOpcode()) { 14866 default: 14867 case ISD::BR_CC: 14868 case ISD::BRCOND: 14869 case ISD::SELECT: 14870 ExpectingFlags = true; 14871 break; 14872 case ISD::CopyToReg: 14873 case ISD::SIGN_EXTEND: 14874 case ISD::ZERO_EXTEND: 14875 case ISD::ANY_EXTEND: 14876 break; 14877 } 14878 14879 if (!ExpectingFlags) { 14880 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0); 14881 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0); 14882 14883 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) { 14884 X86::CondCode tmp = cc0; 14885 cc0 = cc1; 14886 cc1 = tmp; 14887 } 14888 14889 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) || 14890 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) { 14891 bool is64BitFP = (CMP00.getValueType() == MVT::f64); 14892 X86ISD::NodeType NTOperator = is64BitFP ? 
X86ISD::FSETCCsd : X86ISD::FSETCCss;
          // FIXME: need symbolic constants for these magic numbers.
          // See X86ATTInstPrinter.cpp:printSSECC().
          unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
          SDValue OnesOrZeroesF = DAG.getNode(NTOperator, DL, MVT::f32,
                                              CMP00, CMP01,
                                              DAG.getConstant(x86cc, MVT::i8));
          SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, MVT::i32,
                                              OnesOrZeroesF);
          SDValue ANDed = DAG.getNode(ISD::AND, DL, MVT::i32, OnesOrZeroesI,
                                      DAG.getConstant(1, MVT::i32));
          SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
                                              ANDed);
          return OneBitOfTruth;
        }
      }
    }
  }
  return SDValue();
}

/// CanFoldXORWithAllOnes - Test whether the XOR operand is an AllOnes vector
/// so it can be folded inside ANDNP.
static bool CanFoldXORWithAllOnes(const SDNode *N) {
  EVT VT = N->getValueType(0);

  // Match direct AllOnes for 128- and 256-bit vectors.
  if (ISD::isBuildVectorAllOnes(N))
    return true;

  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  // Sometimes the operand may come from an insert_subvector building a
  // 256-bit allones vector.
  if (VT.is256BitVector() &&
      N->getOpcode() == ISD::INSERT_SUBVECTOR) {
    SDValue V1 = N->getOperand(0);
    SDValue V2 = N->getOperand(1);

    if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
        V1.getOperand(0).getOpcode() == ISD::UNDEF &&
        ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
        ISD::isBuildVectorAllOnes(V2.getNode()))
      return true;
  }

  return false;
}

static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
  if (R.getNode())
    return R;

  EVT VT = N->getValueType(0);

  // Create ANDN, BLSI, and BLSR instructions:
  //   BLSI is X & (-X)
  //   BLSR is X & (X-1)
  if (Subtarget->hasBMI() && (VT == MVT::i32 || VT == MVT::i64)) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    DebugLoc DL = N->getDebugLoc();

    // Check LHS for not.
    if (N0.getOpcode() == ISD::XOR && isAllOnes(N0.getOperand(1)))
      return DAG.getNode(X86ISD::ANDN, DL, VT, N0.getOperand(0), N1);
    // Check RHS for not.
    if (N1.getOpcode() == ISD::XOR && isAllOnes(N1.getOperand(1)))
      return DAG.getNode(X86ISD::ANDN, DL, VT, N1.getOperand(0), N0);

    // Check LHS for neg.
    if (N0.getOpcode() == ISD::SUB && N0.getOperand(1) == N1 &&
        isZero(N0.getOperand(0)))
      return DAG.getNode(X86ISD::BLSI, DL, VT, N1);

    // Check RHS for neg.
    if (N1.getOpcode() == ISD::SUB && N1.getOperand(1) == N0 &&
        isZero(N1.getOperand(0)))
      return DAG.getNode(X86ISD::BLSI, DL, VT, N0);

    // Check LHS for X-1.
    if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 &&
        isAllOnes(N0.getOperand(1)))
      return DAG.getNode(X86ISD::BLSR, DL, VT, N1);

    // Check RHS for X-1.
    if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 &&
        isAllOnes(N1.getOperand(1)))
      return DAG.getNode(X86ISD::BLSR, DL, VT, N0);

    return SDValue();
  }

  // Want to form ANDNP nodes:
  // 1) In the hopes of then easily combining them with OR and AND nodes
  //    to form PBLEND/PSIGN.
  // 2) To match ANDN packed intrinsics.
  if (VT != MVT::v2i64 && VT != MVT::v4i64)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  DebugLoc DL = N->getDebugLoc();

  // Check LHS for vnot.
  if (N0.getOpcode() == ISD::XOR &&
      //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
      CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
    return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);

  // Check RHS for vnot.
  if (N1.getOpcode() == ISD::XOR &&
      //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
      CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
    return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);

  return SDValue();
}

static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const X86Subtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
  if (R.getNode())
    return R;

  EVT VT = N->getValueType(0);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Look for psign/blend.
  if (VT == MVT::v2i64 || VT == MVT::v4i64) {
    if (!Subtarget->hasSSSE3() ||
        (VT == MVT::v4i64 && !Subtarget->hasAVX2()))
      return SDValue();

    // Canonicalize pandn to RHS.
    if (N0.getOpcode() == X86ISD::ANDNP)
      std::swap(N0, N1);
    // or (and (m, y), (pandn m, x))
    if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
      SDValue Mask = N1.getOperand(0);
      SDValue X = N1.getOperand(1);
      SDValue Y;
      if (N0.getOperand(0) == Mask)
        Y = N0.getOperand(1);
      if (N0.getOperand(1) == Mask)
        Y = N0.getOperand(0);

      // Check to see if the mask appeared in both the AND and ANDNP.
      if (!Y.getNode())
        return SDValue();

      // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
      // Look through mask bitcast.
      if (Mask.getOpcode() == ISD::BITCAST)
        Mask = Mask.getOperand(0);
      if (X.getOpcode() == ISD::BITCAST)
        X = X.getOperand(0);
      if (Y.getOpcode() == ISD::BITCAST)
        Y = Y.getOperand(0);

      EVT MaskVT = Mask.getValueType();

      // Validate that the Mask operand is a vector sra node.
      // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
      // there is no psrai.b
      if (Mask.getOpcode() != X86ISD::VSRAI)
        return SDValue();

      // Check that the SRA is all signbits.
      SDValue SraC = Mask.getOperand(1);
      unsigned SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
      unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
      if ((SraAmt + 1) != EltBits)
        return SDValue();

      DebugLoc DL = N->getDebugLoc();

      // Now we know we at least have a pblendvb with the mask val. See if
      // we can form a psignb/w/d.
15084 // psign = x.type == y.type == mask.type && y = sub(0, x); 15085 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X && 15086 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) && 15087 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) { 15088 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) && 15089 "Unsupported VT for PSIGN"); 15090 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0)); 15091 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 15092 } 15093 // PBLENDVB only available on SSE 4.1 15094 if (!Subtarget->hasSSE41()) 15095 return SDValue(); 15096 15097 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8; 15098 15099 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X); 15100 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y); 15101 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask); 15102 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X); 15103 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 15104 } 15105 } 15106 15107 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64) 15108 return SDValue(); 15109 15110 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c) 15111 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL) 15112 std::swap(N0, N1); 15113 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL) 15114 return SDValue(); 15115 if (!N0.hasOneUse() || !N1.hasOneUse()) 15116 return SDValue(); 15117 15118 SDValue ShAmt0 = N0.getOperand(1); 15119 if (ShAmt0.getValueType() != MVT::i8) 15120 return SDValue(); 15121 SDValue ShAmt1 = N1.getOperand(1); 15122 if (ShAmt1.getValueType() != MVT::i8) 15123 return SDValue(); 15124 if (ShAmt0.getOpcode() == ISD::TRUNCATE) 15125 ShAmt0 = ShAmt0.getOperand(0); 15126 if (ShAmt1.getOpcode() == ISD::TRUNCATE) 15127 ShAmt1 = ShAmt1.getOperand(0); 15128 15129 DebugLoc DL = N->getDebugLoc(); 15130 unsigned Opc = X86ISD::SHLD; 15131 SDValue Op0 = N0.getOperand(0); 15132 SDValue Op1 = N1.getOperand(0); 15133 if (ShAmt0.getOpcode() == ISD::SUB) { 15134 Opc = X86ISD::SHRD; 15135 std::swap(Op0, Op1); 15136 std::swap(ShAmt0, ShAmt1); 15137 } 15138 15139 unsigned Bits = VT.getSizeInBits(); 15140 if (ShAmt1.getOpcode() == ISD::SUB) { 15141 SDValue Sum = ShAmt1.getOperand(0); 15142 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) { 15143 SDValue ShAmt1Op1 = ShAmt1.getOperand(1); 15144 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE) 15145 ShAmt1Op1 = ShAmt1Op1.getOperand(0); 15146 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0) 15147 return DAG.getNode(Opc, DL, VT, 15148 Op0, Op1, 15149 DAG.getNode(ISD::TRUNCATE, DL, 15150 MVT::i8, ShAmt0)); 15151 } 15152 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) { 15153 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0); 15154 if (ShAmt0C && 15155 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits) 15156 return DAG.getNode(Opc, DL, VT, 15157 N0.getOperand(0), N1.getOperand(0), 15158 DAG.getNode(ISD::TRUNCATE, DL, 15159 MVT::i8, ShAmt0)); 15160 } 15161 15162 return SDValue(); 15163} 15164 15165// Generate NEG and CMOV for integer abs. 15166static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) { 15167 EVT VT = N->getValueType(0); 15168 15169 // Since X86 does not have CMOV for 8-bit integer, we don't convert 15170 // 8-bit integer abs to NEG and CMOV. 
15171 if (VT.isInteger() && VT.getSizeInBits() == 8) 15172 return SDValue(); 15173 15174 SDValue N0 = N->getOperand(0); 15175 SDValue N1 = N->getOperand(1); 15176 DebugLoc DL = N->getDebugLoc(); 15177 15178 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1) 15179 // and change it to SUB and CMOV. 15180 if (VT.isInteger() && N->getOpcode() == ISD::XOR && 15181 N0.getOpcode() == ISD::ADD && 15182 N0.getOperand(1) == N1 && 15183 N1.getOpcode() == ISD::SRA && 15184 N1.getOperand(0) == N0.getOperand(0)) 15185 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1))) 15186 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) { 15187 // Generate SUB & CMOV. 15188 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32), 15189 DAG.getConstant(0, VT), N0.getOperand(0)); 15190 15191 SDValue Ops[] = { N0.getOperand(0), Neg, 15192 DAG.getConstant(X86::COND_GE, MVT::i8), 15193 SDValue(Neg.getNode(), 1) }; 15194 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), 15195 Ops, array_lengthof(Ops)); 15196 } 15197 return SDValue(); 15198} 15199 15200// PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes 15201static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG, 15202 TargetLowering::DAGCombinerInfo &DCI, 15203 const X86Subtarget *Subtarget) { 15204 if (DCI.isBeforeLegalizeOps()) 15205 return SDValue(); 15206 15207 if (Subtarget->hasCMov()) { 15208 SDValue RV = performIntegerAbsCombine(N, DAG); 15209 if (RV.getNode()) 15210 return RV; 15211 } 15212 15213 // Try forming BMI if it is available. 15214 if (!Subtarget->hasBMI()) 15215 return SDValue(); 15216 15217 EVT VT = N->getValueType(0); 15218 15219 if (VT != MVT::i32 && VT != MVT::i64) 15220 return SDValue(); 15221 15222 assert(Subtarget->hasBMI() && "Creating BLSMSK requires BMI instructions"); 15223 15224 // Create BLSMSK instructions by finding X ^ (X-1) 15225 SDValue N0 = N->getOperand(0); 15226 SDValue N1 = N->getOperand(1); 15227 DebugLoc DL = N->getDebugLoc(); 15228 15229 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && 15230 isAllOnes(N0.getOperand(1))) 15231 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N1); 15232 15233 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 && 15234 isAllOnes(N1.getOperand(1))) 15235 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N0); 15236 15237 return SDValue(); 15238} 15239 15240/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes. 15241static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, 15242 TargetLowering::DAGCombinerInfo &DCI, 15243 const X86Subtarget *Subtarget) { 15244 LoadSDNode *Ld = cast<LoadSDNode>(N); 15245 EVT RegVT = Ld->getValueType(0); 15246 EVT MemVT = Ld->getMemoryVT(); 15247 DebugLoc dl = Ld->getDebugLoc(); 15248 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15249 15250 ISD::LoadExtType Ext = Ld->getExtensionType(); 15251 15252 // If this is a vector EXT Load then attempt to optimize it using a 15253 // shuffle. We need SSE4 for the shuffles. 15254 // TODO: It is possible to support ZExt by zeroing the undef values 15255 // during the shuffle phase or after the shuffle. 
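  // Rough shape of the rewrite below (a sketch, not the exact node
  // sequence): an extending load such as "v4i32 extload from v4i8 memory"
  // becomes a few legal scalar loads, reassembled into a wide vector, plus
  // one shuffle that spreads the narrow elements to every SizeRatio-th
  // lane, so a final bitcast yields the extended vector type.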
15256 if (RegVT.isVector() && RegVT.isInteger() && 15257 Ext == ISD::EXTLOAD && Subtarget->hasSSE41()) { 15258 assert(MemVT != RegVT && "Cannot extend to the same type"); 15259 assert(MemVT.isVector() && "Must load a vector from memory"); 15260 15261 unsigned NumElems = RegVT.getVectorNumElements(); 15262 unsigned RegSz = RegVT.getSizeInBits(); 15263 unsigned MemSz = MemVT.getSizeInBits(); 15264 assert(RegSz > MemSz && "Register size must be greater than the mem size"); 15265 15266 // All sizes must be a power of two. 15267 if (!isPowerOf2_32(RegSz * MemSz * NumElems)) 15268 return SDValue(); 15269 15270 // Attempt to load the original value using scalar loads. 15271 // Find the largest scalar type that divides the total loaded size. 15272 MVT SclrLoadTy = MVT::i8; 15273 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 15274 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 15275 MVT Tp = (MVT::SimpleValueType)tp; 15276 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) { 15277 SclrLoadTy = Tp; 15278 } 15279 } 15280 15281 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64. 15282 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 && 15283 (64 <= MemSz)) 15284 SclrLoadTy = MVT::f64; 15285 15286 // Calculate the number of scalar loads that we need to perform 15287 // in order to load our vector from memory. 15288 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits(); 15289 15290 // Represent our vector as a sequence of elements which are the 15291 // largest scalar that we can load. 15292 EVT LoadUnitVecVT = EVT::getVectorVT(*DAG.getContext(), SclrLoadTy, 15293 RegSz/SclrLoadTy.getSizeInBits()); 15294 15295 // Represent the data using the same element type that is stored in 15296 // memory. In practice, we ''widen'' MemVT. 15297 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), 15298 RegSz/MemVT.getScalarType().getSizeInBits()); 15299 15300 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() && 15301 "Invalid vector type"); 15302 15303 // We can't shuffle using an illegal type. 15304 if (!TLI.isTypeLegal(WideVecVT)) 15305 return SDValue(); 15306 15307 SmallVector<SDValue, 8> Chains; 15308 SDValue Ptr = Ld->getBasePtr(); 15309 SDValue Increment = DAG.getConstant(SclrLoadTy.getSizeInBits()/8, 15310 TLI.getPointerTy()); 15311 SDValue Res = DAG.getUNDEF(LoadUnitVecVT); 15312 15313 for (unsigned i = 0; i < NumLoads; ++i) { 15314 // Perform a single load. 15315 SDValue ScalarLoad = DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), 15316 Ptr, Ld->getPointerInfo(), 15317 Ld->isVolatile(), Ld->isNonTemporal(), 15318 Ld->isInvariant(), Ld->getAlignment()); 15319 Chains.push_back(ScalarLoad.getValue(1)); 15320 // Create the first element type using SCALAR_TO_VECTOR in order to avoid 15321 // another round of DAGCombining. 15322 if (i == 0) 15323 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad); 15324 else 15325 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res, 15326 ScalarLoad, DAG.getIntPtrConstant(i)); 15327 15328 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 15329 } 15330 15331 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], 15332 Chains.size()); 15333 15334 // Bitcast the loaded value to a vector of the original element type, in 15335 // the size of the target vector type. 
15336 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res); 15337 unsigned SizeRatio = RegSz/MemSz; 15338 15339 // Redistribute the loaded elements into the different locations. 15340 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 15341 for (unsigned i = 0; i != NumElems; ++i) 15342 ShuffleVec[i*SizeRatio] = i; 15343 15344 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec, 15345 DAG.getUNDEF(WideVecVT), 15346 &ShuffleVec[0]); 15347 15348 // Bitcast to the requested type. 15349 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff); 15350 // Replace the original load with the new sequence 15351 // and return the new chain. 15352 return DCI.CombineTo(N, Shuff, TF, true); 15353 } 15354 15355 return SDValue(); 15356} 15357 15358/// PerformSTORECombine - Do target-specific dag combines on STORE nodes. 15359static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, 15360 const X86Subtarget *Subtarget) { 15361 StoreSDNode *St = cast<StoreSDNode>(N); 15362 EVT VT = St->getValue().getValueType(); 15363 EVT StVT = St->getMemoryVT(); 15364 DebugLoc dl = St->getDebugLoc(); 15365 SDValue StoredVal = St->getOperand(1); 15366 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15367 15368 // If we are saving a concatenation of two XMM registers, perform two stores. 15369 // On Sandy Bridge, 256-bit memory operations are executed by two 15370 // 128-bit ports. However, on Haswell it is better to issue a single 256-bit 15371 // memory operation. 15372 if (VT.is256BitVector() && !Subtarget->hasAVX2() && 15373 StoredVal.getNode()->getOpcode() == ISD::CONCAT_VECTORS && 15374 StoredVal.getNumOperands() == 2) { 15375 SDValue Value0 = StoredVal.getOperand(0); 15376 SDValue Value1 = StoredVal.getOperand(1); 15377 15378 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy()); 15379 SDValue Ptr0 = St->getBasePtr(); 15380 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride); 15381 15382 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0, 15383 St->getPointerInfo(), St->isVolatile(), 15384 St->isNonTemporal(), St->getAlignment()); 15385 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1, 15386 St->getPointerInfo(), St->isVolatile(), 15387 St->isNonTemporal(), St->getAlignment()); 15388 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1); 15389 } 15390 15391 // Optimize trunc store (of multiple scalars) to shuffle and store. 15392 // First, pack all of the elements in one place. Next, store to memory 15393 // in fewer chunks. 15394 if (St->isTruncatingStore() && VT.isVector()) { 15395 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15396 unsigned NumElems = VT.getVectorNumElements(); 15397 assert(StVT != VT && "Cannot truncate to the same type"); 15398 unsigned FromSz = VT.getVectorElementType().getSizeInBits(); 15399 unsigned ToSz = StVT.getVectorElementType().getSizeInBits(); 15400 15401 // From, To sizes and ElemCount must be pow of two 15402 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue(); 15403 // We are going to use the original vector elt for storing. 15404 // Accumulated smaller vector elements must be a multiple of the store size. 
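    // Editorial worked example, assuming a v8i16 value truncated to v8i8:
    //   FromSz = 16, ToSz = 8, so SizeRatio = 2 and WideVecVT = v16i8.
    // The shuffle built below moves byte lanes 0,2,4,... into the low half
    // of the register, leaving 64 payload bits that can then be stored with
    // one i64 (or f64) store instead of eight i8 stores.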
15405 if (0 != (NumElems * FromSz) % ToSz) return SDValue(); 15406 15407 unsigned SizeRatio = FromSz / ToSz; 15408 15409 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits()); 15410 15411 // Create a type on which we perform the shuffle 15412 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), 15413 StVT.getScalarType(), NumElems*SizeRatio); 15414 15415 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 15416 15417 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue()); 15418 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 15419 for (unsigned i = 0; i != NumElems; ++i) 15420 ShuffleVec[i] = i * SizeRatio; 15421 15422 // Can't shuffle using an illegal type. 15423 if (!TLI.isTypeLegal(WideVecVT)) 15424 return SDValue(); 15425 15426 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec, 15427 DAG.getUNDEF(WideVecVT), 15428 &ShuffleVec[0]); 15429 // At this point all of the data is stored at the bottom of the 15430 // register. We now need to save it to mem. 15431 15432 // Find the largest store unit 15433 MVT StoreType = MVT::i8; 15434 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 15435 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 15436 MVT Tp = (MVT::SimpleValueType)tp; 15437 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz) 15438 StoreType = Tp; 15439 } 15440 15441 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64. 15442 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 && 15443 (64 <= NumElems * ToSz)) 15444 StoreType = MVT::f64; 15445 15446 // Bitcast the original vector into a vector of store-size units 15447 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), 15448 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits()); 15449 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 15450 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff); 15451 SmallVector<SDValue, 8> Chains; 15452 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, 15453 TLI.getPointerTy()); 15454 SDValue Ptr = St->getBasePtr(); 15455 15456 // Perform one or more big stores into memory. 15457 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) { 15458 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 15459 StoreType, ShuffWide, 15460 DAG.getIntPtrConstant(i)); 15461 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr, 15462 St->getPointerInfo(), St->isVolatile(), 15463 St->isNonTemporal(), St->getAlignment()); 15464 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 15465 Chains.push_back(Ch); 15466 } 15467 15468 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], 15469 Chains.size()); 15470 } 15471 15472 15473 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering 15474 // the FP state in cases where an emms may be missing. 15475 // A preferable solution to the general problem is to figure out the right 15476 // places to insert EMMS. This qualifies as a quick hack. 15477 15478 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode. 
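  // Editorial example: in 32-bit mode with SSE2, a 64-bit copy such as
  //   %t = load i64* %p
  //   store i64 %t, i64* %q
  // becomes a single f64 (MOVQ-style) load/store pair rather than two
  // 32-bit GPR load/store pairs, provided %t has no other uses.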
15479 if (VT.getSizeInBits() != 64) 15480 return SDValue(); 15481 15482 const Function *F = DAG.getMachineFunction().getFunction(); 15483 bool NoImplicitFloatOps = F->getFnAttributes().hasNoImplicitFloatAttr(); 15484 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps 15485 && Subtarget->hasSSE2(); 15486 if ((VT.isVector() || 15487 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) && 15488 isa<LoadSDNode>(St->getValue()) && 15489 !cast<LoadSDNode>(St->getValue())->isVolatile() && 15490 St->getChain().hasOneUse() && !St->isVolatile()) { 15491 SDNode* LdVal = St->getValue().getNode(); 15492 LoadSDNode *Ld = 0; 15493 int TokenFactorIndex = -1; 15494 SmallVector<SDValue, 8> Ops; 15495 SDNode* ChainVal = St->getChain().getNode(); 15496 // Must be a store of a load. We currently handle two cases: the load 15497 // is a direct child, and it's under an intervening TokenFactor. It is 15498 // possible to dig deeper under nested TokenFactors. 15499 if (ChainVal == LdVal) 15500 Ld = cast<LoadSDNode>(St->getChain()); 15501 else if (St->getValue().hasOneUse() && 15502 ChainVal->getOpcode() == ISD::TokenFactor) { 15503 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) { 15504 if (ChainVal->getOperand(i).getNode() == LdVal) { 15505 TokenFactorIndex = i; 15506 Ld = cast<LoadSDNode>(St->getValue()); 15507 } else 15508 Ops.push_back(ChainVal->getOperand(i)); 15509 } 15510 } 15511 15512 if (!Ld || !ISD::isNormalLoad(Ld)) 15513 return SDValue(); 15514 15515 // If this is not the MMX case, i.e. we are just turning i64 load/store 15516 // into f64 load/store, avoid the transformation if there are multiple 15517 // uses of the loaded value. 15518 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0)) 15519 return SDValue(); 15520 15521 DebugLoc LdDL = Ld->getDebugLoc(); 15522 DebugLoc StDL = N->getDebugLoc(); 15523 // If we are a 64-bit capable x86, lower to a single movq load/store pair. 15524 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store 15525 // pair instead. 15526 if (Subtarget->is64Bit() || F64IsLegal) { 15527 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64; 15528 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(), 15529 Ld->getPointerInfo(), Ld->isVolatile(), 15530 Ld->isNonTemporal(), Ld->isInvariant(), 15531 Ld->getAlignment()); 15532 SDValue NewChain = NewLd.getValue(1); 15533 if (TokenFactorIndex != -1) { 15534 Ops.push_back(NewChain); 15535 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 15536 Ops.size()); 15537 } 15538 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(), 15539 St->getPointerInfo(), 15540 St->isVolatile(), St->isNonTemporal(), 15541 St->getAlignment()); 15542 } 15543 15544 // Otherwise, lower to two pairs of 32-bit loads / stores. 
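    // Editorial sketch: split the 64-bit access at byte offset 4, i.e.
    //   lo = load i32 [p];   hi = load i32 [p+4]
    //   store lo, [q];       store hi, [q+4]
    // where the high halves use MinAlign(align, 4) because the +4 address
    // may be less aligned than the base.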
15545 SDValue LoAddr = Ld->getBasePtr(); 15546 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr, 15547 DAG.getConstant(4, MVT::i32)); 15548 15549 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr, 15550 Ld->getPointerInfo(), 15551 Ld->isVolatile(), Ld->isNonTemporal(), 15552 Ld->isInvariant(), Ld->getAlignment()); 15553 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr, 15554 Ld->getPointerInfo().getWithOffset(4), 15555 Ld->isVolatile(), Ld->isNonTemporal(), 15556 Ld->isInvariant(), 15557 MinAlign(Ld->getAlignment(), 4)); 15558 15559 SDValue NewChain = LoLd.getValue(1); 15560 if (TokenFactorIndex != -1) { 15561 Ops.push_back(LoLd); 15562 Ops.push_back(HiLd); 15563 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 15564 Ops.size()); 15565 } 15566 15567 LoAddr = St->getBasePtr(); 15568 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr, 15569 DAG.getConstant(4, MVT::i32)); 15570 15571 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr, 15572 St->getPointerInfo(), 15573 St->isVolatile(), St->isNonTemporal(), 15574 St->getAlignment()); 15575 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr, 15576 St->getPointerInfo().getWithOffset(4), 15577 St->isVolatile(), 15578 St->isNonTemporal(), 15579 MinAlign(St->getAlignment(), 4)); 15580 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt); 15581 } 15582 return SDValue(); 15583} 15584 15585/// isHorizontalBinOp - Return 'true' if this vector operation is "horizontal" 15586/// and return the operands for the horizontal operation in LHS and RHS. A 15587/// horizontal operation performs the binary operation on successive elements 15588/// of its first operand, then on successive elements of its second operand, 15589/// returning the resulting values in a vector. For example, if 15590/// A = < float a0, float a1, float a2, float a3 > 15591/// and 15592/// B = < float b0, float b1, float b2, float b3 > 15593/// then the result of doing a horizontal operation on A and B is 15594/// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >. 15595/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form 15596/// A horizontal-op B, for some already available A and B, and if so then LHS is 15597/// set to A, RHS to B, and the routine returns 'true'. 15598/// Note that the binary operation should have the property that if one of the 15599/// operands is UNDEF then the result is UNDEF. 15600static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) { 15601 // Look for the following pattern: if 15602 // A = < float a0, float a1, float a2, float a3 > 15603 // B = < float b0, float b1, float b2, float b3 > 15604 // and 15605 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6> 15606 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7> 15607 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 > 15608 // which is A horizontal-op B. 15609 15610 // At least one of the operands should be a vector shuffle. 15611 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE && 15612 RHS.getOpcode() != ISD::VECTOR_SHUFFLE) 15613 return false; 15614 15615 EVT VT = LHS.getValueType(); 15616 15617 assert((VT.is128BitVector() || VT.is256BitVector()) && 15618 "Unsupported vector type for horizontal add/sub"); 15619 15620 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to 15621 // operate independently on 128-bit lanes. 
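  // Editorial example of the per-lane behavior, assuming v8f32:
  //   A hop B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3,
  //               a4 op a5, a6 op a7, b4 op b5, b6 op b7 >
  // Each 128-bit lane draws from A and B independently, which is why the
  // index arithmetic below is done lane by lane.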
15622 unsigned NumElts = VT.getVectorNumElements(); 15623 unsigned NumLanes = VT.getSizeInBits()/128; 15624 unsigned NumLaneElts = NumElts / NumLanes; 15625 assert((NumLaneElts % 2 == 0) && 15626 "Vector type should have an even number of elements in each lane"); 15627 unsigned HalfLaneElts = NumLaneElts/2; 15628 15629 // View LHS in the form 15630 // LHS = VECTOR_SHUFFLE A, B, LMask 15631 // If LHS is not a shuffle then pretend it is the shuffle 15632 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1> 15633 // NOTE: in what follows a default initialized SDValue represents an UNDEF of 15634 // type VT. 15635 SDValue A, B; 15636 SmallVector<int, 16> LMask(NumElts); 15637 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) { 15638 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF) 15639 A = LHS.getOperand(0); 15640 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF) 15641 B = LHS.getOperand(1); 15642 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask(); 15643 std::copy(Mask.begin(), Mask.end(), LMask.begin()); 15644 } else { 15645 if (LHS.getOpcode() != ISD::UNDEF) 15646 A = LHS; 15647 for (unsigned i = 0; i != NumElts; ++i) 15648 LMask[i] = i; 15649 } 15650 15651 // Likewise, view RHS in the form 15652 // RHS = VECTOR_SHUFFLE C, D, RMask 15653 SDValue C, D; 15654 SmallVector<int, 16> RMask(NumElts); 15655 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) { 15656 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF) 15657 C = RHS.getOperand(0); 15658 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF) 15659 D = RHS.getOperand(1); 15660 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask(); 15661 std::copy(Mask.begin(), Mask.end(), RMask.begin()); 15662 } else { 15663 if (RHS.getOpcode() != ISD::UNDEF) 15664 C = RHS; 15665 for (unsigned i = 0; i != NumElts; ++i) 15666 RMask[i] = i; 15667 } 15668 15669 // Check that the shuffles are both shuffling the same vectors. 15670 if (!(A == C && B == D) && !(A == D && B == C)) 15671 return false; 15672 15673 // If everything is UNDEF then bail out: it would be better to fold to UNDEF. 15674 if (!A.getNode() && !B.getNode()) 15675 return false; 15676 15677 // If A and B occur in reverse order in RHS, then "swap" them (which means 15678 // rewriting the mask). 15679 if (A != C) 15680 CommuteVectorShuffleMask(RMask, NumElts); 15681 15682 // At this point LHS and RHS are equivalent to 15683 // LHS = VECTOR_SHUFFLE A, B, LMask 15684 // RHS = VECTOR_SHUFFLE A, B, RMask 15685 // Check that the masks correspond to performing a horizontal operation. 15686 for (unsigned i = 0; i != NumElts; ++i) { 15687 int LIdx = LMask[i], RIdx = RMask[i]; 15688 15689 // Ignore any UNDEF components. 15690 if (LIdx < 0 || RIdx < 0 || 15691 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) || 15692 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts))) 15693 continue; 15694 15695 // Check that successive elements are being operated on. If not, this is 15696 // not a horizontal operation. 15697 unsigned Src = (i/HalfLaneElts) % 2; // each lane is split between srcs 15698 unsigned LaneStart = (i/NumLaneElts) * NumLaneElts; 15699 int Index = 2*(i%HalfLaneElts) + NumElts*Src + LaneStart; 15700 if (!(LIdx == Index && RIdx == Index + 1) && 15701 !(IsCommutative && LIdx == Index + 1 && RIdx == Index)) 15702 return false; 15703 } 15704 15705 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it. 15706 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it. 
15707 return true; 15708} 15709 15710/// PerformFADDCombine - Do target-specific dag combines on floating point adds. 15711static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, 15712 const X86Subtarget *Subtarget) { 15713 EVT VT = N->getValueType(0); 15714 SDValue LHS = N->getOperand(0); 15715 SDValue RHS = N->getOperand(1); 15716 15717 // Try to synthesize horizontal adds from adds of shuffles. 15718 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || 15719 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && 15720 isHorizontalBinOp(LHS, RHS, true)) 15721 return DAG.getNode(X86ISD::FHADD, N->getDebugLoc(), VT, LHS, RHS); 15722 return SDValue(); 15723} 15724 15725/// PerformFSUBCombine - Do target-specific dag combines on floating point subs. 15726static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG, 15727 const X86Subtarget *Subtarget) { 15728 EVT VT = N->getValueType(0); 15729 SDValue LHS = N->getOperand(0); 15730 SDValue RHS = N->getOperand(1); 15731 15732 // Try to synthesize horizontal subs from subs of shuffles. 15733 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || 15734 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && 15735 isHorizontalBinOp(LHS, RHS, false)) 15736 return DAG.getNode(X86ISD::FHSUB, N->getDebugLoc(), VT, LHS, RHS); 15737 return SDValue(); 15738} 15739 15740/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and 15741/// X86ISD::FXOR nodes. 15742static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) { 15743 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR); 15744 // F[X]OR(0.0, x) -> x 15745 // F[X]OR(x, 0.0) -> x 15746 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 15747 if (C->getValueAPF().isPosZero()) 15748 return N->getOperand(1); 15749 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 15750 if (C->getValueAPF().isPosZero()) 15751 return N->getOperand(0); 15752 return SDValue(); 15753} 15754 15755/// PerformFMinFMaxCombine - Do target-specific dag combines on X86ISD::FMIN and 15756/// X86ISD::FMAX nodes. 15757static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) { 15758 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX); 15759 15760 // Only perform optimizations if UnsafeMath is used. 15761 if (!DAG.getTarget().Options.UnsafeFPMath) 15762 return SDValue(); 15763 15764 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes 15765 // into FMINC and FMAXC, which are Commutative operations. 15766 unsigned NewOp = 0; 15767 switch (N->getOpcode()) { 15768 default: llvm_unreachable("unknown opcode"); 15769 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break; 15770 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break; 15771 } 15772 15773 return DAG.getNode(NewOp, N->getDebugLoc(), N->getValueType(0), 15774 N->getOperand(0), N->getOperand(1)); 15775} 15776 15777 15778/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes. 
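/// Editorial note: FAND is a bitwise AND of the operands' bit patterns, so
/// AND with +0.0 (an all-zero pattern) yields +0.0. The checks below use
/// isPosZero() deliberately: -0.0 has its sign bit set, so ANDing with it
/// would keep the other operand's sign bit rather than produce zero.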
15779static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { 15780 // FAND(0.0, x) -> 0.0 15781 // FAND(x, 0.0) -> 0.0 15782 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 15783 if (C->getValueAPF().isPosZero()) 15784 return N->getOperand(0); 15785 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 15786 if (C->getValueAPF().isPosZero()) 15787 return N->getOperand(1); 15788 return SDValue(); 15789} 15790 15791static SDValue PerformBTCombine(SDNode *N, 15792 SelectionDAG &DAG, 15793 TargetLowering::DAGCombinerInfo &DCI) { 15794 // BT ignores high bits in the bit index operand. 15795 SDValue Op1 = N->getOperand(1); 15796 if (Op1.hasOneUse()) { 15797 unsigned BitWidth = Op1.getValueSizeInBits(); 15798 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth)); 15799 APInt KnownZero, KnownOne; 15800 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 15801 !DCI.isBeforeLegalizeOps()); 15802 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15803 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) || 15804 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO)) 15805 DCI.CommitTargetLoweringOpt(TLO); 15806 } 15807 return SDValue(); 15808} 15809 15810static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) { 15811 SDValue Op = N->getOperand(0); 15812 if (Op.getOpcode() == ISD::BITCAST) 15813 Op = Op.getOperand(0); 15814 EVT VT = N->getValueType(0), OpVT = Op.getValueType(); 15815 if (Op.getOpcode() == X86ISD::VZEXT_LOAD && 15816 VT.getVectorElementType().getSizeInBits() == 15817 OpVT.getVectorElementType().getSizeInBits()) { 15818 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 15819 } 15820 return SDValue(); 15821} 15822 15823static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG, 15824 TargetLowering::DAGCombinerInfo &DCI, 15825 const X86Subtarget *Subtarget) { 15826 if (!DCI.isBeforeLegalizeOps()) 15827 return SDValue(); 15828 15829 if (!Subtarget->hasAVX()) 15830 return SDValue(); 15831 15832 EVT VT = N->getValueType(0); 15833 SDValue Op = N->getOperand(0); 15834 EVT OpVT = Op.getValueType(); 15835 DebugLoc dl = N->getDebugLoc(); 15836 15837 if ((VT == MVT::v4i64 && OpVT == MVT::v4i32) || 15838 (VT == MVT::v8i32 && OpVT == MVT::v8i16)) { 15839 15840 if (Subtarget->hasAVX2()) 15841 return DAG.getNode(X86ISD::VSEXT_MOVL, dl, VT, Op); 15842 15843 // Optimize vectors in AVX mode 15844 // Sign extend v8i16 to v8i32 and 15845 // v4i32 to v4i64 15846 // 15847 // Divide input vector into two parts 15848 // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1} 15849 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32 15850 // concat the vectors to original VT 15851 15852 unsigned NumElems = OpVT.getVectorNumElements(); 15853 SDValue Undef = DAG.getUNDEF(OpVT); 15854 15855 SmallVector<int,8> ShufMask1(NumElems, -1); 15856 for (unsigned i = 0; i != NumElems/2; ++i) 15857 ShufMask1[i] = i; 15858 15859 SDValue OpLo = DAG.getVectorShuffle(OpVT, dl, Op, Undef, &ShufMask1[0]); 15860 15861 SmallVector<int,8> ShufMask2(NumElems, -1); 15862 for (unsigned i = 0; i != NumElems/2; ++i) 15863 ShufMask2[i] = i + NumElems/2; 15864 15865 SDValue OpHi = DAG.getVectorShuffle(OpVT, dl, Op, Undef, &ShufMask2[0]); 15866 15867 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), 15868 VT.getVectorNumElements()/2); 15869 15870 OpLo = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpLo); 15871 OpHi = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpHi); 
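    // Editorial note: each half has now been sign-extended in place
    // (e.g. v8i16 -> two v4i32 halves); concatenating them below rebuilds
    // the full-width result vector.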
15872 15873 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); 15874 } 15875 return SDValue(); 15876} 15877 15878static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG, 15879 const X86Subtarget* Subtarget) { 15880 DebugLoc dl = N->getDebugLoc(); 15881 EVT VT = N->getValueType(0); 15882 15883 // Let legalize expand this if it isn't a legal type yet. 15884 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 15885 return SDValue(); 15886 15887 EVT ScalarVT = VT.getScalarType(); 15888 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || 15889 (!Subtarget->hasFMA() && !Subtarget->hasFMA4())) 15890 return SDValue(); 15891 15892 SDValue A = N->getOperand(0); 15893 SDValue B = N->getOperand(1); 15894 SDValue C = N->getOperand(2); 15895 15896 bool NegA = (A.getOpcode() == ISD::FNEG); 15897 bool NegB = (B.getOpcode() == ISD::FNEG); 15898 bool NegC = (C.getOpcode() == ISD::FNEG); 15899 15900 // Negative multiplication when NegA xor NegB 15901 bool NegMul = (NegA != NegB); 15902 if (NegA) 15903 A = A.getOperand(0); 15904 if (NegB) 15905 B = B.getOperand(0); 15906 if (NegC) 15907 C = C.getOperand(0); 15908 15909 unsigned Opcode; 15910 if (!NegMul) 15911 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB; 15912 else 15913 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB; 15914 15915 return DAG.getNode(Opcode, dl, VT, A, B, C); 15916} 15917 15918static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG, 15919 TargetLowering::DAGCombinerInfo &DCI, 15920 const X86Subtarget *Subtarget) { 15921 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) -> 15922 // (and (i32 x86isd::setcc_carry), 1) 15923 // This eliminates the zext. This transformation is necessary because 15924 // ISD::SETCC is always legalized to i8. 15925 DebugLoc dl = N->getDebugLoc(); 15926 SDValue N0 = N->getOperand(0); 15927 EVT VT = N->getValueType(0); 15928 EVT OpVT = N0.getValueType(); 15929 15930 if (N0.getOpcode() == ISD::AND && 15931 N0.hasOneUse() && 15932 N0.getOperand(0).hasOneUse()) { 15933 SDValue N00 = N0.getOperand(0); 15934 if (N00.getOpcode() != X86ISD::SETCC_CARRY) 15935 return SDValue(); 15936 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 15937 if (!C || C->getZExtValue() != 1) 15938 return SDValue(); 15939 return DAG.getNode(ISD::AND, dl, VT, 15940 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, 15941 N00.getOperand(0), N00.getOperand(1)), 15942 DAG.getConstant(1, VT)); 15943 } 15944 15945 // Optimize vectors in AVX mode: 15946 // 15947 // v8i16 -> v8i32 15948 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32. 15949 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32. 15950 // Concat upper and lower parts. 15951 // 15952 // v4i32 -> v4i64 15953 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64. 15954 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64. 15955 // Concat upper and lower parts. 
15956 // 15957 if (!DCI.isBeforeLegalizeOps()) 15958 return SDValue(); 15959 15960 if (!Subtarget->hasAVX()) 15961 return SDValue(); 15962 15963 if (((VT == MVT::v8i32) && (OpVT == MVT::v8i16)) || 15964 ((VT == MVT::v4i64) && (OpVT == MVT::v4i32))) { 15965 15966 if (Subtarget->hasAVX2()) 15967 return DAG.getNode(X86ISD::VZEXT_MOVL, dl, VT, N0); 15968 15969 SDValue ZeroVec = getZeroVector(OpVT, Subtarget, DAG, dl); 15970 SDValue OpLo = getUnpackl(DAG, dl, OpVT, N0, ZeroVec); 15971 SDValue OpHi = getUnpackh(DAG, dl, OpVT, N0, ZeroVec); 15972 15973 EVT HVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 15974 VT.getVectorNumElements()/2); 15975 15976 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo); 15977 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi); 15978 15979 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); 15980 } 15981 15982 return SDValue(); 15983} 15984 15985// Optimize x == -y --> x+y == 0 15986// x != -y --> x+y != 0 15987static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) { 15988 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 15989 SDValue LHS = N->getOperand(0); 15990 SDValue RHS = N->getOperand(1); 15991 15992 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB) 15993 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0))) 15994 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) { 15995 SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(), 15996 LHS.getValueType(), RHS, LHS.getOperand(1)); 15997 return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0), 15998 addV, DAG.getConstant(0, addV.getValueType()), CC); 15999 } 16000 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB) 16001 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0))) 16002 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) { 16003 SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(), 16004 RHS.getValueType(), LHS, RHS.getOperand(1)); 16005 return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0), 16006 addV, DAG.getConstant(0, addV.getValueType()), CC); 16007 } 16008 return SDValue(); 16009} 16010 16011// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT 16012static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG, 16013 TargetLowering::DAGCombinerInfo &DCI, 16014 const X86Subtarget *Subtarget) { 16015 DebugLoc DL = N->getDebugLoc(); 16016 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0)); 16017 SDValue EFLAGS = N->getOperand(1); 16018 16019 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without 16020 // a zext and produces an all-ones bit which is more useful than 0/1 in some 16021 // cases. 16022 if (CC == X86::COND_B) 16023 return DAG.getNode(ISD::AND, DL, MVT::i8, 16024 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8, 16025 DAG.getConstant(CC, MVT::i8), EFLAGS), 16026 DAG.getConstant(1, MVT::i8)); 16027 16028 SDValue Flags; 16029 16030 Flags = checkBoolTestSetCCCombine(EFLAGS, CC); 16031 if (Flags.getNode()) { 16032 SDValue Cond = DAG.getConstant(CC, MVT::i8); 16033 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags); 16034 } 16035 16036 return SDValue(); 16037} 16038 16039// Optimize branch condition evaluation. 
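// Editorial example: if EFLAGS is produced by re-testing a value that is
// itself the result of an X86ISD::SETCC, checkBoolTestSetCCCombine can
// hand back the original flag-producing node (possibly with an adjusted
// condition code), letting the branch use the original condition directly.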
16040// 16041static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG, 16042 TargetLowering::DAGCombinerInfo &DCI, 16043 const X86Subtarget *Subtarget) { 16044 DebugLoc DL = N->getDebugLoc(); 16045 SDValue Chain = N->getOperand(0); 16046 SDValue Dest = N->getOperand(1); 16047 SDValue EFLAGS = N->getOperand(3); 16048 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2)); 16049 16050 SDValue Flags; 16051 16052 Flags = checkBoolTestSetCCCombine(EFLAGS, CC); 16053 if (Flags.getNode()) { 16054 SDValue Cond = DAG.getConstant(CC, MVT::i8); 16055 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond, 16056 Flags); 16057 } 16058 16059 return SDValue(); 16060} 16061 16062static SDValue PerformUINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG) { 16063 SDValue Op0 = N->getOperand(0); 16064 EVT InVT = Op0->getValueType(0); 16065 16066 // UINT_TO_FP(v4i8) -> SINT_TO_FP(ZEXT(v4i8 to v4i32)) 16067 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) { 16068 DebugLoc dl = N->getDebugLoc(); 16069 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32; 16070 SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0); 16071 // Notice that we use SINT_TO_FP because we know that the high bits 16072 // are zero and SINT_TO_FP is better supported by the hardware. 16073 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P); 16074 } 16075 16076 return SDValue(); 16077} 16078 16079static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, 16080 const X86TargetLowering *XTLI) { 16081 SDValue Op0 = N->getOperand(0); 16082 EVT InVT = Op0->getValueType(0); 16083 16084 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32)) 16085 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) { 16086 DebugLoc dl = N->getDebugLoc(); 16087 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32; 16088 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0); 16089 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P); 16090 } 16091 16092 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have 16093 // a 32-bit target where SSE doesn't support i64->FP operations. 16094 if (Op0.getOpcode() == ISD::LOAD) { 16095 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode()); 16096 EVT VT = Ld->getValueType(0); 16097 if (!Ld->isVolatile() && !N->getValueType(0).isVector() && 16098 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() && 16099 !XTLI->getSubtarget()->is64Bit() && 16100 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 16101 SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0), 16102 Ld->getChain(), Op0, DAG); 16103 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1)); 16104 return FILDChain; 16105 } 16106 } 16107 return SDValue(); 16108} 16109 16110static SDValue PerformFP_TO_SINTCombine(SDNode *N, SelectionDAG &DAG) { 16111 EVT VT = N->getValueType(0); 16112 16113 // v4i8 = FP_TO_SINT() -> v4i8 = TRUNCATE (V4i32 = FP_TO_SINT() 16114 if (VT == MVT::v8i8 || VT == MVT::v4i8) { 16115 DebugLoc dl = N->getDebugLoc(); 16116 MVT DstVT = VT == MVT::v4i8 ? 
MVT::v4i32 : MVT::v8i32; 16117 SDValue I = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, N->getOperand(0)); 16118 return DAG.getNode(ISD::TRUNCATE, dl, VT, I); 16119 } 16120 16121 return SDValue(); 16122} 16123 16124// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS 16125static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG, 16126 X86TargetLowering::DAGCombinerInfo &DCI) { 16127 // If the LHS and RHS of the ADC node are zero, then it can't overflow and 16128 // the result is either zero or one (depending on the input carry bit). 16129 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1. 16130 if (X86::isZeroNode(N->getOperand(0)) && 16131 X86::isZeroNode(N->getOperand(1)) && 16132 // We don't have a good way to replace an EFLAGS use, so only do this when 16133 // dead right now. 16134 SDValue(N, 1).use_empty()) { 16135 DebugLoc DL = N->getDebugLoc(); 16136 EVT VT = N->getValueType(0); 16137 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1)); 16138 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT, 16139 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, 16140 DAG.getConstant(X86::COND_B,MVT::i8), 16141 N->getOperand(2)), 16142 DAG.getConstant(1, VT)); 16143 return DCI.CombineTo(N, Res1, CarryOut); 16144 } 16145 16146 return SDValue(); 16147} 16148 16149// fold (add Y, (sete X, 0)) -> adc 0, Y 16150// (add Y, (setne X, 0)) -> sbb -1, Y 16151// (sub (sete X, 0), Y) -> sbb 0, Y 16152// (sub (setne X, 0), Y) -> adc -1, Y 16153static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) { 16154 DebugLoc DL = N->getDebugLoc(); 16155 16156 // Look through ZExts. 16157 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0); 16158 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse()) 16159 return SDValue(); 16160 16161 SDValue SetCC = Ext.getOperand(0); 16162 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse()) 16163 return SDValue(); 16164 16165 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0); 16166 if (CC != X86::COND_E && CC != X86::COND_NE) 16167 return SDValue(); 16168 16169 SDValue Cmp = SetCC.getOperand(1); 16170 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() || 16171 !X86::isZeroNode(Cmp.getOperand(1)) || 16172 !Cmp.getOperand(0).getValueType().isInteger()) 16173 return SDValue(); 16174 16175 SDValue CmpOp0 = Cmp.getOperand(0); 16176 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0, 16177 DAG.getConstant(1, CmpOp0.getValueType())); 16178 16179 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1); 16180 if (CC == X86::COND_NE) 16181 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB, 16182 DL, OtherVal.getValueType(), OtherVal, 16183 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp); 16184 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC, 16185 DL, OtherVal.getValueType(), OtherVal, 16186 DAG.getConstant(0, OtherVal.getValueType()), NewCmp); 16187} 16188 16189/// PerformADDCombine - Do target-specific dag combines on integer adds. 16190static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG, 16191 const X86Subtarget *Subtarget) { 16192 EVT VT = N->getValueType(0); 16193 SDValue Op0 = N->getOperand(0); 16194 SDValue Op1 = N->getOperand(1); 16195 16196 // Try to synthesize horizontal adds from adds of shuffles. 
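  // Editorial example, assuming v8i16 with SSSE3:
  //   add (shuffle A,B,<0,2,4,6,8,10,12,14>),
  //       (shuffle A,B,<1,3,5,7,9,11,13,15>)
  // pairs up adjacent elements of A and then of B, so it becomes a single
  // X86ISD::HADD A, B (i.e. PHADDW).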
16197 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || 16198 (Subtarget->hasAVX2() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && 16199 isHorizontalBinOp(Op0, Op1, true)) 16200 return DAG.getNode(X86ISD::HADD, N->getDebugLoc(), VT, Op0, Op1); 16201 16202 return OptimizeConditionalInDecrement(N, DAG); 16203} 16204 16205static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG, 16206 const X86Subtarget *Subtarget) { 16207 SDValue Op0 = N->getOperand(0); 16208 SDValue Op1 = N->getOperand(1); 16209 16210 // X86 can't encode an immediate LHS of a sub. See if we can push the 16211 // negation into a preceding instruction. 16212 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) { 16213 // If the RHS of the sub is a XOR with one use and a constant, invert the 16214 // immediate. Then add one to the LHS of the sub so we can turn 16215 // X-Y -> X+~Y+1, saving one register. 16216 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR && 16217 isa<ConstantSDNode>(Op1.getOperand(1))) { 16218 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue(); 16219 EVT VT = Op0.getValueType(); 16220 SDValue NewXor = DAG.getNode(ISD::XOR, Op1.getDebugLoc(), VT, 16221 Op1.getOperand(0), 16222 DAG.getConstant(~XorC, VT)); 16223 return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, NewXor, 16224 DAG.getConstant(C->getAPIntValue()+1, VT)); 16225 } 16226 } 16227 16228 // Try to synthesize horizontal adds from adds of shuffles. 16229 EVT VT = N->getValueType(0); 16230 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || 16231 (Subtarget->hasAVX2() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && 16232 isHorizontalBinOp(Op0, Op1, true)) 16233 return DAG.getNode(X86ISD::HSUB, N->getDebugLoc(), VT, Op0, Op1); 16234 16235 return OptimizeConditionalInDecrement(N, DAG); 16236} 16237 16238SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, 16239 DAGCombinerInfo &DCI) const { 16240 SelectionDAG &DAG = DCI.DAG; 16241 switch (N->getOpcode()) { 16242 default: break; 16243 case ISD::EXTRACT_VECTOR_ELT: 16244 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI); 16245 case ISD::VSELECT: 16246 case ISD::SELECT: return PerformSELECTCombine(N, DAG, DCI, Subtarget); 16247 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget); 16248 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget); 16249 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget); 16250 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI); 16251 case ISD::MUL: return PerformMulCombine(N, DAG, DCI); 16252 case ISD::SHL: 16253 case ISD::SRA: 16254 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget); 16255 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget); 16256 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget); 16257 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget); 16258 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget); 16259 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget); 16260 case ISD::UINT_TO_FP: return PerformUINT_TO_FPCombine(N, DAG); 16261 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this); 16262 case ISD::FP_TO_SINT: return PerformFP_TO_SINTCombine(N, DAG); 16263 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget); 16264 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget); 16265 case X86ISD::FXOR: 16266 case X86ISD::FOR: return PerformFORCombine(N, DAG); 16267 case X86ISD::FMIN: 16268 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG); 16269 
  case X86ISD::FAND:        return PerformFANDCombine(N, DAG);
  case X86ISD::BT:          return PerformBTCombine(N, DAG, DCI);
  case X86ISD::VZEXT_MOVL:  return PerformVZEXT_MOVLCombine(N, DAG);
  case ISD::ANY_EXTEND:
  case ISD::ZERO_EXTEND:    return PerformZExtCombine(N, DAG, DCI, Subtarget);
  case ISD::SIGN_EXTEND:    return PerformSExtCombine(N, DAG, DCI, Subtarget);
  case ISD::TRUNCATE:       return PerformTruncateCombine(N, DAG, DCI, Subtarget);
  case ISD::SETCC:          return PerformISDSETCCCombine(N, DAG);
  case X86ISD::SETCC:       return PerformSETCCCombine(N, DAG, DCI, Subtarget);
  case X86ISD::BRCOND:      return PerformBrCondCombine(N, DAG, DCI, Subtarget);
  case X86ISD::SHUFP:       // Handle all target specific shuffles
  case X86ISD::PALIGN:
  case X86ISD::UNPCKH:
  case X86ISD::UNPCKL:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLHPS:
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case X86ISD::VPERMILP:
  case X86ISD::VPERM2X128:
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI, Subtarget);
  case ISD::FMA:            return PerformFMACombine(N, DAG, Subtarget);
  }

  return SDValue();
}

/// isTypeDesirableForOp - Return true if the target has native support for
/// the specified value type and it is 'desirable' to use the type for the
/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
/// instruction encodings are longer and some i16 instructions are slow.
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
  if (!isTypeLegal(VT))
    return false;
  if (VT != MVT::i16)
    return true;

  switch (Opc) {
  default:
    return true;
  case ISD::LOAD:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SUB:
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    return false;
  }
}

/// IsDesirableToPromoteOp - This method queries the target whether it is
/// beneficial for the dag combiner to promote the specified node. If true,
/// it should return the desired promotion type by reference.
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
  EVT VT = Op.getValueType();
  if (VT != MVT::i16)
    return false;

  bool Promote = false;
  bool Commute = false;
  switch (Op.getOpcode()) {
  default: break;
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    // If the non-extending load has a single use and it's not live out, then
    // it might be folded.
    if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
                                                     Op.hasOneUse()*/) {
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = Op.getNode()->use_end(); UI != UE; ++UI) {
        // The only case where we'd want to promote LOAD (rather than it
        // being promoted as an operand) is when its only use is a
        // CopyToReg, i.e. the value is live out.
        if (UI->getOpcode() != ISD::CopyToReg)
          return false;
      }
    }
    Promote = true;
    break;
  }
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    Promote = true;
    break;
  case ISD::SHL:
  case ISD::SRL: {
    SDValue N0 = Op.getOperand(0);
    // Look out for (store (shl (load), x)).
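    // Editorial note: promoting the shift to i32 would widen the load with
    // it, so the load could no longer be folded into a memory form of the
    // shift that feeds the store; decline the promotion in that case.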
16366 if (MayFoldLoad(N0) && MayFoldIntoStore(Op)) 16367 return false; 16368 Promote = true; 16369 break; 16370 } 16371 case ISD::ADD: 16372 case ISD::MUL: 16373 case ISD::AND: 16374 case ISD::OR: 16375 case ISD::XOR: 16376 Commute = true; 16377 // fallthrough 16378 case ISD::SUB: { 16379 SDValue N0 = Op.getOperand(0); 16380 SDValue N1 = Op.getOperand(1); 16381 if (!Commute && MayFoldLoad(N1)) 16382 return false; 16383 // Avoid disabling potential load folding opportunities. 16384 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op))) 16385 return false; 16386 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op))) 16387 return false; 16388 Promote = true; 16389 } 16390 } 16391 16392 PVT = MVT::i32; 16393 return Promote; 16394} 16395 16396//===----------------------------------------------------------------------===// 16397// X86 Inline Assembly Support 16398//===----------------------------------------------------------------------===// 16399 16400namespace { 16401 // Helper to match a string separated by whitespace. 16402 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) { 16403 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace. 16404 16405 for (unsigned i = 0, e = args.size(); i != e; ++i) { 16406 StringRef piece(*args[i]); 16407 if (!s.startswith(piece)) // Check if the piece matches. 16408 return false; 16409 16410 s = s.substr(piece.size()); 16411 StringRef::size_type pos = s.find_first_not_of(" \t"); 16412 if (pos == 0) // We matched a prefix. 16413 return false; 16414 16415 s = s.substr(pos); 16416 } 16417 16418 return s.empty(); 16419 } 16420 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={}; 16421} 16422 16423bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const { 16424 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 16425 16426 std::string AsmStr = IA->getAsmString(); 16427 16428 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 16429 if (!Ty || Ty->getBitWidth() % 16 != 0) 16430 return false; 16431 16432 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a" 16433 SmallVector<StringRef, 4> AsmPieces; 16434 SplitString(AsmStr, AsmPieces, ";\n"); 16435 16436 switch (AsmPieces.size()) { 16437 default: return false; 16438 case 1: 16439 // FIXME: this should verify that we are targeting a 486 or better. If not, 16440 // we will turn this bswap into something that will be lowered to logical 16441 // ops instead of emitting the bswap asm. For now, we don't support 486 or 16442 // lower so don't worry about this. 16443 // bswap $0 16444 if (matchAsm(AsmPieces[0], "bswap", "$0") || 16445 matchAsm(AsmPieces[0], "bswapl", "$0") || 16446 matchAsm(AsmPieces[0], "bswapq", "$0") || 16447 matchAsm(AsmPieces[0], "bswap", "${0:q}") || 16448 matchAsm(AsmPieces[0], "bswapl", "${0:q}") || 16449 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) { 16450 // No need to check constraints, nothing other than the equivalent of 16451 // "=r,0" would be valid here. 
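      // Editorial example of source that reaches this point:
      //   int v;  __asm__("bswap %0" : "=r"(v) : "0"(v));
      // (the "%0" becomes "$0" in the IR asm string), which is replaced by
      // a call to llvm.bswap.i32 below.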
16452 return IntrinsicLowering::LowerToByteSwap(CI); 16453 } 16454 16455 // rorw $$8, ${0:w} --> llvm.bswap.i16 16456 if (CI->getType()->isIntegerTy(16) && 16457 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 && 16458 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") || 16459 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) { 16460 AsmPieces.clear(); 16461 const std::string &ConstraintsStr = IA->getConstraintString(); 16462 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); 16463 std::sort(AsmPieces.begin(), AsmPieces.end()); 16464 if (AsmPieces.size() == 4 && 16465 AsmPieces[0] == "~{cc}" && 16466 AsmPieces[1] == "~{dirflag}" && 16467 AsmPieces[2] == "~{flags}" && 16468 AsmPieces[3] == "~{fpsr}") 16469 return IntrinsicLowering::LowerToByteSwap(CI); 16470 } 16471 break; 16472 case 3: 16473 if (CI->getType()->isIntegerTy(32) && 16474 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 && 16475 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") && 16476 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") && 16477 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) { 16478 AsmPieces.clear(); 16479 const std::string &ConstraintsStr = IA->getConstraintString(); 16480 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); 16481 std::sort(AsmPieces.begin(), AsmPieces.end()); 16482 if (AsmPieces.size() == 4 && 16483 AsmPieces[0] == "~{cc}" && 16484 AsmPieces[1] == "~{dirflag}" && 16485 AsmPieces[2] == "~{flags}" && 16486 AsmPieces[3] == "~{fpsr}") 16487 return IntrinsicLowering::LowerToByteSwap(CI); 16488 } 16489 16490 if (CI->getType()->isIntegerTy(64)) { 16491 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints(); 16492 if (Constraints.size() >= 2 && 16493 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" && 16494 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") { 16495 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64 16496 if (matchAsm(AsmPieces[0], "bswap", "%eax") && 16497 matchAsm(AsmPieces[1], "bswap", "%edx") && 16498 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx")) 16499 return IntrinsicLowering::LowerToByteSwap(CI); 16500 } 16501 } 16502 break; 16503 } 16504 return false; 16505} 16506 16507 16508 16509/// getConstraintType - Given a constraint letter, return the type of 16510/// constraint it is for this target. 16511X86TargetLowering::ConstraintType 16512X86TargetLowering::getConstraintType(const std::string &Constraint) const { 16513 if (Constraint.size() == 1) { 16514 switch (Constraint[0]) { 16515 case 'R': 16516 case 'q': 16517 case 'Q': 16518 case 'f': 16519 case 't': 16520 case 'u': 16521 case 'y': 16522 case 'x': 16523 case 'Y': 16524 case 'l': 16525 return C_RegisterClass; 16526 case 'a': 16527 case 'b': 16528 case 'c': 16529 case 'd': 16530 case 'S': 16531 case 'D': 16532 case 'A': 16533 return C_Register; 16534 case 'I': 16535 case 'J': 16536 case 'K': 16537 case 'L': 16538 case 'M': 16539 case 'N': 16540 case 'G': 16541 case 'C': 16542 case 'e': 16543 case 'Z': 16544 return C_Other; 16545 default: 16546 break; 16547 } 16548 } 16549 return TargetLowering::getConstraintType(Constraint); 16550} 16551 16552/// Examine constraint type and operand type and determine a weight value. 16553/// This object must already have been set up with the operand type 16554/// and the current alternative constraint selected. 
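/// Editorial examples of the mapping below: an "I" constraint whose call
/// operand is a ConstantInt <= 31 weighs CW_Constant, while an "x"
/// constraint on a 128-bit vector type with SSE1 weighs CW_Register.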
16555TargetLowering::ConstraintWeight 16556 X86TargetLowering::getSingleConstraintMatchWeight( 16557 AsmOperandInfo &info, const char *constraint) const { 16558 ConstraintWeight weight = CW_Invalid; 16559 Value *CallOperandVal = info.CallOperandVal; 16560 // If we don't have a value, we can't do a match, 16561 // but allow it at the lowest weight. 16562 if (CallOperandVal == NULL) 16563 return CW_Default; 16564 Type *type = CallOperandVal->getType(); 16565 // Look at the constraint type. 16566 switch (*constraint) { 16567 default: 16568 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 16569 case 'R': 16570 case 'q': 16571 case 'Q': 16572 case 'a': 16573 case 'b': 16574 case 'c': 16575 case 'd': 16576 case 'S': 16577 case 'D': 16578 case 'A': 16579 if (CallOperandVal->getType()->isIntegerTy()) 16580 weight = CW_SpecificReg; 16581 break; 16582 case 'f': 16583 case 't': 16584 case 'u': 16585 if (type->isFloatingPointTy()) 16586 weight = CW_SpecificReg; 16587 break; 16588 case 'y': 16589 if (type->isX86_MMXTy() && Subtarget->hasMMX()) 16590 weight = CW_SpecificReg; 16591 break; 16592 case 'x': 16593 case 'Y': 16594 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) || 16595 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasAVX())) 16596 weight = CW_Register; 16597 break; 16598 case 'I': 16599 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) { 16600 if (C->getZExtValue() <= 31) 16601 weight = CW_Constant; 16602 } 16603 break; 16604 case 'J': 16605 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 16606 if (C->getZExtValue() <= 63) 16607 weight = CW_Constant; 16608 } 16609 break; 16610 case 'K': 16611 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 16612 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f)) 16613 weight = CW_Constant; 16614 } 16615 break; 16616 case 'L': 16617 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 16618 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff)) 16619 weight = CW_Constant; 16620 } 16621 break; 16622 case 'M': 16623 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 16624 if (C->getZExtValue() <= 3) 16625 weight = CW_Constant; 16626 } 16627 break; 16628 case 'N': 16629 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 16630 if (C->getZExtValue() <= 0xff) 16631 weight = CW_Constant; 16632 } 16633 break; 16634 case 'G': 16635 case 'C': 16636 if (dyn_cast<ConstantFP>(CallOperandVal)) { 16637 weight = CW_Constant; 16638 } 16639 break; 16640 case 'e': 16641 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 16642 if ((C->getSExtValue() >= -0x80000000LL) && 16643 (C->getSExtValue() <= 0x7fffffffLL)) 16644 weight = CW_Constant; 16645 } 16646 break; 16647 case 'Z': 16648 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 16649 if (C->getZExtValue() <= 0xffffffff) 16650 weight = CW_Constant; 16651 } 16652 break; 16653 } 16654 return weight; 16655} 16656 16657/// LowerXConstraint - try to replace an X constraint, which matches anything, 16658/// with another that has more specific requirements based on the type of the 16659/// corresponding operand. 16660const char *X86TargetLowering:: 16661LowerXConstraint(EVT ConstraintVT) const { 16662 // FP X constraints get lowered to SSE1/2 registers if available, otherwise 16663 // 'f' like normal targets. 
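  // Editorial example: an "X" constraint on a float operand is rewritten
  // to "Y" when SSE2 is available, so the value lands in an XMM register
  // instead of an x87 stack slot.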
16664 if (ConstraintVT.isFloatingPoint()) { 16665 if (Subtarget->hasSSE2()) 16666 return "Y"; 16667 if (Subtarget->hasSSE1()) 16668 return "x"; 16669 } 16670 16671 return TargetLowering::LowerXConstraint(ConstraintVT); 16672} 16673 16674/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 16675/// vector. If it is invalid, don't add anything to Ops. 16676void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 16677 std::string &Constraint, 16678 std::vector<SDValue>&Ops, 16679 SelectionDAG &DAG) const { 16680 SDValue Result(0, 0); 16681 16682 // Only support length 1 constraints for now. 16683 if (Constraint.length() > 1) return; 16684 16685 char ConstraintLetter = Constraint[0]; 16686 switch (ConstraintLetter) { 16687 default: break; 16688 case 'I': 16689 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 16690 if (C->getZExtValue() <= 31) { 16691 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 16692 break; 16693 } 16694 } 16695 return; 16696 case 'J': 16697 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 16698 if (C->getZExtValue() <= 63) { 16699 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 16700 break; 16701 } 16702 } 16703 return; 16704 case 'K': 16705 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 16706 if ((int8_t)C->getSExtValue() == C->getSExtValue()) { 16707 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 16708 break; 16709 } 16710 } 16711 return; 16712 case 'N': 16713 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 16714 if (C->getZExtValue() <= 255) { 16715 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 16716 break; 16717 } 16718 } 16719 return; 16720 case 'e': { 16721 // 32-bit signed value 16722 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 16723 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()), 16724 C->getSExtValue())) { 16725 // Widen to 64 bits here to get it sign extended. 16726 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64); 16727 break; 16728 } 16729 // FIXME gcc accepts some relocatable values here too, but only in certain 16730 // memory models; it's complicated. 16731 } 16732 return; 16733 } 16734 case 'Z': { 16735 // 32-bit unsigned value 16736 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 16737 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()), 16738 C->getZExtValue())) { 16739 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 16740 break; 16741 } 16742 } 16743 // FIXME gcc accepts some relocatable values here too, but only in certain 16744 // memory models; it's complicated. 16745 return; 16746 } 16747 case 'i': { 16748 // Literal immediates are always ok. 16749 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) { 16750 // Widen to 64 bits here to get it sign extended. 16751 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64); 16752 break; 16753 } 16754 16755 // In any sort of PIC mode addresses need to be computed at runtime by 16756 // adding in a register or some sort of table lookup. These can't 16757 // be used as immediates. 16758 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC()) 16759 return; 16760 16761 // If we are in non-pic codegen mode, we allow the address of a global (with 16762 // an optional displacement) to be used with 'i'. 16763 GlobalAddressSDNode *GA = 0; 16764 int64_t Offset = 0; 16765 16766 // Match either (GA), (GA+C), (GA+C1+C2), etc. 
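    // Editorial example: an 'i' operand such as &g.f[4] typically arrives
    // as (add (GlobalAddress @g), 16); the loop below walks ADD/SUB chains
    // like that, accumulating the constant parts into Offset.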
    while (1) {
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
        Offset += GA->getOffset();
        break;
      } else if (Op.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      } else if (Op.getOpcode() == ISD::SUB) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += -C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      }

      // Otherwise, this isn't something we can handle, reject it.
      return;
    }

    const GlobalValue *GV = GA->getGlobal();
    // If we require an extra load to get this address, as in PIC mode, we
    // can't accept it.
    if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV,
                                                        getTargetMachine())))
      return;

    Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
                                        GA->getValueType(0), Offset);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                EVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    // TODO: Slight differences here in allocation order and leaving
    // RIP in the class.  Do they matter any more here than they do
    // in the normal allocation?
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
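      // For example (hypothetical use): asm("" : "=q"(v)) may be assigned
      // any general-purpose register in 64-bit mode, but only one of
      // %eax/%ebx/%ecx/%edx (the Q_REGS) when compiling for 32-bit.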
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q':   // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i64)
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32RegClass);
      return std::make_pair(0U, &X86::GR64RegClass);
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      return std::make_pair(0U, &X86::GR64_NOREXRegClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      return std::make_pair(0U, &X86::RFP80RegClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.getSimpleVT().SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, &X86::FR64RegClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        return std::make_pair(0U, &X86::VR256RegClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
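  // Explicit references such as "{st(3)}", "{st}", "{flags}" and "A" have no
  // entry in the generated register-name table, so they are matched by hand
  // below.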
  if (Res.second == 0) {
    // Map st(0) .. st(7) to ST0 .. ST7.
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {
      Res.first = X86::ST0 + Constraint[4] - '0';
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::ST0;
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = &X86::CCRRegClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = &X86::GR32_ADRegClass;
      return Res;
    }
    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" to {eax}; we don't want it
  // to turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second == &X86::GR16RegClass) {
    if (VT == MVT::i8) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR8RegClass;
      }
    } else if (VT == MVT::i32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR32RegClass;
      }
    } else if (VT == MVT::i64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR64RegClass;
      }
    }
  } else if (Res.second == &X86::FR32RegClass ||
             Res.second == &X86::FR64RegClass ||
             Res.second == &X86::VR128RegClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class.
    // This can happen with constraints like {xmm0}, where the
    // target independent register mapper will just pick the first match it
    // can find, ignoring the required type.

    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32RegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64RegClass;
    else if (X86::VR128RegClass.hasType(VT))
      Res.second = &X86::VR128RegClass;
    else if (X86::VR256RegClass.hasType(VT))
      Res.second = &X86::VR256RegClass;
  }

  return Res;
}
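// Worked example of the remapping above (illustrative): for an operand
// constrained to "{ax}" with VT == MVT::i64, the generic lookup returns
// (X86::AX, GR16RegClass); the code above rewrites that to
// (X86::RAX, GR64RegClass) so the full 64-bit register is allocated.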