X86ISelLowering.cpp revision 363496
1//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that X86 uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "X86ISelLowering.h"
15#include "Utils/X86ShuffleDecode.h"
16#include "X86CallingConv.h"
17#include "X86FrameLowering.h"
18#include "X86InstrBuilder.h"
19#include "X86IntrinsicsInfo.h"
20#include "X86MachineFunctionInfo.h"
21#include "X86TargetMachine.h"
22#include "X86TargetObjectFile.h"
23#include "llvm/ADT/SmallBitVector.h"
24#include "llvm/ADT/SmallSet.h"
25#include "llvm/ADT/Statistic.h"
26#include "llvm/ADT/StringExtras.h"
27#include "llvm/ADT/StringSwitch.h"
28#include "llvm/Analysis/BlockFrequencyInfo.h"
29#include "llvm/Analysis/EHPersonalities.h"
30#include "llvm/Analysis/ProfileSummaryInfo.h"
31#include "llvm/CodeGen/IntrinsicLowering.h"
32#include "llvm/CodeGen/MachineFrameInfo.h"
33#include "llvm/CodeGen/MachineFunction.h"
34#include "llvm/CodeGen/MachineInstrBuilder.h"
35#include "llvm/CodeGen/MachineJumpTableInfo.h"
36#include "llvm/CodeGen/MachineModuleInfo.h"
37#include "llvm/CodeGen/MachineRegisterInfo.h"
38#include "llvm/CodeGen/TargetLowering.h"
39#include "llvm/CodeGen/WinEHFuncInfo.h"
40#include "llvm/IR/CallSite.h"
41#include "llvm/IR/CallingConv.h"
42#include "llvm/IR/Constants.h"
43#include "llvm/IR/DerivedTypes.h"
44#include "llvm/IR/DiagnosticInfo.h"
45#include "llvm/IR/Function.h"
46#include "llvm/IR/GlobalAlias.h"
47#include "llvm/IR/GlobalVariable.h"
48#include "llvm/IR/Instructions.h"
49#include "llvm/IR/Intrinsics.h"
50#include "llvm/MC/MCAsmInfo.h"
51#include "llvm/MC/MCContext.h"
52#include "llvm/MC/MCExpr.h"
53#include "llvm/MC/MCSymbol.h"
54#include "llvm/Support/CommandLine.h"
55#include "llvm/Support/Debug.h"
56#include "llvm/Support/ErrorHandling.h"
57#include "llvm/Support/KnownBits.h"
58#include "llvm/Support/MathExtras.h"
59#include "llvm/Target/TargetOptions.h"
60#include <algorithm>
61#include <bitset>
62#include <cctype>
63#include <numeric>
64using namespace llvm;
65
66#define DEBUG_TYPE "x86-isel"
67
68STATISTIC(NumTailCalls, "Number of tail calls");
69
70static cl::opt<int> ExperimentalPrefLoopAlignment(
71    "x86-experimental-pref-loop-alignment", cl::init(4),
72    cl::desc(
73        "Sets the preferred loop alignment for experiments (as log2 bytes) "
74        "(the last x86-experimental-pref-loop-alignment bits"
75        " of the loop header PC will be 0)."),
76    cl::Hidden);
77
78// Added in 10.0.
79static cl::opt<bool> EnableOldKNLABI(
80    "x86-enable-old-knl-abi", cl::init(false),
81    cl::desc("Enables passing v32i16 and v64i8 in 2 YMM registers instead of "
82             "one ZMM register on targets with AVX512F but not AVX512BW."),
83    cl::Hidden);
84
85static cl::opt<bool> MulConstantOptimization(
86    "mul-constant-optimization", cl::init(true),
87    cl::desc("Replace 'mul x, Const' with more effective instructions like "
88             "SHIFT, LEA, etc."),
89    cl::Hidden);
90
91static cl::opt<bool> ExperimentalUnorderedISEL(
92    "x86-experimental-unordered-atomic-isel", cl::init(false),
93    cl::desc("Use LoadSDNode and StoreSDNode instead of "
94             "AtomicSDNode for unordered atomic loads and "
95             "stores respectively."),
96    cl::Hidden);
97
98/// Call this when the user attempts to do something unsupported, like
99/// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
100/// report_fatal_error, so calling code should attempt to recover without
101/// crashing.
102static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
103                             const char *Msg) {
104  MachineFunction &MF = DAG.getMachineFunction();
105  DAG.getContext()->diagnose(
106      DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
107}
108
109X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
110                                     const X86Subtarget &STI)
111    : TargetLowering(TM), Subtarget(STI) {
112  bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
113  X86ScalarSSEf64 = Subtarget.hasSSE2();
114  X86ScalarSSEf32 = Subtarget.hasSSE1();
115  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
116
117  // Set up the TargetLowering object.
118
119  // X86 is weird. It always uses i8 for shift amounts and setcc results.
120  setBooleanContents(ZeroOrOneBooleanContent);
121  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
122  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
123
124  // For 64-bit, since we have so many registers, use the ILP scheduler.
125  // For 32-bit, use the register pressure specific scheduling.
126  // For Atom, always use ILP scheduling.
127  if (Subtarget.isAtom())
128    setSchedulingPreference(Sched::ILP);
129  else if (Subtarget.is64Bit())
130    setSchedulingPreference(Sched::ILP);
131  else
132    setSchedulingPreference(Sched::RegPressure);
133  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
134  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
135
136  // Bypass expensive divides and use cheaper ones.
137  if (TM.getOptLevel() >= CodeGenOpt::Default) {
138    if (Subtarget.hasSlowDivide32())
139      addBypassSlowDiv(32, 8);
140    if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
141      addBypassSlowDiv(64, 32);
142  }
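  // NOTE: addBypassSlowDiv(32, 8) makes the BypassSlowDivision pass emit a
  // runtime check and use the cheaper 8-bit divide when both 32-bit operands
  // happen to fit in 8 bits (common for small loop counters).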
143
144  if (Subtarget.isTargetWindowsMSVC() ||
145      Subtarget.isTargetWindowsItanium()) {
146    // Setup Windows compiler runtime calls.
147    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
148    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
149    setLibcallName(RTLIB::SREM_I64, "_allrem");
150    setLibcallName(RTLIB::UREM_I64, "_aullrem");
151    setLibcallName(RTLIB::MUL_I64, "_allmul");
152    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
153    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
154    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
155    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
156    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
157  }
158
159  if (Subtarget.getTargetTriple().isOSMSVCRT()) {
160    // MSVCRT doesn't have powi; fall back to pow
161    setLibcallName(RTLIB::POWI_F32, nullptr);
162    setLibcallName(RTLIB::POWI_F64, nullptr);
163  }
164
165  // If we don't have cmpxchg8b (meaning this is a 386/486), limit atomic size to
166  // 32 bits so the AtomicExpandPass will expand it and we don't need cmpxchg8b.
167  // FIXME: Should we be limiting the atomic size on other configs? Default is
168  // 1024.
169  if (!Subtarget.hasCmpxchg8b())
170    setMaxAtomicSizeInBitsSupported(32);
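  // NOTE: atomic operations wider than this limit are turned into __atomic_*
  // libcalls by AtomicExpandPass rather than being lowered inline.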
171
172  // Set up the register classes.
173  addRegisterClass(MVT::i8, &X86::GR8RegClass);
174  addRegisterClass(MVT::i16, &X86::GR16RegClass);
175  addRegisterClass(MVT::i32, &X86::GR32RegClass);
176  if (Subtarget.is64Bit())
177    addRegisterClass(MVT::i64, &X86::GR64RegClass);
178
179  for (MVT VT : MVT::integer_valuetypes())
180    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
181
182  // We don't accept any truncstore of integer registers.
183  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
184  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
185  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
186  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
187  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
188  setTruncStoreAction(MVT::i16, MVT::i8,  Expand);
189
190  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
191
192  // SETOEQ and SETUNE require checking two conditions.
193  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
194  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
195  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
196  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
197  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
198  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
199
200  // Integer absolute.
201  if (Subtarget.hasCMov()) {
202    setOperationAction(ISD::ABS            , MVT::i16  , Custom);
203    setOperationAction(ISD::ABS            , MVT::i32  , Custom);
204  }
205  setOperationAction(ISD::ABS              , MVT::i64  , Custom);
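  // NOTE: the custom ABS lowering typically emits a NEG followed by a CMOV of
  // the original value, avoiding a branch when CMOV is available.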
206
207  // Funnel shifts.
208  for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
209    setOperationAction(ShiftOp             , MVT::i16  , Custom);
210    setOperationAction(ShiftOp             , MVT::i32  , Custom);
211    if (Subtarget.is64Bit())
212      setOperationAction(ShiftOp           , MVT::i64  , Custom);
213  }
214
215  if (!Subtarget.useSoftFloat()) {
216    // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
217    // operation.
218    setOperationAction(ISD::UINT_TO_FP,        MVT::i8, Promote);
219    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i8, Promote);
220    setOperationAction(ISD::UINT_TO_FP,        MVT::i16, Promote);
221    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i16, Promote);
222    // We have an algorithm for SSE2, and we turn this into a 64-bit
223    // FILD or VCVTUSI2SS/SD for other targets.
224    setOperationAction(ISD::UINT_TO_FP,        MVT::i32, Custom);
225    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
226    // We have an algorithm for SSE2->double, and we turn this into a
227    // 64-bit FILD followed by conditional FADD for other targets.
228    setOperationAction(ISD::UINT_TO_FP,        MVT::i64, Custom);
229    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
230
231    // Promote i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
232    // this operation.
233    setOperationAction(ISD::SINT_TO_FP,        MVT::i8, Promote);
234    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i8, Promote);
235    // SSE has no i16 to fp conversion, only i32. We promote in the handler
236    // to allow f80 to use i16 and f64 to use i16 with SSE1 only.
237    setOperationAction(ISD::SINT_TO_FP,        MVT::i16, Custom);
238    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i16, Custom);
239    // f32 and f64 cases are Legal with SSE1/SSE2; the f80 case is not.
240    setOperationAction(ISD::SINT_TO_FP,        MVT::i32, Custom);
241    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
242    // In 32-bit mode these are custom lowered. In 64-bit mode f32 and f64
243    // are Legal, and f80 is custom lowered.
244    setOperationAction(ISD::SINT_TO_FP,        MVT::i64, Custom);
245    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
246
247    // Promote i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
248    // this operation.
249    setOperationAction(ISD::FP_TO_SINT,        MVT::i8,  Promote);
250    // FIXME: This doesn't generate invalid exception when it should. PR44019.
251    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8,  Promote);
252    setOperationAction(ISD::FP_TO_SINT,        MVT::i16, Custom);
253    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom);
254    setOperationAction(ISD::FP_TO_SINT,        MVT::i32, Custom);
255    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
256    // In 32-bit mode these are custom lowered. In 64-bit mode f32 and f64
257    // are Legal, and f80 is custom lowered.
258    setOperationAction(ISD::FP_TO_SINT,        MVT::i64, Custom);
259    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
260
261    // Handle FP_TO_UINT by promoting the destination to a larger signed
262    // conversion.
263    setOperationAction(ISD::FP_TO_UINT,        MVT::i8,  Promote);
264    // FIXME: This doesn't generate invalid exception when it should. PR44019.
265    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i8,  Promote);
266    setOperationAction(ISD::FP_TO_UINT,        MVT::i16, Promote);
267    // FIXME: This doesn't generate invalid exception when it should. PR44019.
268    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote);
269    setOperationAction(ISD::FP_TO_UINT,        MVT::i32, Custom);
270    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
271    setOperationAction(ISD::FP_TO_UINT,        MVT::i64, Custom);
272    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
273  }
274
275  // Handle address space casts between mixed sized pointers.
276  setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
277  setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
278
279  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
280  if (!X86ScalarSSEf64) {
281    setOperationAction(ISD::BITCAST        , MVT::f32  , Expand);
282    setOperationAction(ISD::BITCAST        , MVT::i32  , Expand);
283    if (Subtarget.is64Bit()) {
284      setOperationAction(ISD::BITCAST      , MVT::f64  , Expand);
285      // Without SSE, i64->f64 goes through memory.
286      setOperationAction(ISD::BITCAST      , MVT::i64  , Expand);
287    }
288  } else if (!Subtarget.is64Bit())
289    setOperationAction(ISD::BITCAST      , MVT::i64  , Custom);
290
291  // Scalar integer divide and remainder are lowered to use operations that
292  // produce two results, to match the available instructions. This exposes
293  // the two-result form to trivial CSE, which is able to combine x/y and x%y
294  // into a single instruction.
295  //
296  // Scalar integer multiply-high is also lowered to use two-result
297  // operations, to match the available instructions. However, plain multiply
298  // (low) operations are left as Legal, as there are single-result
299  // instructions for this in x86. Using the two-result multiply instructions
300  // when both high and low results are needed must be arranged by dagcombine.
301  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
302    setOperationAction(ISD::MULHS, VT, Expand);
303    setOperationAction(ISD::MULHU, VT, Expand);
304    setOperationAction(ISD::SDIV, VT, Expand);
305    setOperationAction(ISD::UDIV, VT, Expand);
306    setOperationAction(ISD::SREM, VT, Expand);
307    setOperationAction(ISD::UREM, VT, Expand);
308  }
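  // For example, a single x86 DIV/IDIV already computes both the quotient and
  // the remainder, so CSE lets x/y and x%y share one instruction.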
309
310  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
311  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
312  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
313                   MVT::i8,  MVT::i16, MVT::i32, MVT::i64 }) {
314    setOperationAction(ISD::BR_CC,     VT, Expand);
315    setOperationAction(ISD::SELECT_CC, VT, Expand);
316  }
317  if (Subtarget.is64Bit())
318    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
319  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
320  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
321  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
322
323  setOperationAction(ISD::FREM             , MVT::f32  , Expand);
324  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
325  setOperationAction(ISD::FREM             , MVT::f80  , Expand);
326  setOperationAction(ISD::FREM             , MVT::f128 , Expand);
327  setOperationAction(ISD::FLT_ROUNDS_      , MVT::i32  , Custom);
328
329  // Promote the i8 variants and force them up to i32, which has a shorter
330  // encoding.
331  setOperationPromotedToType(ISD::CTTZ           , MVT::i8   , MVT::i32);
332  setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
333  if (!Subtarget.hasBMI()) {
334    setOperationAction(ISD::CTTZ           , MVT::i16  , Custom);
335    setOperationAction(ISD::CTTZ           , MVT::i32  , Custom);
336    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16  , Legal);
337    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32  , Legal);
338    if (Subtarget.is64Bit()) {
339      setOperationAction(ISD::CTTZ         , MVT::i64  , Custom);
340      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
341    }
342  }
343
344  if (Subtarget.hasLZCNT()) {
345    // When promoting the i8 variants, force them to i32 for a shorter
346    // encoding.
347    setOperationPromotedToType(ISD::CTLZ           , MVT::i8   , MVT::i32);
348    setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
349  } else {
350    setOperationAction(ISD::CTLZ           , MVT::i8   , Custom);
351    setOperationAction(ISD::CTLZ           , MVT::i16  , Custom);
352    setOperationAction(ISD::CTLZ           , MVT::i32  , Custom);
353    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8   , Custom);
354    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16  , Custom);
355    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32  , Custom);
356    if (Subtarget.is64Bit()) {
357      setOperationAction(ISD::CTLZ         , MVT::i64  , Custom);
358      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
359    }
360  }
361
362  // Special handling for half-precision floating point conversions.
363  // If we don't have F16C support, then lower half float conversions
364  // into library calls.
365  if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
366    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
367    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
368  }
369
370  // There's never any support for operations beyond MVT::f32.
371  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
372  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
373  setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
374  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
375  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
376  setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);
377
378  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
379  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
380  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
381  setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f16, Expand);
382  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
383  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
384  setTruncStoreAction(MVT::f80, MVT::f16, Expand);
385  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
386
387  if (Subtarget.hasPOPCNT()) {
388    setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
389  } else {
390    setOperationAction(ISD::CTPOP          , MVT::i8   , Expand);
391    setOperationAction(ISD::CTPOP          , MVT::i16  , Expand);
392    setOperationAction(ISD::CTPOP          , MVT::i32  , Expand);
393    if (Subtarget.is64Bit())
394      setOperationAction(ISD::CTPOP        , MVT::i64  , Expand);
395    else
396      setOperationAction(ISD::CTPOP        , MVT::i64  , Custom);
397  }
398
399  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
400
401  if (!Subtarget.hasMOVBE())
402    setOperationAction(ISD::BSWAP          , MVT::i16  , Expand);
403
404  // X86 wants to expand cmov itself.
405  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
406    setOperationAction(ISD::SELECT, VT, Custom);
407    setOperationAction(ISD::SETCC, VT, Custom);
408    setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
409    setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
410  }
411  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
412    if (VT == MVT::i64 && !Subtarget.is64Bit())
413      continue;
414    setOperationAction(ISD::SELECT, VT, Custom);
415    setOperationAction(ISD::SETCC,  VT, Custom);
416  }
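  // NOTE: the custom SELECT lowering produces X86ISD::CMOV nodes; on targets
  // without CMOV these are later expanded back into branch sequences.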
417
418  // Custom action for SELECT MMX and expand action for SELECT_CC MMX
419  setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
420  setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);
421
422  setOperationAction(ISD::EH_RETURN       , MVT::Other, Custom);
423  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
424  // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
425  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
426  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
427  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
428  if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
429    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
430
431  // Darwin ABI issue.
432  for (auto VT : { MVT::i32, MVT::i64 }) {
433    if (VT == MVT::i64 && !Subtarget.is64Bit())
434      continue;
435    setOperationAction(ISD::ConstantPool    , VT, Custom);
436    setOperationAction(ISD::JumpTable       , VT, Custom);
437    setOperationAction(ISD::GlobalAddress   , VT, Custom);
438    setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
439    setOperationAction(ISD::ExternalSymbol  , VT, Custom);
440    setOperationAction(ISD::BlockAddress    , VT, Custom);
441  }
442
443  // 64-bit shl, sra, srl (iff 32-bit x86)
444  for (auto VT : { MVT::i32, MVT::i64 }) {
445    if (VT == MVT::i64 && !Subtarget.is64Bit())
446      continue;
447    setOperationAction(ISD::SHL_PARTS, VT, Custom);
448    setOperationAction(ISD::SRA_PARTS, VT, Custom);
449    setOperationAction(ISD::SRL_PARTS, VT, Custom);
450  }
451
452  if (Subtarget.hasSSEPrefetch() || Subtarget.has3DNow())
453    setOperationAction(ISD::PREFETCH      , MVT::Other, Legal);
454
455  setOperationAction(ISD::ATOMIC_FENCE  , MVT::Other, Custom);
456
457  // Expand certain atomics
458  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
459    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
460    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
461    setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
462    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
463    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
464    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
465    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
466  }
467
468  if (!Subtarget.is64Bit())
469    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
470
471  if (Subtarget.hasCmpxchg16b()) {
472    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
473  }
474
475  // FIXME - use subtarget debug flags
476  if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
477      !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
478      TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
479    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
480  }
481
482  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
483  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
484
485  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
486  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
487
488  setOperationAction(ISD::TRAP, MVT::Other, Legal);
489  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
490
491  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
492  setOperationAction(ISD::VASTART           , MVT::Other, Custom);
493  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
494  bool Is64Bit = Subtarget.is64Bit();
495  setOperationAction(ISD::VAARG,  MVT::Other, Is64Bit ? Custom : Expand);
496  setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);
497
498  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
499  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
500
501  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
502
503  // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
504  setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
505  setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
506
507  if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
508    // f32 and f64 use SSE.
509    // Set up the FP register classes.
510    addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
511                                                     : &X86::FR32RegClass);
512    addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
513                                                     : &X86::FR64RegClass);
514
515    // Disable f32->f64 extload as we can only generate this in one instruction
516    // under optsize. So it's easier to pattern match (fpext (load)) for that
517    // case instead of needing to emit 2 instructions for extload in the
518    // non-optsize case.
519    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
520
521    for (auto VT : { MVT::f32, MVT::f64 }) {
522      // Use ANDPD to simulate FABS.
523      setOperationAction(ISD::FABS, VT, Custom);
524
525      // Use XORP to simulate FNEG.
526      setOperationAction(ISD::FNEG, VT, Custom);
527
528      // Use ANDPD and ORPD to simulate FCOPYSIGN.
529      setOperationAction(ISD::FCOPYSIGN, VT, Custom);
530
531      // These might be better off as horizontal vector ops.
532      setOperationAction(ISD::FADD, VT, Custom);
533      setOperationAction(ISD::FSUB, VT, Custom);
534
535      // We don't support sin/cos/fmod
536      setOperationAction(ISD::FSIN   , VT, Expand);
537      setOperationAction(ISD::FCOS   , VT, Expand);
538      setOperationAction(ISD::FSINCOS, VT, Expand);
539    }
540
541    // Lower this to MOVMSK plus an AND.
542    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
543    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
544  } else if (!Subtarget.useSoftFloat() && X86ScalarSSEf32 && (UseX87 || Is64Bit)) {
545  } else if (!useSoftFloat() && X86ScalarSSEf32 && (UseX87 || Is64Bit)) {
546    // Use SSE for f32, x87 for f64.
547    // Set up the FP register classes.
548    addRegisterClass(MVT::f32, &X86::FR32RegClass);
549    if (UseX87)
550      addRegisterClass(MVT::f64, &X86::RFP64RegClass);
551
552    // Use ANDPS to simulate FABS.
553    setOperationAction(ISD::FABS , MVT::f32, Custom);
554
555    // Use XORP to simulate FNEG.
556    setOperationAction(ISD::FNEG , MVT::f32, Custom);
557
558    if (UseX87)
559      setOperationAction(ISD::UNDEF, MVT::f64, Expand);
560
561    // Use ANDPS and ORPS to simulate FCOPYSIGN.
562    if (UseX87)
563      setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
564    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
565
566    // We don't support sin/cos/fmod
567    setOperationAction(ISD::FSIN   , MVT::f32, Expand);
568    setOperationAction(ISD::FCOS   , MVT::f32, Expand);
569    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
570
571    if (UseX87) {
572      // Always expand sin/cos functions even though x87 has an instruction.
573      setOperationAction(ISD::FSIN, MVT::f64, Expand);
574      setOperationAction(ISD::FCOS, MVT::f64, Expand);
575      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
576    }
577  } else if (UseX87) {
578    // f32 and f64 in x87.
579    // Set up the FP register classes.
580    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
581    addRegisterClass(MVT::f32, &X86::RFP32RegClass);
582
583    for (auto VT : { MVT::f32, MVT::f64 }) {
584      setOperationAction(ISD::UNDEF,     VT, Expand);
585      setOperationAction(ISD::FCOPYSIGN, VT, Expand);
586
587      // Always expand sin/cos functions even though x87 has an instruction.
588      setOperationAction(ISD::FSIN   , VT, Expand);
589      setOperationAction(ISD::FCOS   , VT, Expand);
590      setOperationAction(ISD::FSINCOS, VT, Expand);
591    }
592  }
593
594  // Expand FP32 immediates into loads from the stack, save special cases.
595  if (isTypeLegal(MVT::f32)) {
596    if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
597      addLegalFPImmediate(APFloat(+0.0f)); // FLD0
598      addLegalFPImmediate(APFloat(+1.0f)); // FLD1
599      addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
600      addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
601    } else // SSE immediates.
602      addLegalFPImmediate(APFloat(+0.0f)); // xorps
603  }
604  // Expand FP64 immediates into loads from the stack, save special cases.
605  if (isTypeLegal(MVT::f64)) {
606    if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
607      addLegalFPImmediate(APFloat(+0.0)); // FLD0
608      addLegalFPImmediate(APFloat(+1.0)); // FLD1
609      addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
610      addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
611    } else // SSE immediates.
612      addLegalFPImmediate(APFloat(+0.0)); // xorpd
613  }
614  // Handle constrained floating-point operations on scalars.
615  setOperationAction(ISD::STRICT_FADD,      MVT::f32, Legal);
616  setOperationAction(ISD::STRICT_FADD,      MVT::f64, Legal);
617  setOperationAction(ISD::STRICT_FSUB,      MVT::f32, Legal);
618  setOperationAction(ISD::STRICT_FSUB,      MVT::f64, Legal);
619  setOperationAction(ISD::STRICT_FMUL,      MVT::f32, Legal);
620  setOperationAction(ISD::STRICT_FMUL,      MVT::f64, Legal);
621  setOperationAction(ISD::STRICT_FDIV,      MVT::f32, Legal);
622  setOperationAction(ISD::STRICT_FDIV,      MVT::f64, Legal);
623  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
624  setOperationAction(ISD::STRICT_FP_ROUND,  MVT::f32, Legal);
625  setOperationAction(ISD::STRICT_FP_ROUND,  MVT::f64, Legal);
626  setOperationAction(ISD::STRICT_FSQRT,     MVT::f32, Legal);
627  setOperationAction(ISD::STRICT_FSQRT,     MVT::f64, Legal);
628
629  // We don't support FMA.
630  setOperationAction(ISD::FMA, MVT::f64, Expand);
631  setOperationAction(ISD::FMA, MVT::f32, Expand);
632
633  // f80 always uses X87.
634  if (UseX87) {
635    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
636    setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
637    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
638    {
639      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
640      addLegalFPImmediate(TmpFlt);  // FLD0
641      TmpFlt.changeSign();
642      addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
643
644      bool ignored;
645      APFloat TmpFlt2(+1.0);
646      TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
647                      &ignored);
648      addLegalFPImmediate(TmpFlt2);  // FLD1
649      TmpFlt2.changeSign();
650      addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
651    }
652
653    // Always expand sin/cos functions even though x87 has an instruction.
654    setOperationAction(ISD::FSIN   , MVT::f80, Expand);
655    setOperationAction(ISD::FCOS   , MVT::f80, Expand);
656    setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
657
658    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
659    setOperationAction(ISD::FCEIL,  MVT::f80, Expand);
660    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
661    setOperationAction(ISD::FRINT,  MVT::f80, Expand);
662    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
663    setOperationAction(ISD::FMA, MVT::f80, Expand);
664    setOperationAction(ISD::LROUND, MVT::f80, Expand);
665    setOperationAction(ISD::LLROUND, MVT::f80, Expand);
666    setOperationAction(ISD::LRINT, MVT::f80, Expand);
667    setOperationAction(ISD::LLRINT, MVT::f80, Expand);
668
669    // Handle constrained floating-point operations on scalars.
670    setOperationAction(ISD::STRICT_FADD     , MVT::f80, Legal);
671    setOperationAction(ISD::STRICT_FSUB     , MVT::f80, Legal);
672    setOperationAction(ISD::STRICT_FMUL     , MVT::f80, Legal);
673    setOperationAction(ISD::STRICT_FDIV     , MVT::f80, Legal);
674    setOperationAction(ISD::STRICT_FSQRT    , MVT::f80, Legal);
675    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Legal);
676    // FIXME: When the target is 64-bit, STRICT_FP_ROUND will be overwritten
677    // as Custom.
678    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Legal);
679  }
680
681  // f128 uses xmm registers, but most operations require libcalls.
682  if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
683    addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
684                                                   : &X86::VR128RegClass);
685
686    addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps
687
688    setOperationAction(ISD::FADD,        MVT::f128, LibCall);
689    setOperationAction(ISD::STRICT_FADD, MVT::f128, LibCall);
690    setOperationAction(ISD::FSUB,        MVT::f128, LibCall);
691    setOperationAction(ISD::STRICT_FSUB, MVT::f128, LibCall);
692    setOperationAction(ISD::FDIV,        MVT::f128, LibCall);
693    setOperationAction(ISD::STRICT_FDIV, MVT::f128, LibCall);
694    setOperationAction(ISD::FMUL,        MVT::f128, LibCall);
695    setOperationAction(ISD::STRICT_FMUL, MVT::f128, LibCall);
696    setOperationAction(ISD::FMA,         MVT::f128, LibCall);
697    setOperationAction(ISD::STRICT_FMA,  MVT::f128, LibCall);
698
699    setOperationAction(ISD::FABS, MVT::f128, Custom);
700    setOperationAction(ISD::FNEG, MVT::f128, Custom);
701    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);
702
703    setOperationAction(ISD::FSIN,         MVT::f128, LibCall);
704    setOperationAction(ISD::STRICT_FSIN,  MVT::f128, LibCall);
705    setOperationAction(ISD::FCOS,         MVT::f128, LibCall);
706    setOperationAction(ISD::STRICT_FCOS,  MVT::f128, LibCall);
707    setOperationAction(ISD::FSINCOS,      MVT::f128, LibCall);
708    // No STRICT_FSINCOS
709    setOperationAction(ISD::FSQRT,        MVT::f128, LibCall);
710    setOperationAction(ISD::STRICT_FSQRT, MVT::f128, LibCall);
711
712    setOperationAction(ISD::FP_EXTEND,        MVT::f128, Custom);
713    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Custom);
714    // We need to custom handle any FP_ROUND with an f128 input, but
715    // LegalizeDAG uses the result type to know when to run a custom handler.
716    // So we have to list all legal floating point result types here.
717    if (isTypeLegal(MVT::f32)) {
718      setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
719      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
720    }
721    if (isTypeLegal(MVT::f64)) {
722      setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
723      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
724    }
725    if (isTypeLegal(MVT::f80)) {
726      setOperationAction(ISD::FP_ROUND, MVT::f80, Custom);
727      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom);
728    }
729
730    setOperationAction(ISD::SETCC, MVT::f128, Custom);
731
732    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
733    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
734    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f80, Expand);
735    setTruncStoreAction(MVT::f128, MVT::f32, Expand);
736    setTruncStoreAction(MVT::f128, MVT::f64, Expand);
737    setTruncStoreAction(MVT::f128, MVT::f80, Expand);
738  }
739
740  // Always use a library call for pow.
741  setOperationAction(ISD::FPOW             , MVT::f32  , Expand);
742  setOperationAction(ISD::FPOW             , MVT::f64  , Expand);
743  setOperationAction(ISD::FPOW             , MVT::f80  , Expand);
744  setOperationAction(ISD::FPOW             , MVT::f128 , Expand);
745
746  setOperationAction(ISD::FLOG, MVT::f80, Expand);
747  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
748  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
749  setOperationAction(ISD::FEXP, MVT::f80, Expand);
750  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
751  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
752  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
753
754  // Some FP actions are always expanded for vector types.
755  for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
756                   MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
757    setOperationAction(ISD::FSIN,      VT, Expand);
758    setOperationAction(ISD::FSINCOS,   VT, Expand);
759    setOperationAction(ISD::FCOS,      VT, Expand);
760    setOperationAction(ISD::FREM,      VT, Expand);
761    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
762    setOperationAction(ISD::FPOW,      VT, Expand);
763    setOperationAction(ISD::FLOG,      VT, Expand);
764    setOperationAction(ISD::FLOG2,     VT, Expand);
765    setOperationAction(ISD::FLOG10,    VT, Expand);
766    setOperationAction(ISD::FEXP,      VT, Expand);
767    setOperationAction(ISD::FEXP2,     VT, Expand);
768  }
769
770  // First set operation action for all vector types to either promote
771  // (for widening) or expand (for scalarization). Then we will selectively
772  // turn on ones that can be effectively codegen'd.
773  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
774    setOperationAction(ISD::SDIV, VT, Expand);
775    setOperationAction(ISD::UDIV, VT, Expand);
776    setOperationAction(ISD::SREM, VT, Expand);
777    setOperationAction(ISD::UREM, VT, Expand);
778    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
779    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
780    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
781    setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
782    setOperationAction(ISD::FMA,  VT, Expand);
783    setOperationAction(ISD::FFLOOR, VT, Expand);
784    setOperationAction(ISD::FCEIL, VT, Expand);
785    setOperationAction(ISD::FTRUNC, VT, Expand);
786    setOperationAction(ISD::FRINT, VT, Expand);
787    setOperationAction(ISD::FNEARBYINT, VT, Expand);
788    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
789    setOperationAction(ISD::MULHS, VT, Expand);
790    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
791    setOperationAction(ISD::MULHU, VT, Expand);
792    setOperationAction(ISD::SDIVREM, VT, Expand);
793    setOperationAction(ISD::UDIVREM, VT, Expand);
794    setOperationAction(ISD::CTPOP, VT, Expand);
795    setOperationAction(ISD::CTTZ, VT, Expand);
796    setOperationAction(ISD::CTLZ, VT, Expand);
797    setOperationAction(ISD::ROTL, VT, Expand);
798    setOperationAction(ISD::ROTR, VT, Expand);
799    setOperationAction(ISD::BSWAP, VT, Expand);
800    setOperationAction(ISD::SETCC, VT, Expand);
801    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
802    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
803    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
804    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
805    setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
806    setOperationAction(ISD::TRUNCATE, VT, Expand);
807    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
808    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
809    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
810    setOperationAction(ISD::SELECT_CC, VT, Expand);
811    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
812      setTruncStoreAction(InnerVT, VT, Expand);
813
814      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
815      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
816
817      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
818      // types; we have to deal with them whether we ask for Expansion or not.
819      // Setting Expand causes its own optimisation problems though, so leave
820      // them legal.
821      if (VT.getVectorElementType() == MVT::i1)
822        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
823
824      // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
825      // split/scalarized right now.
826      if (VT.getVectorElementType() == MVT::f16)
827        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
828    }
829  }
830
831  // FIXME: To prevent SSE instructions from being expanded to MMX ones with
832  // -msoft-float, disable use of MMX as well.
833  if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
834    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
835    // No operations on x86mmx are supported; everything uses intrinsics.
836  }
837
838  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
839    addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
840                                                    : &X86::VR128RegClass);
841
842    setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
843    setOperationAction(ISD::FABS,               MVT::v4f32, Custom);
844    setOperationAction(ISD::FCOPYSIGN,          MVT::v4f32, Custom);
845    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
846    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
847    setOperationAction(ISD::VSELECT,            MVT::v4f32, Custom);
848    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
849    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
850
851    setOperationAction(ISD::LOAD,               MVT::v2f32, Custom);
852    setOperationAction(ISD::STORE,              MVT::v2f32, Custom);
853
854    setOperationAction(ISD::STRICT_FADD,        MVT::v4f32, Legal);
855    setOperationAction(ISD::STRICT_FSUB,        MVT::v4f32, Legal);
856    setOperationAction(ISD::STRICT_FMUL,        MVT::v4f32, Legal);
857    setOperationAction(ISD::STRICT_FDIV,        MVT::v4f32, Legal);
858    setOperationAction(ISD::STRICT_FSQRT,       MVT::v4f32, Legal);
859  }
860
861  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
862    addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
863                                                    : &X86::VR128RegClass);
864
865    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
866    // registers cannot be used even for integer operations.
867    addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
868                                                    : &X86::VR128RegClass);
869    addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
870                                                    : &X86::VR128RegClass);
871    addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
872                                                    : &X86::VR128RegClass);
873    addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
874                                                    : &X86::VR128RegClass);
875
876    for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
877                     MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
878      setOperationAction(ISD::SDIV, VT, Custom);
879      setOperationAction(ISD::SREM, VT, Custom);
880      setOperationAction(ISD::UDIV, VT, Custom);
881      setOperationAction(ISD::UREM, VT, Custom);
882    }
883
884    setOperationAction(ISD::MUL,                MVT::v2i8,  Custom);
885    setOperationAction(ISD::MUL,                MVT::v4i8,  Custom);
886    setOperationAction(ISD::MUL,                MVT::v8i8,  Custom);
887
888    setOperationAction(ISD::MUL,                MVT::v16i8, Custom);
889    setOperationAction(ISD::MUL,                MVT::v4i32, Custom);
890    setOperationAction(ISD::MUL,                MVT::v2i64, Custom);
891    setOperationAction(ISD::MULHU,              MVT::v4i32, Custom);
892    setOperationAction(ISD::MULHS,              MVT::v4i32, Custom);
893    setOperationAction(ISD::MULHU,              MVT::v16i8, Custom);
894    setOperationAction(ISD::MULHS,              MVT::v16i8, Custom);
895    setOperationAction(ISD::MULHU,              MVT::v8i16, Legal);
896    setOperationAction(ISD::MULHS,              MVT::v8i16, Legal);
897    setOperationAction(ISD::MUL,                MVT::v8i16, Legal);
898    setOperationAction(ISD::FNEG,               MVT::v2f64, Custom);
899    setOperationAction(ISD::FABS,               MVT::v2f64, Custom);
900    setOperationAction(ISD::FCOPYSIGN,          MVT::v2f64, Custom);
901
902    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
903      setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
904      setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
905      setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
906      setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
907    }
908
909    setOperationAction(ISD::UADDSAT,            MVT::v16i8, Legal);
910    setOperationAction(ISD::SADDSAT,            MVT::v16i8, Legal);
911    setOperationAction(ISD::USUBSAT,            MVT::v16i8, Legal);
912    setOperationAction(ISD::SSUBSAT,            MVT::v16i8, Legal);
913    setOperationAction(ISD::UADDSAT,            MVT::v8i16, Legal);
914    setOperationAction(ISD::SADDSAT,            MVT::v8i16, Legal);
915    setOperationAction(ISD::USUBSAT,            MVT::v8i16, Legal);
916    setOperationAction(ISD::SSUBSAT,            MVT::v8i16, Legal);
917    setOperationAction(ISD::UADDSAT,            MVT::v4i32, Custom);
918    setOperationAction(ISD::USUBSAT,            MVT::v4i32, Custom);
919    setOperationAction(ISD::UADDSAT,            MVT::v2i64, Custom);
920    setOperationAction(ISD::USUBSAT,            MVT::v2i64, Custom);
921
922    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
923    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
924    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);
925
926    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
927      setOperationAction(ISD::SETCC,              VT, Custom);
928      setOperationAction(ISD::STRICT_FSETCC,      VT, Custom);
929      setOperationAction(ISD::STRICT_FSETCCS,     VT, Custom);
930      setOperationAction(ISD::CTPOP,              VT, Custom);
931      setOperationAction(ISD::ABS,                VT, Custom);
932
933      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
934      // setcc all the way to isel and prefer SETGT in some isel patterns.
935      setCondCodeAction(ISD::SETLT, VT, Custom);
936      setCondCodeAction(ISD::SETLE, VT, Custom);
937    }
938
939    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
940      setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
941      setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
942      setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
943      setOperationAction(ISD::VSELECT,            VT, Custom);
944      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
945    }
946
947    for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
948      setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
949      setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
950      setOperationAction(ISD::VSELECT,            VT, Custom);
951
952      if (VT == MVT::v2i64 && !Subtarget.is64Bit())
953        continue;
954
955      setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
956      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
957    }
958
959    // Custom lower v2i64 and v2f64 selects.
960    setOperationAction(ISD::SELECT,             MVT::v2f64, Custom);
961    setOperationAction(ISD::SELECT,             MVT::v2i64, Custom);
962    setOperationAction(ISD::SELECT,             MVT::v4i32, Custom);
963    setOperationAction(ISD::SELECT,             MVT::v8i16, Custom);
964    setOperationAction(ISD::SELECT,             MVT::v16i8, Custom);
965
966    setOperationAction(ISD::FP_TO_SINT,         MVT::v4i32, Legal);
967    setOperationAction(ISD::FP_TO_SINT,         MVT::v2i32, Custom);
968    setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v4i32, Legal);
969    setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v2i32, Custom);
970
971    // Custom legalize these to avoid over promotion or custom promotion.
972    for (auto VT : {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16}) {
973      setOperationAction(ISD::FP_TO_SINT,        VT, Custom);
974      setOperationAction(ISD::FP_TO_UINT,        VT, Custom);
975      setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom);
976      setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom);
977    }
978
979    setOperationAction(ISD::SINT_TO_FP,         MVT::v4i32, Legal);
980    setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v4i32, Legal);
981    setOperationAction(ISD::SINT_TO_FP,         MVT::v2i32, Custom);
982    setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v2i32, Custom);
983
984    setOperationAction(ISD::UINT_TO_FP,         MVT::v2i32, Custom);
985    setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v2i32, Custom);
986
987    setOperationAction(ISD::UINT_TO_FP,         MVT::v4i32, Custom);
988    setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v4i32, Custom);
989
990    // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
991    setOperationAction(ISD::SINT_TO_FP,         MVT::v2f32, Custom);
992    setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v2f32, Custom);
993    setOperationAction(ISD::UINT_TO_FP,         MVT::v2f32, Custom);
994    setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v2f32, Custom);
995
996    setOperationAction(ISD::FP_EXTEND,          MVT::v2f32, Custom);
997    setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v2f32, Custom);
998    setOperationAction(ISD::FP_ROUND,           MVT::v2f32, Custom);
999    setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v2f32, Custom);
1000
1001    // We want to legalize this to an f64 load rather than an i64 load on
1002    // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
1003    // store.
1004    setOperationAction(ISD::LOAD,               MVT::v2i32, Custom);
1005    setOperationAction(ISD::LOAD,               MVT::v4i16, Custom);
1006    setOperationAction(ISD::LOAD,               MVT::v8i8,  Custom);
1007    setOperationAction(ISD::STORE,              MVT::v2i32, Custom);
1008    setOperationAction(ISD::STORE,              MVT::v4i16, Custom);
1009    setOperationAction(ISD::STORE,              MVT::v8i8,  Custom);
1010
1011    setOperationAction(ISD::BITCAST,            MVT::v2i32, Custom);
1012    setOperationAction(ISD::BITCAST,            MVT::v4i16, Custom);
1013    setOperationAction(ISD::BITCAST,            MVT::v8i8,  Custom);
1014    if (!Subtarget.hasAVX512())
1015      setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);
1016
1017    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
1018    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
1019    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
1020
1021    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
1022
1023    setOperationAction(ISD::TRUNCATE,    MVT::v2i8,  Custom);
1024    setOperationAction(ISD::TRUNCATE,    MVT::v2i16, Custom);
1025    setOperationAction(ISD::TRUNCATE,    MVT::v2i32, Custom);
1026    setOperationAction(ISD::TRUNCATE,    MVT::v4i8,  Custom);
1027    setOperationAction(ISD::TRUNCATE,    MVT::v4i16, Custom);
1028    setOperationAction(ISD::TRUNCATE,    MVT::v8i8,  Custom);
1029
1030    // In the customized shift lowering, the legal v4i32/v2i64 cases
1031    // in AVX2 will be recognized.
1032    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1033      setOperationAction(ISD::SRL,              VT, Custom);
1034      setOperationAction(ISD::SHL,              VT, Custom);
1035      setOperationAction(ISD::SRA,              VT, Custom);
1036    }
1037
1038    setOperationAction(ISD::ROTL,               MVT::v4i32, Custom);
1039    setOperationAction(ISD::ROTL,               MVT::v8i16, Custom);
1040
1041    // With AVX512, expanding (and promoting the shifts) is better.
1042    if (!Subtarget.hasAVX512())
1043      setOperationAction(ISD::ROTL,             MVT::v16i8, Custom);
1044
1045    setOperationAction(ISD::STRICT_FSQRT,       MVT::v2f64, Legal);
1046    setOperationAction(ISD::STRICT_FADD,        MVT::v2f64, Legal);
1047    setOperationAction(ISD::STRICT_FSUB,        MVT::v2f64, Legal);
1048    setOperationAction(ISD::STRICT_FMUL,        MVT::v2f64, Legal);
1049    setOperationAction(ISD::STRICT_FDIV,        MVT::v2f64, Legal);
1050  }
1051
1052  if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
1053    setOperationAction(ISD::ABS,                MVT::v16i8, Legal);
1054    setOperationAction(ISD::ABS,                MVT::v8i16, Legal);
1055    setOperationAction(ISD::ABS,                MVT::v4i32, Legal);
1056    setOperationAction(ISD::BITREVERSE,         MVT::v16i8, Custom);
1057    setOperationAction(ISD::CTLZ,               MVT::v16i8, Custom);
1058    setOperationAction(ISD::CTLZ,               MVT::v8i16, Custom);
1059    setOperationAction(ISD::CTLZ,               MVT::v4i32, Custom);
1060    setOperationAction(ISD::CTLZ,               MVT::v2i64, Custom);
1061
1062    // These might be better off as horizontal vector ops.
1063    setOperationAction(ISD::ADD,                MVT::i16, Custom);
1064    setOperationAction(ISD::ADD,                MVT::i32, Custom);
1065    setOperationAction(ISD::SUB,                MVT::i16, Custom);
1066    setOperationAction(ISD::SUB,                MVT::i32, Custom);
1067  }
1068
1069  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
1070    for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
1071      setOperationAction(ISD::FFLOOR,            RoundedTy,  Legal);
1072      setOperationAction(ISD::STRICT_FFLOOR,     RoundedTy,  Legal);
1073      setOperationAction(ISD::FCEIL,             RoundedTy,  Legal);
1074      setOperationAction(ISD::STRICT_FCEIL,      RoundedTy,  Legal);
1075      setOperationAction(ISD::FTRUNC,            RoundedTy,  Legal);
1076      setOperationAction(ISD::STRICT_FTRUNC,     RoundedTy,  Legal);
1077      setOperationAction(ISD::FRINT,             RoundedTy,  Legal);
1078      setOperationAction(ISD::STRICT_FRINT,      RoundedTy,  Legal);
1079      setOperationAction(ISD::FNEARBYINT,        RoundedTy,  Legal);
1080      setOperationAction(ISD::STRICT_FNEARBYINT, RoundedTy,  Legal);
1081    }
1082
1083    setOperationAction(ISD::SMAX,               MVT::v16i8, Legal);
1084    setOperationAction(ISD::SMAX,               MVT::v4i32, Legal);
1085    setOperationAction(ISD::UMAX,               MVT::v8i16, Legal);
1086    setOperationAction(ISD::UMAX,               MVT::v4i32, Legal);
1087    setOperationAction(ISD::SMIN,               MVT::v16i8, Legal);
1088    setOperationAction(ISD::SMIN,               MVT::v4i32, Legal);
1089    setOperationAction(ISD::UMIN,               MVT::v8i16, Legal);
1090    setOperationAction(ISD::UMIN,               MVT::v4i32, Legal);
1091
1092    // FIXME: Do we need to handle scalar-to-vector here?
1093    setOperationAction(ISD::MUL,                MVT::v4i32, Legal);
1094
1095    // We directly match byte blends in the backend as they match the VSELECT
1096    // condition form.
1097    setOperationAction(ISD::VSELECT,            MVT::v16i8, Legal);
1098
1099    // SSE41 brings specific instructions for doing vector sign extend even in
1100    // cases where we don't have SRA.
1101    for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1102      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
1103      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
1104    }
1105
1106    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
1107    for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1108      setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8,  Legal);
1109      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8,  Legal);
1110      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8,  Legal);
1111      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
1112      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
1113      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
1114    }
1115
1116    // i8 vectors are custom because the source register and source
1117    // memory operand types are not the same width.
1118    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v16i8, Custom);
1119
1120    if (Subtarget.is64Bit() && !Subtarget.hasAVX512()) {
1121      // We need to scalarize v4i64->v4f32 uint_to_fp using cvtsi2ss, but we can
1122      // do the pre and post work in the vector domain.
1123      setOperationAction(ISD::UINT_TO_FP,        MVT::v4i64, Custom);
1124      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i64, Custom);
1125      // We need to mark SINT_TO_FP as Custom even though we want to expand it
1126      // so that DAG combine doesn't try to turn it into uint_to_fp.
1127      setOperationAction(ISD::SINT_TO_FP,        MVT::v4i64, Custom);
1128      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i64, Custom);
1129    }
1130  }
1131
1132  if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
1133    for (auto VT : { MVT::v16i8, MVT::v8i16,  MVT::v4i32, MVT::v2i64,
1134                     MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1135      setOperationAction(ISD::ROTL, VT, Custom);
1136
1137    // XOP can efficiently perform BITREVERSE with VPPERM.
1138    for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
1139      setOperationAction(ISD::BITREVERSE, VT, Custom);
1140
1141    for (auto VT : { MVT::v16i8, MVT::v8i16,  MVT::v4i32, MVT::v2i64,
1142                     MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1143      setOperationAction(ISD::BITREVERSE, VT, Custom);
1144  }
1145
1146  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
1147    bool HasInt256 = Subtarget.hasInt256();
1148
1149    addRegisterClass(MVT::v32i8,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1150                                                     : &X86::VR256RegClass);
1151    addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1152                                                     : &X86::VR256RegClass);
1153    addRegisterClass(MVT::v8i32,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1154                                                     : &X86::VR256RegClass);
1155    addRegisterClass(MVT::v8f32,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1156                                                     : &X86::VR256RegClass);
1157    addRegisterClass(MVT::v4i64,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1158                                                     : &X86::VR256RegClass);
1159    addRegisterClass(MVT::v4f64,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1160                                                     : &X86::VR256RegClass);
1161
1162    for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
1163      setOperationAction(ISD::FFLOOR,            VT, Legal);
1164      setOperationAction(ISD::STRICT_FFLOOR,     VT, Legal);
1165      setOperationAction(ISD::FCEIL,             VT, Legal);
1166      setOperationAction(ISD::STRICT_FCEIL,      VT, Legal);
1167      setOperationAction(ISD::FTRUNC,            VT, Legal);
1168      setOperationAction(ISD::STRICT_FTRUNC,     VT, Legal);
1169      setOperationAction(ISD::FRINT,             VT, Legal);
1170      setOperationAction(ISD::STRICT_FRINT,      VT, Legal);
1171      setOperationAction(ISD::FNEARBYINT,        VT, Legal);
1172      setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1173      setOperationAction(ISD::FNEG,              VT, Custom);
1174      setOperationAction(ISD::FABS,              VT, Custom);
1175      setOperationAction(ISD::FCOPYSIGN,         VT, Custom);
1176    }
1177
1178    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1179    // even though v8i16 is a legal type.
1180    setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v8i16, MVT::v8i32);
1181    setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v8i16, MVT::v8i32);
1182    setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1183    setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i16, MVT::v8i32);
1184    setOperationAction(ISD::FP_TO_SINT,                MVT::v8i32, Legal);
1185    setOperationAction(ISD::STRICT_FP_TO_SINT,         MVT::v8i32, Legal);
1186
1187    setOperationAction(ISD::SINT_TO_FP,         MVT::v8i32, Legal);
1188    setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v8i32, Legal);
1189
1190    setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v4f32, Legal);
1191    setOperationAction(ISD::STRICT_FADD,        MVT::v8f32, Legal);
1192    setOperationAction(ISD::STRICT_FADD,        MVT::v4f64, Legal);
1193    setOperationAction(ISD::STRICT_FSUB,        MVT::v8f32, Legal);
1194    setOperationAction(ISD::STRICT_FSUB,        MVT::v4f64, Legal);
1195    setOperationAction(ISD::STRICT_FMUL,        MVT::v8f32, Legal);
1196    setOperationAction(ISD::STRICT_FMUL,        MVT::v4f64, Legal);
1197    setOperationAction(ISD::STRICT_FDIV,        MVT::v8f32, Legal);
1198    setOperationAction(ISD::STRICT_FDIV,        MVT::v4f64, Legal);
1199    setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v4f64, Legal);
1200    setOperationAction(ISD::STRICT_FSQRT,       MVT::v8f32, Legal);
1201    setOperationAction(ISD::STRICT_FSQRT,       MVT::v4f64, Legal);
1202
1203    if (!Subtarget.hasAVX512())
1204      setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
1205
1206    // In the customized shift lowering, the legal v8i32/v4i64 cases
1207    // in AVX2 will be recognized.
1208    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1209      setOperationAction(ISD::SRL, VT, Custom);
1210      setOperationAction(ISD::SHL, VT, Custom);
1211      setOperationAction(ISD::SRA, VT, Custom);
1212    }
1213
1214    // These types need custom splitting if their input is a 128-bit vector.
1215    setOperationAction(ISD::SIGN_EXTEND,       MVT::v8i64,  Custom);
1216    setOperationAction(ISD::SIGN_EXTEND,       MVT::v16i32, Custom);
1217    setOperationAction(ISD::ZERO_EXTEND,       MVT::v8i64,  Custom);
1218    setOperationAction(ISD::ZERO_EXTEND,       MVT::v16i32, Custom);
1219
1220    setOperationAction(ISD::ROTL,              MVT::v8i32,  Custom);
1221    setOperationAction(ISD::ROTL,              MVT::v16i16, Custom);
1222
1223    // With BWI, expanding (and promoting the shifts) is the better option.
1224    if (!Subtarget.hasBWI())
1225      setOperationAction(ISD::ROTL,            MVT::v32i8,  Custom);
1226
1227    setOperationAction(ISD::SELECT,            MVT::v4f64, Custom);
1228    setOperationAction(ISD::SELECT,            MVT::v4i64, Custom);
1229    setOperationAction(ISD::SELECT,            MVT::v8i32, Custom);
1230    setOperationAction(ISD::SELECT,            MVT::v16i16, Custom);
1231    setOperationAction(ISD::SELECT,            MVT::v32i8, Custom);
1232    setOperationAction(ISD::SELECT,            MVT::v8f32, Custom);
1233
1234    for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1235      setOperationAction(ISD::SIGN_EXTEND,     VT, Custom);
1236      setOperationAction(ISD::ZERO_EXTEND,     VT, Custom);
1237      setOperationAction(ISD::ANY_EXTEND,      VT, Custom);
1238    }
1239
1240    setOperationAction(ISD::TRUNCATE,          MVT::v16i8, Custom);
1241    setOperationAction(ISD::TRUNCATE,          MVT::v8i16, Custom);
1242    setOperationAction(ISD::TRUNCATE,          MVT::v4i32, Custom);
1243    setOperationAction(ISD::BITREVERSE,        MVT::v32i8, Custom);
1244
1245    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1246      setOperationAction(ISD::SETCC,           VT, Custom);
1247      setOperationAction(ISD::STRICT_FSETCC,   VT, Custom);
1248      setOperationAction(ISD::STRICT_FSETCCS,  VT, Custom);
1249      setOperationAction(ISD::CTPOP,           VT, Custom);
1250      setOperationAction(ISD::CTLZ,            VT, Custom);
1251
1252      // These condition codes aren't legal in SSE/AVX; under AVX512 we use
1253      // setcc all the way to isel and prefer SETGT in some isel patterns.
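          // For example, (setcc X, Y, setlt) on these vector types is
          // typically commuted to (setcc Y, X, setgt) so it can map onto the
          // PCMPGT family, which only provides signed greater-than
          // (illustrative note).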
1254      setCondCodeAction(ISD::SETLT, VT, Custom);
1255      setCondCodeAction(ISD::SETLE, VT, Custom);
1256    }
1257
1258    if (Subtarget.hasAnyFMA()) {
1259      for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
1260                       MVT::v2f64, MVT::v4f64 }) {
1261        setOperationAction(ISD::FMA, VT, Legal);
1262        setOperationAction(ISD::STRICT_FMA, VT, Legal);
1263      }
1264    }
1265
1266    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1267      setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
1268      setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
1269    }
1270
1271    setOperationAction(ISD::MUL,       MVT::v4i64,  Custom);
1272    setOperationAction(ISD::MUL,       MVT::v8i32,  HasInt256 ? Legal : Custom);
1273    setOperationAction(ISD::MUL,       MVT::v16i16, HasInt256 ? Legal : Custom);
1274    setOperationAction(ISD::MUL,       MVT::v32i8,  Custom);
1275
1276    setOperationAction(ISD::MULHU,     MVT::v8i32,  Custom);
1277    setOperationAction(ISD::MULHS,     MVT::v8i32,  Custom);
1278    setOperationAction(ISD::MULHU,     MVT::v16i16, HasInt256 ? Legal : Custom);
1279    setOperationAction(ISD::MULHS,     MVT::v16i16, HasInt256 ? Legal : Custom);
1280    setOperationAction(ISD::MULHU,     MVT::v32i8,  Custom);
1281    setOperationAction(ISD::MULHS,     MVT::v32i8,  Custom);
1282
1283    setOperationAction(ISD::ABS,       MVT::v4i64,  Custom);
1284    setOperationAction(ISD::SMAX,      MVT::v4i64,  Custom);
1285    setOperationAction(ISD::UMAX,      MVT::v4i64,  Custom);
1286    setOperationAction(ISD::SMIN,      MVT::v4i64,  Custom);
1287    setOperationAction(ISD::UMIN,      MVT::v4i64,  Custom);
1288
1289    setOperationAction(ISD::UADDSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1290    setOperationAction(ISD::SADDSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1291    setOperationAction(ISD::USUBSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1292    setOperationAction(ISD::SSUBSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1293    setOperationAction(ISD::UADDSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1294    setOperationAction(ISD::SADDSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1295    setOperationAction(ISD::USUBSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1296    setOperationAction(ISD::SSUBSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1297
1298    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1299      setOperationAction(ISD::ABS,  VT, HasInt256 ? Legal : Custom);
1300      setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
1301      setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
1302      setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
1303      setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
1304    }
1305
1306    for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
1307      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1308      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1309    }
1310
1311    if (HasInt256) {
1312      // The custom lowering of UINT_TO_FP for v8i32 becomes interesting
1313      // when we have a 256-bit-wide blend with immediate.
1314      setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1315      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32, Custom);
1316
1317      // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1318      for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1319        setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
1320        setLoadExtAction(LoadExtOp, MVT::v8i32,  MVT::v8i8,  Legal);
1321        setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i8,  Legal);
1322        setLoadExtAction(LoadExtOp, MVT::v8i32,  MVT::v8i16, Legal);
1323        setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i16, Legal);
1324        setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i32, Legal);
1325      }
1326    }
1327
1328    for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1329                     MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1330      setOperationAction(ISD::MLOAD,  VT, Subtarget.hasVLX() ? Legal : Custom);
1331      setOperationAction(ISD::MSTORE, VT, Legal);
1332    }
1333
1334    // Extract subvector is special because the value type
1335    // (result) is 128-bit but the source is 256-bit wide.
1336    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1337                     MVT::v4f32, MVT::v2f64 }) {
1338      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1339    }
1340
1341    // Custom lower several nodes for 256-bit types.
1342    for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1343                    MVT::v8f32, MVT::v4f64 }) {
1344      setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1345      setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1346      setOperationAction(ISD::VSELECT,            VT, Custom);
1347      setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
1348      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1349      setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
1350      setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Legal);
1351      setOperationAction(ISD::CONCAT_VECTORS,     VT, Custom);
1352      setOperationAction(ISD::STORE,              VT, Custom);
1353    }
1354
1355    if (HasInt256) {
1356      setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1357
1358      // Custom legalize 2x32 to get a little better code.
1359      setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
1360      setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
1361
1362      for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1363                       MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1364        setOperationAction(ISD::MGATHER,  VT, Custom);
1365    }
1366  }
1367
1368  // This block controls legalization of the mask vector sizes that are
1369  // available with AVX512. 512-bit vectors are in a separate block controlled
1370  // by useAVX512Regs.
1371  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1372    addRegisterClass(MVT::v1i1,   &X86::VK1RegClass);
1373    addRegisterClass(MVT::v2i1,   &X86::VK2RegClass);
1374    addRegisterClass(MVT::v4i1,   &X86::VK4RegClass);
1375    addRegisterClass(MVT::v8i1,   &X86::VK8RegClass);
1376    addRegisterClass(MVT::v16i1,  &X86::VK16RegClass);
1377
1378    setOperationAction(ISD::SELECT,             MVT::v1i1, Custom);
1379    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
1380    setOperationAction(ISD::BUILD_VECTOR,       MVT::v1i1, Custom);
1381
1382    setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v8i1,  MVT::v8i32);
1383    setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v8i1,  MVT::v8i32);
1384    setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v4i1,  MVT::v4i32);
1385    setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v4i1,  MVT::v4i32);
1386    setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i1,  MVT::v8i32);
1387    setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i1,  MVT::v8i32);
1388    setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v4i1,  MVT::v4i32);
1389    setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v4i1,  MVT::v4i32);
1390    setOperationAction(ISD::FP_TO_SINT,                MVT::v2i1,  Custom);
1391    setOperationAction(ISD::FP_TO_UINT,                MVT::v2i1,  Custom);
1392    setOperationAction(ISD::STRICT_FP_TO_SINT,         MVT::v2i1,  Custom);
1393    setOperationAction(ISD::STRICT_FP_TO_UINT,         MVT::v2i1,  Custom);
1394
1395    // There is no byte-sized k-register load or store without AVX512DQ.
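        // (KMOVB is an AVX512DQ instruction; without it an i8-sized mask
        // presumably has to travel through a wider k-register or a GPR,
        // which is what the Custom lowering below arranges. Rough note.)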
1396    if (!Subtarget.hasDQI()) {
1397      setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
1398      setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
1399      setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
1400      setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
1401
1402      setOperationAction(ISD::STORE, MVT::v1i1, Custom);
1403      setOperationAction(ISD::STORE, MVT::v2i1, Custom);
1404      setOperationAction(ISD::STORE, MVT::v4i1, Custom);
1405      setOperationAction(ISD::STORE, MVT::v8i1, Custom);
1406    }
1407
1408    // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
1409    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1410      setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1411      setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1412      setOperationAction(ISD::ANY_EXTEND,  VT, Custom);
1413    }
1414
1415    for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1416      setOperationAction(ISD::ADD,              VT, Custom);
1417      setOperationAction(ISD::SUB,              VT, Custom);
1418      setOperationAction(ISD::MUL,              VT, Custom);
1419      setOperationAction(ISD::SETCC,            VT, Custom);
1420      setOperationAction(ISD::STRICT_FSETCC,    VT, Custom);
1421      setOperationAction(ISD::STRICT_FSETCCS,   VT, Custom);
1422      setOperationAction(ISD::SELECT,           VT, Custom);
1423      setOperationAction(ISD::TRUNCATE,         VT, Custom);
1424      setOperationAction(ISD::UADDSAT,          VT, Custom);
1425      setOperationAction(ISD::SADDSAT,          VT, Custom);
1426      setOperationAction(ISD::USUBSAT,          VT, Custom);
1427      setOperationAction(ISD::SSUBSAT,          VT, Custom);
1428
1429      setOperationAction(ISD::BUILD_VECTOR,     VT, Custom);
1430      setOperationAction(ISD::CONCAT_VECTORS,   VT, Custom);
1431      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1432      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1433      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1434      setOperationAction(ISD::VECTOR_SHUFFLE,   VT, Custom);
1435      setOperationAction(ISD::VSELECT,          VT, Expand);
1436    }
1437
1438    for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
1439      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1440  }
1441
1442  // This block controls legalization for 512-bit operations with 32/64-bit
1443  // elements. 512-bit ops can be disabled based on prefer-vector-width and
1444  // required-vector-width function attributes.
1445  if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
1446    addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1447    addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1448    addRegisterClass(MVT::v8i64,  &X86::VR512RegClass);
1449    addRegisterClass(MVT::v8f64,  &X86::VR512RegClass);
1450
1451    for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1452      setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8,  Legal);
1453      setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1454      setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i8,   Legal);
1455      setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i16,  Legal);
1456      setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i32,  Legal);
1457    }
1458
1459    for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1460      setOperationAction(ISD::FNEG,  VT, Custom);
1461      setOperationAction(ISD::FABS,  VT, Custom);
1462      setOperationAction(ISD::FMA,   VT, Legal);
1463      setOperationAction(ISD::STRICT_FMA, VT, Legal);
1464      setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1465    }
1466
1467    for (MVT VT : { MVT::v16i1, MVT::v16i8, MVT::v16i16 }) {
1468      setOperationPromotedToType(ISD::FP_TO_SINT       , VT, MVT::v16i32);
1469      setOperationPromotedToType(ISD::FP_TO_UINT       , VT, MVT::v16i32);
1470      setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, VT, MVT::v16i32);
1471      setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, VT, MVT::v16i32);
1472    }
1473    setOperationAction(ISD::FP_TO_SINT,        MVT::v16i32, Legal);
1474    setOperationAction(ISD::FP_TO_UINT,        MVT::v16i32, Legal);
1475    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v16i32, Legal);
1476    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v16i32, Legal);
1477    setOperationAction(ISD::SINT_TO_FP,        MVT::v16i32, Legal);
1478    setOperationAction(ISD::UINT_TO_FP,        MVT::v16i32, Legal);
1479    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i32, Legal);
1480    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i32, Legal);
1481
1482    setOperationAction(ISD::STRICT_FADD,      MVT::v16f32, Legal);
1483    setOperationAction(ISD::STRICT_FADD,      MVT::v8f64,  Legal);
1484    setOperationAction(ISD::STRICT_FSUB,      MVT::v16f32, Legal);
1485    setOperationAction(ISD::STRICT_FSUB,      MVT::v8f64,  Legal);
1486    setOperationAction(ISD::STRICT_FMUL,      MVT::v16f32, Legal);
1487    setOperationAction(ISD::STRICT_FMUL,      MVT::v8f64,  Legal);
1488    setOperationAction(ISD::STRICT_FDIV,      MVT::v16f32, Legal);
1489    setOperationAction(ISD::STRICT_FDIV,      MVT::v8f64,  Legal);
1490    setOperationAction(ISD::STRICT_FSQRT,     MVT::v16f32, Legal);
1491    setOperationAction(ISD::STRICT_FSQRT,     MVT::v8f64,  Legal);
1492    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f64,  Legal);
1493    setOperationAction(ISD::STRICT_FP_ROUND,  MVT::v8f32,  Legal);
1494
1495    setTruncStoreAction(MVT::v8i64,   MVT::v8i8,   Legal);
1496    setTruncStoreAction(MVT::v8i64,   MVT::v8i16,  Legal);
1497    setTruncStoreAction(MVT::v8i64,   MVT::v8i32,  Legal);
1498    setTruncStoreAction(MVT::v16i32,  MVT::v16i8,  Legal);
1499    setTruncStoreAction(MVT::v16i32,  MVT::v16i16, Legal);
1500
1501    // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
1502    // to 512-bit rather than use the AVX2 instructions so that we can use
1503    // k-masks.
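        // For example, a masked load of v8f32 is widened to v16f32 so its
        // mask can live in a k-register instead of a YMM vector (rough
        // sketch of the intent).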
1504    if (!Subtarget.hasVLX()) {
1505      for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1506           MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
1507        setOperationAction(ISD::MLOAD,  VT, Custom);
1508        setOperationAction(ISD::MSTORE, VT, Custom);
1509      }
1510    }
1511
1512    setOperationAction(ISD::TRUNCATE,           MVT::v8i32, Custom);
1513    setOperationAction(ISD::TRUNCATE,           MVT::v16i16, Custom);
1514    setOperationAction(ISD::ZERO_EXTEND,        MVT::v16i32, Custom);
1515    setOperationAction(ISD::ZERO_EXTEND,        MVT::v8i64, Custom);
1516    setOperationAction(ISD::ANY_EXTEND,         MVT::v16i32, Custom);
1517    setOperationAction(ISD::ANY_EXTEND,         MVT::v8i64, Custom);
1518    setOperationAction(ISD::SIGN_EXTEND,        MVT::v16i32, Custom);
1519    setOperationAction(ISD::SIGN_EXTEND,        MVT::v8i64, Custom);
1520
1521    // Need to custom widen this if we don't have AVX512BW.
1522    setOperationAction(ISD::ANY_EXTEND,         MVT::v8i8, Custom);
1523    setOperationAction(ISD::ZERO_EXTEND,        MVT::v8i8, Custom);
1524    setOperationAction(ISD::SIGN_EXTEND,        MVT::v8i8, Custom);
1525
1526    for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1527      setOperationAction(ISD::FFLOOR,            VT, Legal);
1528      setOperationAction(ISD::STRICT_FFLOOR,     VT, Legal);
1529      setOperationAction(ISD::FCEIL,             VT, Legal);
1530      setOperationAction(ISD::STRICT_FCEIL,      VT, Legal);
1531      setOperationAction(ISD::FTRUNC,            VT, Legal);
1532      setOperationAction(ISD::STRICT_FTRUNC,     VT, Legal);
1533      setOperationAction(ISD::FRINT,             VT, Legal);
1534      setOperationAction(ISD::STRICT_FRINT,      VT, Legal);
1535      setOperationAction(ISD::FNEARBYINT,        VT, Legal);
1536      setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1537
1538      setOperationAction(ISD::SELECT,           VT, Custom);
1539    }
1540
1541    // Without BWI we need to use custom lowering to handle MVT::v64i8 input.
1542    for (auto VT : {MVT::v16i32, MVT::v8i64, MVT::v64i8}) {
1543      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1544      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1545    }
1546
1547    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v8f64,  Custom);
1548    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v8i64,  Custom);
1549    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v16f32,  Custom);
1550    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v16i32,  Custom);
1551
1552    setOperationAction(ISD::MUL,                MVT::v8i64, Custom);
1553    setOperationAction(ISD::MUL,                MVT::v16i32, Legal);
1554
1555    setOperationAction(ISD::MULHU,              MVT::v16i32,  Custom);
1556    setOperationAction(ISD::MULHS,              MVT::v16i32,  Custom);
1557
1558    for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1559      setOperationAction(ISD::SMAX,             VT, Legal);
1560      setOperationAction(ISD::UMAX,             VT, Legal);
1561      setOperationAction(ISD::SMIN,             VT, Legal);
1562      setOperationAction(ISD::UMIN,             VT, Legal);
1563      setOperationAction(ISD::ABS,              VT, Legal);
1564      setOperationAction(ISD::SRL,              VT, Custom);
1565      setOperationAction(ISD::SHL,              VT, Custom);
1566      setOperationAction(ISD::SRA,              VT, Custom);
1567      setOperationAction(ISD::CTPOP,            VT, Custom);
1568      setOperationAction(ISD::ROTL,             VT, Custom);
1569      setOperationAction(ISD::ROTR,             VT, Custom);
1570      setOperationAction(ISD::SETCC,            VT, Custom);
1571      setOperationAction(ISD::STRICT_FSETCC,    VT, Custom);
1572      setOperationAction(ISD::STRICT_FSETCCS,   VT, Custom);
1573      setOperationAction(ISD::SELECT,           VT, Custom);
1574
1575      // These condition codes aren't legal in SSE/AVX; under AVX512 we use
1576      // setcc all the way to isel and prefer SETGT in some isel patterns.
1577      setCondCodeAction(ISD::SETLT, VT, Custom);
1578      setCondCodeAction(ISD::SETLE, VT, Custom);
1579    }
1580
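    // (Roughly: AVX512DQ adds the VCVT*QQ/UQQ conversions and VPMULLQ, which
    // is why these v8i64 operations become Legal below.)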
1581    if (Subtarget.hasDQI()) {
1582      setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
1583      setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
1584      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i64, Legal);
1585      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i64, Legal);
1586      setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal);
1587      setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal);
1588      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i64, Legal);
1589      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i64, Legal);
1590
1591      setOperationAction(ISD::MUL,        MVT::v8i64, Legal);
1592    }
1593
1594    if (Subtarget.hasCDI()) {
1595      // Non-VLX subtargets extend 128/256 vectors to use the 512-bit version.
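          // (CTLZ is Legal here because AVX512CD provides the VPLZCNTD and
          // VPLZCNTQ instructions.)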
1596      for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1597        setOperationAction(ISD::CTLZ,            VT, Legal);
1598      }
1599    } // Subtarget.hasCDI()
1600
1601    if (Subtarget.hasVPOPCNTDQ()) {
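      // (These map onto the AVX512VPOPCNTDQ VPOPCNTD/VPOPCNTQ instructions.)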
1602      for (auto VT : { MVT::v16i32, MVT::v8i64 })
1603        setOperationAction(ISD::CTPOP, VT, Legal);
1604    }
1605
1606    // Extract subvector is special because the value type
1607    // (result) is 256-bit but the source is 512-bit wide.
1608    // 128-bit was made Legal under AVX1.
1609    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1610                     MVT::v8f32, MVT::v4f64 })
1611      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1612
1613    for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1614      setOperationAction(ISD::VECTOR_SHUFFLE,      VT, Custom);
1615      setOperationAction(ISD::INSERT_VECTOR_ELT,   VT, Custom);
1616      setOperationAction(ISD::BUILD_VECTOR,        VT, Custom);
1617      setOperationAction(ISD::VSELECT,             VT, Custom);
1618      setOperationAction(ISD::EXTRACT_VECTOR_ELT,  VT, Custom);
1619      setOperationAction(ISD::SCALAR_TO_VECTOR,    VT, Custom);
1620      setOperationAction(ISD::INSERT_SUBVECTOR,    VT, Legal);
1621      setOperationAction(ISD::MLOAD,               VT, Legal);
1622      setOperationAction(ISD::MSTORE,              VT, Legal);
1623      setOperationAction(ISD::MGATHER,             VT, Custom);
1624      setOperationAction(ISD::MSCATTER,            VT, Custom);
1625    }
1626    if (!Subtarget.hasBWI()) {
1627      // Need to custom split v32i16/v64i8 bitcasts.
1628      setOperationAction(ISD::BITCAST, MVT::v32i16, Custom);
1629      setOperationAction(ISD::BITCAST, MVT::v64i8,  Custom);
1630
1631      // Better to split these into two 256-bit ops.
1632      setOperationAction(ISD::BITREVERSE, MVT::v8i64, Custom);
1633      setOperationAction(ISD::BITREVERSE, MVT::v16i32, Custom);
1634    }
1635
1636    if (Subtarget.hasVBMI2()) {
1637      for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1638        setOperationAction(ISD::FSHL, VT, Custom);
1639        setOperationAction(ISD::FSHR, VT, Custom);
1640      }
1641    }
1642  } // has AVX-512
1643
1644  // This block controls legalization for operations that don't have
1645  // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
1646  // narrower widths.
1647  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1648    // These operations are handled on non-VLX by artificially widening in
1649    // isel patterns.
1650
1651    setOperationAction(ISD::FP_TO_UINT, MVT::v8i32,
1652                       Subtarget.hasVLX() ? Legal : Custom);
1653    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32,
1654                       Subtarget.hasVLX() ? Legal : Custom);
1655    setOperationAction(ISD::FP_TO_UINT,         MVT::v2i32, Custom);
1656    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i32,
1657                       Subtarget.hasVLX() ? Legal : Custom);
1658    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32,
1659                       Subtarget.hasVLX() ? Legal : Custom);
1660    setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v2i32, Custom);
1661    setOperationAction(ISD::UINT_TO_FP, MVT::v8i32,
1662                       Subtarget.hasVLX() ? Legal : Custom);
1663    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32,
1664                       Subtarget.hasVLX() ? Legal : Custom);
1665    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32,
1666                       Subtarget.hasVLX() ? Legal : Custom);
1667    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32,
1668                       Subtarget.hasVLX() ? Legal : Custom);
1669
1670    for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1671      setOperationAction(ISD::SMAX, VT, Legal);
1672      setOperationAction(ISD::UMAX, VT, Legal);
1673      setOperationAction(ISD::SMIN, VT, Legal);
1674      setOperationAction(ISD::UMIN, VT, Legal);
1675      setOperationAction(ISD::ABS,  VT, Legal);
1676    }
1677
1678    for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1679      setOperationAction(ISD::ROTL,     VT, Custom);
1680      setOperationAction(ISD::ROTR,     VT, Custom);
1681    }
1682
1683    // Custom legalize 2x32 to get a little better code.
1684    setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
1685    setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
1686
1687    for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1688                     MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1689      setOperationAction(ISD::MSCATTER, VT, Custom);
1690
1691    if (Subtarget.hasDQI()) {
1692      for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1693        setOperationAction(ISD::SINT_TO_FP, VT,
1694                           Subtarget.hasVLX() ? Legal : Custom);
1695        setOperationAction(ISD::UINT_TO_FP, VT,
1696                           Subtarget.hasVLX() ? Legal : Custom);
1697        setOperationAction(ISD::STRICT_SINT_TO_FP, VT,
1698                           Subtarget.hasVLX() ? Legal : Custom);
1699        setOperationAction(ISD::STRICT_UINT_TO_FP, VT,
1700                           Subtarget.hasVLX() ? Legal : Custom);
1701        setOperationAction(ISD::FP_TO_SINT, VT,
1702                           Subtarget.hasVLX() ? Legal : Custom);
1703        setOperationAction(ISD::FP_TO_UINT, VT,
1704                           Subtarget.hasVLX() ? Legal : Custom);
1705        setOperationAction(ISD::STRICT_FP_TO_SINT, VT,
1706                           Subtarget.hasVLX() ? Legal : Custom);
1707        setOperationAction(ISD::STRICT_FP_TO_UINT, VT,
1708                           Subtarget.hasVLX() ? Legal : Custom);
1709        setOperationAction(ISD::MUL,               VT, Legal);
1710      }
1711    }
1712
1713    if (Subtarget.hasCDI()) {
1714      for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1715        setOperationAction(ISD::CTLZ,            VT, Legal);
1716      }
1717    } // Subtarget.hasCDI()
1718
1719    if (Subtarget.hasVPOPCNTDQ()) {
1720      for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
1721        setOperationAction(ISD::CTPOP, VT, Legal);
1722    }
1723  }
1724
1725  // This block controls legalization of v32i1/v64i1, which are available with
1726  // AVX512BW. 512-bit v32i16 and v64i8 vector legalization is controlled with
1727  // useBWIRegs.
1728  if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1729    addRegisterClass(MVT::v32i1,  &X86::VK32RegClass);
1730    addRegisterClass(MVT::v64i1,  &X86::VK64RegClass);
1731
1732    for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
1733      setOperationAction(ISD::ADD,                VT, Custom);
1734      setOperationAction(ISD::SUB,                VT, Custom);
1735      setOperationAction(ISD::MUL,                VT, Custom);
1736      setOperationAction(ISD::VSELECT,            VT, Expand);
1737      setOperationAction(ISD::UADDSAT,            VT, Custom);
1738      setOperationAction(ISD::SADDSAT,            VT, Custom);
1739      setOperationAction(ISD::USUBSAT,            VT, Custom);
1740      setOperationAction(ISD::SSUBSAT,            VT, Custom);
1741
1742      setOperationAction(ISD::TRUNCATE,           VT, Custom);
1743      setOperationAction(ISD::SETCC,              VT, Custom);
1744      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1745      setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
1746      setOperationAction(ISD::SELECT,             VT, Custom);
1747      setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1748      setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1749    }
1750
1751    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v32i1, Custom);
1752    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v64i1, Custom);
1753    setOperationAction(ISD::INSERT_SUBVECTOR,   MVT::v32i1, Custom);
1754    setOperationAction(ISD::INSERT_SUBVECTOR,   MVT::v64i1, Custom);
1755    for (auto VT : { MVT::v16i1, MVT::v32i1 })
1756      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1757
1758    // Extends from v32i1 masks to 256-bit vectors.
1759    setOperationAction(ISD::SIGN_EXTEND,        MVT::v32i8, Custom);
1760    setOperationAction(ISD::ZERO_EXTEND,        MVT::v32i8, Custom);
1761    setOperationAction(ISD::ANY_EXTEND,         MVT::v32i8, Custom);
1762  }
1763
1764  // This block controls legalization for v32i16 and v64i8. 512-bit ops can be
1765  // disabled based on prefer-vector-width and required-vector-width function
1766  // attributes.
1767  if (!Subtarget.useSoftFloat() && Subtarget.useBWIRegs()) {
1768    addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1769    addRegisterClass(MVT::v64i8,  &X86::VR512RegClass);
1770
1771    // Extends from v64i1 masks to 512-bit vectors.
1772    setOperationAction(ISD::SIGN_EXTEND,        MVT::v64i8, Custom);
1773    setOperationAction(ISD::ZERO_EXTEND,        MVT::v64i8, Custom);
1774    setOperationAction(ISD::ANY_EXTEND,         MVT::v64i8, Custom);
1775
1776    setOperationAction(ISD::MUL,                MVT::v32i16, Legal);
1777    setOperationAction(ISD::MUL,                MVT::v64i8, Custom);
1778    setOperationAction(ISD::MULHS,              MVT::v32i16, Legal);
1779    setOperationAction(ISD::MULHU,              MVT::v32i16, Legal);
1780    setOperationAction(ISD::MULHS,              MVT::v64i8, Custom);
1781    setOperationAction(ISD::MULHU,              MVT::v64i8, Custom);
1782    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v32i16, Custom);
1783    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v64i8, Custom);
1784    setOperationAction(ISD::INSERT_SUBVECTOR,   MVT::v32i16, Legal);
1785    setOperationAction(ISD::INSERT_SUBVECTOR,   MVT::v64i8, Legal);
1786    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
1787    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
1788    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v32i16, Custom);
1789    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v64i8, Custom);
1790    setOperationAction(ISD::SIGN_EXTEND,        MVT::v32i16, Custom);
1791    setOperationAction(ISD::ZERO_EXTEND,        MVT::v32i16, Custom);
1792    setOperationAction(ISD::ANY_EXTEND,         MVT::v32i16, Custom);
1793    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v32i16, Custom);
1794    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v64i8, Custom);
1795    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v32i16, Custom);
1796    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v64i8, Custom);
1797    setOperationAction(ISD::TRUNCATE,           MVT::v32i8, Custom);
1798    setOperationAction(ISD::BITREVERSE,         MVT::v64i8, Custom);
1799
1800    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
1801    setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
1802
1803    setTruncStoreAction(MVT::v32i16,  MVT::v32i8, Legal);
1804
1805    for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1806      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1807      setOperationAction(ISD::VSELECT,      VT, Custom);
1808      setOperationAction(ISD::ABS,          VT, Legal);
1809      setOperationAction(ISD::SRL,          VT, Custom);
1810      setOperationAction(ISD::SHL,          VT, Custom);
1811      setOperationAction(ISD::SRA,          VT, Custom);
1812      setOperationAction(ISD::MLOAD,        VT, Legal);
1813      setOperationAction(ISD::MSTORE,       VT, Legal);
1814      setOperationAction(ISD::CTPOP,        VT, Custom);
1815      setOperationAction(ISD::CTLZ,         VT, Custom);
1816      setOperationAction(ISD::SMAX,         VT, Legal);
1817      setOperationAction(ISD::UMAX,         VT, Legal);
1818      setOperationAction(ISD::SMIN,         VT, Legal);
1819      setOperationAction(ISD::UMIN,         VT, Legal);
1820      setOperationAction(ISD::SETCC,        VT, Custom);
1821      setOperationAction(ISD::UADDSAT,      VT, Legal);
1822      setOperationAction(ISD::SADDSAT,      VT, Legal);
1823      setOperationAction(ISD::USUBSAT,      VT, Legal);
1824      setOperationAction(ISD::SSUBSAT,      VT, Legal);
1825      setOperationAction(ISD::SELECT,       VT, Custom);
1826
1827      // These condition codes aren't legal in SSE/AVX; under AVX512 we use
1828      // setcc all the way to isel and prefer SETGT in some isel patterns.
1829      setCondCodeAction(ISD::SETLT, VT, Custom);
1830      setCondCodeAction(ISD::SETLE, VT, Custom);
1831    }
1832
1833    for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1834      setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1835    }
1836
1837    if (Subtarget.hasBITALG()) {
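          // (AVX512BITALG adds VPOPCNTB and VPOPCNTW for byte and word
          // elements.)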
1838      for (auto VT : { MVT::v64i8, MVT::v32i16 })
1839        setOperationAction(ISD::CTPOP, VT, Legal);
1840    }
1841
1842    if (Subtarget.hasVBMI2()) {
1843      setOperationAction(ISD::FSHL, MVT::v32i16, Custom);
1844      setOperationAction(ISD::FSHR, MVT::v32i16, Custom);
1845    }
1846  }
1847
1848  if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1849    for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
1850      setOperationAction(ISD::MLOAD,  VT, Subtarget.hasVLX() ? Legal : Custom);
1851      setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
1852    }
1853
1854    // These operations are handled on non-VLX by artificially widening in
1855    // isel patterns.
1856    // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
1857
1858    if (Subtarget.hasBITALG()) {
1859      for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
1860        setOperationAction(ISD::CTPOP, VT, Legal);
1861    }
1862  }
1863
1864  if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
1865    setTruncStoreAction(MVT::v4i64, MVT::v4i8,  Legal);
1866    setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
1867    setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
1868    setTruncStoreAction(MVT::v8i32, MVT::v8i8,  Legal);
1869    setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
1870
1871    setTruncStoreAction(MVT::v2i64, MVT::v2i8,  Legal);
1872    setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
1873    setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
1874    setTruncStoreAction(MVT::v4i32, MVT::v4i8,  Legal);
1875    setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
1876
1877    if (Subtarget.hasDQI()) {
1878      // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
1879      // v2f32 UINT_TO_FP is already custom under SSE2.
1880      assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
1881             isOperationCustom(ISD::STRICT_UINT_TO_FP, MVT::v2f32) &&
1882             "Unexpected operation action!");
1883      // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
1884      setOperationAction(ISD::FP_TO_SINT,        MVT::v2f32, Custom);
1885      setOperationAction(ISD::FP_TO_UINT,        MVT::v2f32, Custom);
1886      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f32, Custom);
1887      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f32, Custom);
1888    }
1889
1890    if (Subtarget.hasBWI()) {
1891      setTruncStoreAction(MVT::v16i16,  MVT::v16i8, Legal);
1892      setTruncStoreAction(MVT::v8i16,   MVT::v8i8,  Legal);
1893    }
1894
1895    if (Subtarget.hasVBMI2()) {
1896      // TODO: Make these legal even without VLX?
1897      for (auto VT : { MVT::v8i16,  MVT::v4i32, MVT::v2i64,
1898                       MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1899        setOperationAction(ISD::FSHL, VT, Custom);
1900        setOperationAction(ISD::FSHR, VT, Custom);
1901      }
1902    }
1903
1904    setOperationAction(ISD::TRUNCATE, MVT::v16i32, Custom);
1905    setOperationAction(ISD::TRUNCATE, MVT::v8i64, Custom);
1906    setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
1907  }
1908
1909  // We want to custom lower some of our intrinsics.
1910  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1911  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1912  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1913  if (!Subtarget.is64Bit()) {
1914    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1915  }
1916
1917  // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1918  // handle type legalization for these operations here.
1919  //
1920  // FIXME: We really should do custom legalization for addition and
1921  // subtraction on x86-32 once PR3203 is fixed.  We really can't do much better
1922  // than generic legalization for 64-bit multiplication-with-overflow, though.
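      // For example, @llvm.sadd.with.overflow.i64 on a 64-bit target becomes
      // roughly an ADD that sets EFLAGS plus a SETO to recover the overflow
      // bit (illustrative sketch).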
1923  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
1924    if (VT == MVT::i64 && !Subtarget.is64Bit())
1925      continue;
1926    // Add/Sub/Mul with overflow operations are custom lowered.
1927    setOperationAction(ISD::SADDO, VT, Custom);
1928    setOperationAction(ISD::UADDO, VT, Custom);
1929    setOperationAction(ISD::SSUBO, VT, Custom);
1930    setOperationAction(ISD::USUBO, VT, Custom);
1931    setOperationAction(ISD::SMULO, VT, Custom);
1932    setOperationAction(ISD::UMULO, VT, Custom);
1933
1934    // Support carry-in as a value rather than glue.
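        // For example, (ADDCARRY a, b, cin) produces {sum, cout} with the
        // carry traveling as an ordinary boolean value instead of the glued
        // EFLAGS edge used by the older ADDC/ADDE nodes (rough sketch).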
1935    setOperationAction(ISD::ADDCARRY, VT, Custom);
1936    setOperationAction(ISD::SUBCARRY, VT, Custom);
1937    setOperationAction(ISD::SETCCCARRY, VT, Custom);
1938  }
1939
1940  if (!Subtarget.is64Bit()) {
1941    // These libcalls are not available in 32-bit.
1942    setLibcallName(RTLIB::SHL_I128, nullptr);
1943    setLibcallName(RTLIB::SRL_I128, nullptr);
1944    setLibcallName(RTLIB::SRA_I128, nullptr);
1945    setLibcallName(RTLIB::MUL_I128, nullptr);
1946  }
1947
1948  // Combine sin / cos into _sincos_stret if it is available.
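      // For example (rough sketch of the intent), IR such as
      //   %s = call float @sinf(float %x)
      //   %c = call float @cosf(float %x)
      // can be folded into a single sincos_stret-style libcall that returns
      // both results at once.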
1949  if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
1950      getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
1951    setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1952    setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1953  }
1954
1955  if (Subtarget.isTargetWin64()) {
1956    setOperationAction(ISD::SDIV, MVT::i128, Custom);
1957    setOperationAction(ISD::UDIV, MVT::i128, Custom);
1958    setOperationAction(ISD::SREM, MVT::i128, Custom);
1959    setOperationAction(ISD::UREM, MVT::i128, Custom);
1960    setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1961    setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1962  }
1963
1964  // On 32-bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
1965  // is. We should promote the value to 64 bits to solve this.
1966  // This is what the CRT headers do - `fmodf` is an inline header
1967  // function casting to f64 and calling `fmod`.
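      // Roughly: when one of the f32 calls below would otherwise be expanded,
      // it is promoted instead, e.g. fmodf(x, y) becomes
      // (float)fmod((double)x, (double)y).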
1968  if (Subtarget.is32Bit() &&
1969      (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
1970    for (ISD::NodeType Op :
1971         {ISD::FCEIL,  ISD::STRICT_FCEIL,
1972          ISD::FCOS,   ISD::STRICT_FCOS,
1973          ISD::FEXP,   ISD::STRICT_FEXP,
1974          ISD::FFLOOR, ISD::STRICT_FFLOOR,
1975          ISD::FREM,   ISD::STRICT_FREM,
1976          ISD::FLOG,   ISD::STRICT_FLOG,
1977          ISD::FLOG10, ISD::STRICT_FLOG10,
1978          ISD::FPOW,   ISD::STRICT_FPOW,
1979          ISD::FSIN,   ISD::STRICT_FSIN})
1980      if (isOperationExpand(Op, MVT::f32))
1981        setOperationAction(Op, MVT::f32, Promote);
1982
1983  // We have target-specific dag combine patterns for the following nodes:
1984  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1985  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
1986  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1987  setTargetDAGCombine(ISD::CONCAT_VECTORS);
1988  setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
1989  setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
1990  setTargetDAGCombine(ISD::BITCAST);
1991  setTargetDAGCombine(ISD::VSELECT);
1992  setTargetDAGCombine(ISD::SELECT);
1993  setTargetDAGCombine(ISD::SHL);
1994  setTargetDAGCombine(ISD::SRA);
1995  setTargetDAGCombine(ISD::SRL);
1996  setTargetDAGCombine(ISD::OR);
1997  setTargetDAGCombine(ISD::AND);
1998  setTargetDAGCombine(ISD::ADD);
1999  setTargetDAGCombine(ISD::FADD);
2000  setTargetDAGCombine(ISD::FSUB);
2001  setTargetDAGCombine(ISD::FNEG);
2002  setTargetDAGCombine(ISD::FMA);
2003  setTargetDAGCombine(ISD::FMINNUM);
2004  setTargetDAGCombine(ISD::FMAXNUM);
2005  setTargetDAGCombine(ISD::SUB);
2006  setTargetDAGCombine(ISD::LOAD);
2007  setTargetDAGCombine(ISD::MLOAD);
2008  setTargetDAGCombine(ISD::STORE);
2009  setTargetDAGCombine(ISD::MSTORE);
2010  setTargetDAGCombine(ISD::TRUNCATE);
2011  setTargetDAGCombine(ISD::ZERO_EXTEND);
2012  setTargetDAGCombine(ISD::ANY_EXTEND);
2013  setTargetDAGCombine(ISD::SIGN_EXTEND);
2014  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
2015  setTargetDAGCombine(ISD::ANY_EXTEND_VECTOR_INREG);
2016  setTargetDAGCombine(ISD::SIGN_EXTEND_VECTOR_INREG);
2017  setTargetDAGCombine(ISD::ZERO_EXTEND_VECTOR_INREG);
2018  setTargetDAGCombine(ISD::SINT_TO_FP);
2019  setTargetDAGCombine(ISD::UINT_TO_FP);
2020  setTargetDAGCombine(ISD::STRICT_SINT_TO_FP);
2021  setTargetDAGCombine(ISD::STRICT_UINT_TO_FP);
2022  setTargetDAGCombine(ISD::SETCC);
2023  setTargetDAGCombine(ISD::MUL);
2024  setTargetDAGCombine(ISD::XOR);
2025  setTargetDAGCombine(ISD::MSCATTER);
2026  setTargetDAGCombine(ISD::MGATHER);
2027
2028  computeRegisterProperties(Subtarget.getRegisterInfo());
2029
2030  MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
2031  MaxStoresPerMemsetOptSize = 8;
2032  MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
2033  MaxStoresPerMemcpyOptSize = 4;
2034  MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
2035  MaxStoresPerMemmoveOptSize = 4;
2036
2037  // TODO: These control memcmp expansion in CGP and could be raised higher, but
2038  // that needs to be benchmarked and balanced with the potential use of vector
2039  // load/store types (PR33329, PR33914).
2040  MaxLoadsPerMemcmp = 2;
2041  MaxLoadsPerMemcmpOptSize = 2;
2042
2043  // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
2044  setPrefLoopAlignment(Align(1ULL << ExperimentalPrefLoopAlignment));
2045
2046  // An out-of-order CPU can speculatively execute past a predictable branch,
2047  // but a conditional move could be stalled by an expensive earlier operation.
2048  PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
2049  EnableExtLdPromotion = true;
2050  setPrefFunctionAlignment(Align(16));
2051
2052  verifyIntrinsicTables();
2053
2054  // Default to having -disable-strictnode-mutation on
2055  IsStrictFPEnabled = true;
2056}
2057
2058// This has so far only been implemented for 64-bit MachO.
2059bool X86TargetLowering::useLoadStackGuardNode() const {
2060  return Subtarget.isTargetMachO() && Subtarget.is64Bit();
2061}
2062
2063bool X86TargetLowering::useStackGuardXorFP() const {
2064  // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
2065  return Subtarget.getTargetTriple().isOSMSVCRT() && !Subtarget.isTargetMachO();
2066}
2067
2068SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
2069                                               const SDLoc &DL) const {
2070  EVT PtrTy = getPointerTy(DAG.getDataLayout());
2071  unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
2072  MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
2073  return SDValue(Node, 0);
2074}
2075
2076TargetLoweringBase::LegalizeTypeAction
2077X86TargetLowering::getPreferredVectorAction(MVT VT) const {
2078  if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
2079    return TypeSplitVector;
2080
2081  if (VT.getVectorNumElements() != 1 &&
2082      VT.getVectorElementType() != MVT::i1)
2083    return TypeWidenVector;
2084
2085  return TargetLoweringBase::getPreferredVectorAction(VT);
2086}
2087
2088MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
2089                                                     CallingConv::ID CC,
2090                                                     EVT VT) const {
2091  // v32i1 vectors should be promoted to v32i8 to match avx2.
2092  if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
2093    return MVT::v32i8;
2094  // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2095  if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2096      Subtarget.hasAVX512() &&
2097      (!isPowerOf2_32(VT.getVectorNumElements()) ||
2098       (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
2099       (VT.getVectorNumElements() > 64 && Subtarget.hasBWI())))
2100    return MVT::i8;
2101  // Split v64i1 vectors if we don't have v64i8 available.
2102  if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
2103      CC != CallingConv::X86_RegCall)
2104    return MVT::v32i1;
2105  // FIXME: Should we just make these types legal and custom split operations?
2106  if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !EnableOldKNLABI &&
2107      Subtarget.useAVX512Regs() && !Subtarget.hasBWI())
2108    return MVT::v16i32;
2109  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
2110}
2111
2112unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
2113                                                          CallingConv::ID CC,
2114                                                          EVT VT) const {
2115  // v32i1 vectors should be promoted to v32i8 to match avx2.
2116  if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
2117    return 1;
2118  // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2119  if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2120      Subtarget.hasAVX512() &&
2121      (!isPowerOf2_32(VT.getVectorNumElements()) ||
2122       (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
2123       (VT.getVectorNumElements() > 64 && Subtarget.hasBWI())))
2124    return VT.getVectorNumElements();
2125  // Split v64i1 vectors if we don't have v64i8 available.
2126  if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
2127      CC != CallingConv::X86_RegCall)
2128    return 2;
2129  // FIXME: Should we just make these types legal and custom split operations?
2130  if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !EnableOldKNLABI &&
2131      Subtarget.useAVX512Regs() && !Subtarget.hasBWI())
2132    return 1;
2133  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
2134}
2135
2136unsigned X86TargetLowering::getVectorTypeBreakdownForCallingConv(
2137    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
2138    unsigned &NumIntermediates, MVT &RegisterVT) const {
2139  // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2140  if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2141      Subtarget.hasAVX512() &&
2142      (!isPowerOf2_32(VT.getVectorNumElements()) ||
2143       (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
2144       (VT.getVectorNumElements() > 64 && Subtarget.hasBWI()))) {
2145    RegisterVT = MVT::i8;
2146    IntermediateVT = MVT::i1;
2147    NumIntermediates = VT.getVectorNumElements();
2148    return NumIntermediates;
2149  }
2150
2151  // Split v64i1 vectors if we don't have v64i8 available.
2152  if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
2153      CC != CallingConv::X86_RegCall) {
2154    RegisterVT = MVT::v32i1;
2155    IntermediateVT = MVT::v32i1;
2156    NumIntermediates = 2;
2157    return 2;
2158  }
2159
2160  return TargetLowering::getVectorTypeBreakdownForCallingConv(
2161      Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
2162}
2163
2164EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
2165                                          LLVMContext& Context,
2166                                          EVT VT) const {
2167  if (!VT.isVector())
2168    return MVT::i8;
2169
2170  if (Subtarget.hasAVX512()) {
2171    const unsigned NumElts = VT.getVectorNumElements();
2172
2173    // Figure out what this type will be legalized to.
2174    EVT LegalVT = VT;
2175    while (getTypeAction(Context, LegalVT) != TypeLegal)
2176      LegalVT = getTypeToTransformTo(Context, LegalVT);
2177
2178    // If we got a 512-bit vector then we'll definitely have a vXi1 compare.
2179    if (LegalVT.getSimpleVT().is512BitVector())
2180      return EVT::getVectorVT(Context, MVT::i1, NumElts);
2181
2182    if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
2183      // If we legalized to less than a 512-bit vector, then we will use a vXi1
2184      // compare for vXi32/vXi64 for sure. If we have BWI we will also support
2185      // vXi16/vXi8.
2186      MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
2187      if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
2188        return EVT::getVectorVT(Context, MVT::i1, NumElts);
2189    }
2190  }
2191
2192  return VT.changeVectorElementTypeToInteger();
2193}
2194
2195/// Helper for getByValTypeAlignment to determine
2196/// the desired ByVal argument alignment.
2197static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
2198  if (MaxAlign == 16)
2199    return;
2200  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
2201    if (VTy->getBitWidth() == 128)
2202      MaxAlign = 16;
2203  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2204    unsigned EltAlign = 0;
2205    getMaxByValAlign(ATy->getElementType(), EltAlign);
2206    if (EltAlign > MaxAlign)
2207      MaxAlign = EltAlign;
2208  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
2209    for (auto *EltTy : STy->elements()) {
2210      unsigned EltAlign = 0;
2211      getMaxByValAlign(EltTy, EltAlign);
2212      if (EltAlign > MaxAlign)
2213        MaxAlign = EltAlign;
2214      if (MaxAlign == 16)
2215        break;
2216    }
2217  }
2218}
2219
2220/// Return the desired alignment for ByVal aggregate
2221/// function arguments in the caller parameter area. For X86, aggregates
2222/// that contain SSE vectors are placed at 16-byte boundaries while the rest
2223/// are at 4-byte boundaries.
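    /// For example, a byval struct that contains a 128-bit vector member is
    /// placed at a 16-byte boundary on 32-bit SSE targets, while a struct of
    /// plain scalars stays at 4 (illustrative note).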
2224unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
2225                                                  const DataLayout &DL) const {
2226  if (Subtarget.is64Bit()) {
2227    // Max of 8 and alignment of type.
2228    unsigned TyAlign = DL.getABITypeAlignment(Ty);
2229    if (TyAlign > 8)
2230      return TyAlign;
2231    return 8;
2232  }
2233
2234  unsigned Align = 4;
2235  if (Subtarget.hasSSE1())
2236    getMaxByValAlign(Ty, Align);
2237  return Align;
2238}
2239
2240/// Returns the target-specific optimal type for load
2241/// and store operations as a result of memset, memcpy, and memmove
2242/// lowering. If DstAlign is zero, the destination alignment can satisfy any
2243/// constraint. Similarly, if SrcAlign is zero there is no need to check it
2244/// against the alignment requirement,
2245/// probably because the source does not need to be loaded. If 'IsMemset' is
2246/// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
2247/// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
2248/// source is constant so it does not need to be loaded.
2249/// It returns EVT::Other if the type should be determined using generic
2250/// target-independent logic.
2251/// For vector ops we check that the overall size isn't larger than our
2252/// preferred vector width.
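/// For example (assuming the function is not marked noimplicitfloat and
/// unaligned accesses are not slow), a 32-byte copy on an AVX2 target with a
/// 256-bit preferred vector width would typically come back as MVT::v32i8.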
2253EVT X86TargetLowering::getOptimalMemOpType(
2254    uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
2255    bool ZeroMemset, bool MemcpyStrSrc,
2256    const AttributeList &FuncAttributes) const {
2257  if (!FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
2258    if (Size >= 16 && (!Subtarget.isUnalignedMem16Slow() ||
2259                       ((DstAlign == 0 || DstAlign >= 16) &&
2260                        (SrcAlign == 0 || SrcAlign >= 16)))) {
2261      // FIXME: Check if unaligned 64-byte accesses are slow.
2262      if (Size >= 64 && Subtarget.hasAVX512() &&
2263          (Subtarget.getPreferVectorWidth() >= 512)) {
2264        return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
2265      }
2266      // FIXME: Check if unaligned 32-byte accesses are slow.
2267      if (Size >= 32 && Subtarget.hasAVX() &&
2268          (Subtarget.getPreferVectorWidth() >= 256)) {
2269        // Although this isn't a well-supported type for AVX1, we'll let
2270        // legalization and shuffle lowering produce the optimal codegen. If we
2271        // choose an optimal type with a vector element larger than a byte,
2272        // getMemsetStores() may create an intermediate splat (using an integer
2273        // multiply) before we splat as a vector.
2274        return MVT::v32i8;
2275      }
2276      if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
2277        return MVT::v16i8;
2278      // TODO: Can SSE1 handle a byte vector?
2279      // If we have SSE1 registers we should be able to use them.
2280      if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
2281          (Subtarget.getPreferVectorWidth() >= 128))
2282        return MVT::v4f32;
2283    } else if ((!IsMemset || ZeroMemset) && !MemcpyStrSrc && Size >= 8 &&
2284               !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
2285      // Do not use f64 to lower memcpy if the source is a string constant. It's
2286      // better to use i32 to avoid the loads.
2287      // Also, do not use f64 to lower memset unless this is a memset of zeros.
2288      // The gymnastics of splatting a byte value into an XMM register and then
2289      // only using 8-byte stores (because this is a CPU with slow unaligned
2290      // 16-byte accesses) makes that a loser.
2291      return MVT::f64;
2292    }
2293  }
2294  // This is a compromise. If we reach here, unaligned accesses may be slow on
2295  // this target. However, creating smaller, aligned accesses could be even
2296  // slower and would certainly be a lot more code.
2297  if (Subtarget.is64Bit() && Size >= 8)
2298    return MVT::i64;
2299  return MVT::i32;
2300}
2301
2302bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
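  // f32/f64 memory ops are only safe when they are handled in SSE registers;
  // an x87 load/store is not guaranteed to be a bit-exact copy (e.g. for
  // signaling NaNs).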
2303  if (VT == MVT::f32)
2304    return X86ScalarSSEf32;
2305  else if (VT == MVT::f64)
2306    return X86ScalarSSEf64;
2307  return true;
2308}
2309
2310bool X86TargetLowering::allowsMisalignedMemoryAccesses(
2311    EVT VT, unsigned, unsigned Align, MachineMemOperand::Flags Flags,
2312    bool *Fast) const {
2313  if (Fast) {
2314    switch (VT.getSizeInBits()) {
2315    default:
2316      // 8-byte and under are always assumed to be fast.
2317      *Fast = true;
2318      break;
2319    case 128:
2320      *Fast = !Subtarget.isUnalignedMem16Slow();
2321      break;
2322    case 256:
2323      *Fast = !Subtarget.isUnalignedMem32Slow();
2324      break;
2325    // TODO: What about AVX-512 (512-bit) accesses?
2326    }
2327  }
2328  // NonTemporal vector memory ops must be aligned.
2329  if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
2330    // NT loads can only be vector aligned, so if it's less aligned than the
2331    // minimum vector size (which we can split the vector down to), we might as
2332    // well use a regular unaligned vector load.
2333    // We don't have any NT loads pre-SSE41.
2334    if (!!(Flags & MachineMemOperand::MOLoad))
2335      return (Align < 16 || !Subtarget.hasSSE41());
2336    return false;
2337  }
2338  // Misaligned accesses of any size are always allowed.
2339  return true;
2340}
2341
2342/// Return the entry encoding for a jump table in the
2343/// current function.  The returned value is a member of the
2344/// MachineJumpTableInfo::JTEntryKind enum.
2345unsigned X86TargetLowering::getJumpTableEncoding() const {
2346  // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
2347  // symbol.
2348  if (isPositionIndependent() && Subtarget.isPICStyleGOT())
2349    return MachineJumpTableInfo::EK_Custom32;
2350
2351  // Otherwise, use the normal jump table encoding heuristics.
2352  return TargetLowering::getJumpTableEncoding();
2353}
2354
2355bool X86TargetLowering::useSoftFloat() const {
2356  return Subtarget.useSoftFloat();
2357}
2358
2359void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
2360                                              ArgListTy &Args) const {
2361
2362  // Only relabel X86-32 for C / Stdcall CCs.
2363  if (Subtarget.is64Bit())
2364    return;
2365  if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
2366    return;
2367  unsigned ParamRegs = 0;
2368  if (auto *M = MF->getFunction().getParent())
2369    ParamRegs = M->getNumberRegisterParameters();
2370
2371  // Mark the first N integer arguments as being passed in registers (inreg).
2372  for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
2373    Type *T = Args[Idx].Ty;
2374    if (T->isIntOrPtrTy())
2375      if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
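        // Arguments of up to 4 bytes use one register; 8-byte arguments use two.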
2376        unsigned numRegs = 1;
2377        if (MF->getDataLayout().getTypeAllocSize(T) > 4)
2378          numRegs = 2;
2379        if (ParamRegs < numRegs)
2380          return;
2381        ParamRegs -= numRegs;
2382        Args[Idx].IsInReg = true;
2383      }
2384  }
2385}
2386
2387const MCExpr *
2388X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
2389                                             const MachineBasicBlock *MBB,
2390                                             unsigned uid, MCContext &Ctx) const {
2391  assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
2392  // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
2393  // relocations.
2394  return MCSymbolRefExpr::create(MBB->getSymbol(),
2395                                 MCSymbolRefExpr::VK_GOTOFF, Ctx);
2396}
2397
2398/// Returns relocation base for the given PIC jumptable.
2399SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
2400                                                    SelectionDAG &DAG) const {
2401  if (!Subtarget.is64Bit())
2402    // This doesn't have SDLoc associated with it, but is not really the
2403    // same as a Register.
2404    return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
2405                       getPointerTy(DAG.getDataLayout()));
2406  return Table;
2407}
2408
2409/// This returns the relocation base for the given PIC jumptable,
2410/// the same as getPICJumpTableRelocBase, but as an MCExpr.
2411const MCExpr *X86TargetLowering::
2412getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
2413                             MCContext &Ctx) const {
2414  // X86-64 uses RIP relative addressing based on the jump table label.
2415  if (Subtarget.isPICStyleRIPRel())
2416    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2417
2418  // Otherwise, the reference is relative to the PIC base.
2419  return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2420}
2421
2422std::pair<const TargetRegisterClass *, uint8_t>
2423X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
2424                                           MVT VT) const {
2425  const TargetRegisterClass *RRC = nullptr;
2426  uint8_t Cost = 1;
2427  switch (VT.SimpleTy) {
2428  default:
2429    return TargetLowering::findRepresentativeClass(TRI, VT);
2430  case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
2431    RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
2432    break;
2433  case MVT::x86mmx:
2434    RRC = &X86::VR64RegClass;
2435    break;
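  // All scalar FP and vector types below share the XMM/YMM/ZMM register file,
  // so use the 128-bit class as their representative register class.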
2436  case MVT::f32: case MVT::f64:
2437  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
2438  case MVT::v4f32: case MVT::v2f64:
2439  case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
2440  case MVT::v8f32: case MVT::v4f64:
2441  case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
2442  case MVT::v16f32: case MVT::v8f64:
2443    RRC = &X86::VR128XRegClass;
2444    break;
2445  }
2446  return std::make_pair(RRC, Cost);
2447}
2448
2449unsigned X86TargetLowering::getAddressSpace() const {
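  // Address space 256 corresponds to %gs and 257 to %fs. 64-bit user code
  // keeps the relevant TLS data in %fs; the kernel code model and 32-bit
  // targets use %gs.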
2450  if (Subtarget.is64Bit())
2451    return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
2452  return 256;
2453}
2454
2455static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
2456  return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
2457         (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
2458}
2459
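// Build a constant pointer to a fixed byte offset within a segment address
// space (e.g. %gs or %fs); used by the stack guard and safe stack lowerings
// below.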
2460static Constant* SegmentOffset(IRBuilder<> &IRB,
2461                               unsigned Offset, unsigned AddressSpace) {
2462  return ConstantExpr::getIntToPtr(
2463      ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
2464      Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
2465}
2466
2467Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
2468  // glibc, bionic, and Fuchsia have a special slot for the stack guard in
2469  // tcbhead_t; use it instead of the usual global variable (see
2470  // sysdeps/{i386,x86_64}/nptl/tls.h)
2471  if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
2472    if (Subtarget.isTargetFuchsia()) {
2473      // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
2474      return SegmentOffset(IRB, 0x10, getAddressSpace());
2475    } else {
2476      // %fs:0x28, unless we're using a Kernel code model, in which case
2477      // it's %gs:0x28.  gs:0x14 on i386.
2478      unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
2479      return SegmentOffset(IRB, Offset, getAddressSpace());
2480    }
2481  }
2482
2483  return TargetLowering::getIRStackGuard(IRB);
2484}
2485
2486void X86TargetLowering::insertSSPDeclarations(Module &M) const {
2487  // MSVC CRT provides functionalities for stack protection.
2488  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2489      Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2490    // MSVC CRT has a global variable holding security cookie.
2491    M.getOrInsertGlobal("__security_cookie",
2492                        Type::getInt8PtrTy(M.getContext()));
2493
2494    // MSVC CRT has a function to validate security cookie.
2495    FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
2496        "__security_check_cookie", Type::getVoidTy(M.getContext()),
2497        Type::getInt8PtrTy(M.getContext()));
2498    if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
2499      F->setCallingConv(CallingConv::X86_FastCall);
2500      F->addAttribute(1, Attribute::AttrKind::InReg);
2501    }
2502    return;
2503  }
2504  // glibc, bionic, and Fuchsia have a special slot for the stack guard.
2505  if (hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
2506    return;
2507  TargetLowering::insertSSPDeclarations(M);
2508}
2509
2510Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
2511  // MSVC CRT has a global variable holding security cookie.
2512  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2513      Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2514    return M.getGlobalVariable("__security_cookie");
2515  }
2516  return TargetLowering::getSDagStackGuard(M);
2517}
2518
2519Function *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
2520  // MSVC CRT has a function to validate security cookie.
2521  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2522      Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2523    return M.getFunction("__security_check_cookie");
2524  }
2525  return TargetLowering::getSSPStackGuardCheck(M);
2526}
2527
2528Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
2529  if (Subtarget.getTargetTriple().isOSContiki())
2530    return getDefaultSafeStackPointerLocation(IRB, false);
2531
2532  // Android provides a fixed TLS slot for the SafeStack pointer. See the
2533  // definition of TLS_SLOT_SAFESTACK in
2534  // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
2535  if (Subtarget.isTargetAndroid()) {
2536    // %fs:0x48, unless we're using a Kernel code model, in which case it's
2537    // %gs:0x48.  %gs:0x24 on i386.
2538    unsigned Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
2539    return SegmentOffset(IRB, Offset, getAddressSpace());
2540  }
2541
2542  // Fuchsia is similar.
2543  if (Subtarget.isTargetFuchsia()) {
2544    // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
2545    return SegmentOffset(IRB, 0x18, getAddressSpace());
2546  }
2547
2548  return TargetLowering::getSafeStackPointerLocation(IRB);
2549}
2550
2551bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
2552                                            unsigned DestAS) const {
2553  assert(SrcAS != DestAS && "Expected different address spaces!");
2554
2555  const TargetMachine &TM = getTargetMachine();
2556  if (TM.getPointerSize(SrcAS) != TM.getPointerSize(DestAS))
2557    return false;
2558
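  // Only consider the cast a no-op when neither side is a segment address
  // space (segment address spaces such as %gs and %fs start at 256).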
2559  return SrcAS < 256 && DestAS < 256;
2560}
2561
2562//===----------------------------------------------------------------------===//
2563//               Return Value Calling Convention Implementation
2564//===----------------------------------------------------------------------===//
2565
2566bool X86TargetLowering::CanLowerReturn(
2567    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
2568    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2569  SmallVector<CCValAssign, 16> RVLocs;
2570  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2571  return CCInfo.CheckReturn(Outs, RetCC_X86);
2572}
2573
2574const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
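  // R11 is caller-saved and is not used for argument passing in either the
  // SysV or Win64 calling conventions, so it is a safe scratch register.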
2575  static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2576  return ScratchRegs;
2577}
2578
2579/// Lowers mask values (v*i1) to the local register values.
2580/// \returns the DAG node after lowering to the register type.
2581static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
2582                               const SDLoc &Dl, SelectionDAG &DAG) {
2583  EVT ValVT = ValArg.getValueType();
2584
2585  if (ValVT == MVT::v1i1)
2586    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Dl, ValLoc, ValArg,
2587                       DAG.getIntPtrConstant(0, Dl));
2588
2589  if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
2590      (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
2591    // Two stage lowering might be required
2592    // bitcast:   v8i1 -> i8 / v16i1 -> i16
2593    // anyextend: i8   -> i32 / i16   -> i32
2594    EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
2595    SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
2596    if (ValLoc == MVT::i32)
2597      ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy);
2598    return ValToCopy;
2599  }
2600
2601  if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
2602      (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
2603    // One stage lowering is required
2604    // bitcast:   v32i1 -> i32 / v64i1 -> i64
2605    return DAG.getBitcast(ValLoc, ValArg);
2606  }
2607
2608  return DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValArg);
2609}
2610
2611/// Breaks a v64i1 value into two registers and adds the new nodes to the DAG.
2612static void Passv64i1ArgInRegs(
2613    const SDLoc &Dl, SelectionDAG &DAG, SDValue &Arg,
2614    SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, CCValAssign &VA,
2615    CCValAssign &NextVA, const X86Subtarget &Subtarget) {
2616  assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
2617  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2618  assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
2619  assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2620         "The value should reside in two registers");
2621
2622  // Before splitting the value we cast it to i64
2623  Arg = DAG.getBitcast(MVT::i64, Arg);
2624
2625  // Splitting the value into two i32 types
2626  SDValue Lo, Hi;
2627  Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2628                   DAG.getConstant(0, Dl, MVT::i32));
2629  Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2630                   DAG.getConstant(1, Dl, MVT::i32));
2631
2632  // Attach the two i32 values to their corresponding registers.
2633  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
2634  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
2635}
2636
2637SDValue
2638X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2639                               bool isVarArg,
2640                               const SmallVectorImpl<ISD::OutputArg> &Outs,
2641                               const SmallVectorImpl<SDValue> &OutVals,
2642                               const SDLoc &dl, SelectionDAG &DAG) const {
2643  MachineFunction &MF = DAG.getMachineFunction();
2644  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2645
2646  // In some cases we need to disable registers from the default CSR list.
2647  // For example, when they are used for argument passing.
2648  bool ShouldDisableCalleeSavedRegister =
2649      CallConv == CallingConv::X86_RegCall ||
2650      MF.getFunction().hasFnAttribute("no_caller_saved_registers");
2651
2652  if (CallConv == CallingConv::X86_INTR && !Outs.empty())
2653    report_fatal_error("X86 interrupts may not return any value");
2654
2655  SmallVector<CCValAssign, 16> RVLocs;
2656  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2657  CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2658
2659  SDValue Flag;
2660  SmallVector<SDValue, 6> RetOps;
2661  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2662  // Operand #1 = Bytes To Pop
2663  RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
2664                   MVT::i32));
2665
2666  // Copy the result values into the output registers.
2667  for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
2668       ++I, ++OutsIndex) {
2669    CCValAssign &VA = RVLocs[I];
2670    assert(VA.isRegLoc() && "Can only return in registers!");
2671
2672    // Add the register to the CalleeSaveDisableRegs list.
2673    if (ShouldDisableCalleeSavedRegister)
2674      MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
2675
2676    SDValue ValToCopy = OutVals[OutsIndex];
2677    EVT ValVT = ValToCopy.getValueType();
2678
2679    // Promote values to the appropriate types.
2680    if (VA.getLocInfo() == CCValAssign::SExt)
2681      ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2682    else if (VA.getLocInfo() == CCValAssign::ZExt)
2683      ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2684    else if (VA.getLocInfo() == CCValAssign::AExt) {
2685      if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
2686        ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
2687      else
2688        ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2689    }
2690    else if (VA.getLocInfo() == CCValAssign::BCvt)
2691      ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
2692
2693    assert(VA.getLocInfo() != CCValAssign::FPExt &&
2694           "Unexpected FP-extend for return value.");
2695
2696    // Report an error if we have attempted to return a value via an XMM
2697    // register and SSE was disabled.
2698    if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
2699      errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
2700      VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2701    } else if (!Subtarget.hasSSE2() &&
2702               X86::FR64XRegClass.contains(VA.getLocReg()) &&
2703               ValVT == MVT::f64) {
2704      // When returning a double via an XMM register, report an error if SSE2 is
2705      // not enabled.
2706      errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
2707      VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2708    }
2709
2710    // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2711    // the RET instruction and handled by the FP Stackifier.
2712    if (VA.getLocReg() == X86::FP0 ||
2713        VA.getLocReg() == X86::FP1) {
2714      // If this is a copy from an xmm register to ST(0), use an FPExtend to
2715      // change the value to the FP stack register class.
2716      if (isScalarFPTypeInSSEReg(VA.getValVT()))
2717        ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2718      RetOps.push_back(ValToCopy);
2719      // Don't emit a copytoreg.
2720      continue;
2721    }
2722
2723    // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2724    // which is returned in RAX / RDX.
2725    if (Subtarget.is64Bit()) {
2726      if (ValVT == MVT::x86mmx) {
2727        if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2728          ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
2729          ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2730                                  ValToCopy);
2731          // If we don't have SSE2 available, convert to v4f32 so the generated
2732          // register is legal.
2733          if (!Subtarget.hasSSE2())
2734            ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
2735        }
2736      }
2737    }
2738
2739    SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2740
2741    if (VA.needsCustom()) {
2742      assert(VA.getValVT() == MVT::v64i1 &&
2743             "Currently the only custom case is when we split v64i1 to 2 regs");
2744
2745      Passv64i1ArgInRegs(dl, DAG, ValToCopy, RegsToPass, VA, RVLocs[++I],
2746                         Subtarget);
2747
2748      assert(2 == RegsToPass.size() &&
2749             "Expecting two registers after Pass64BitArgInRegs");
2750
2751      // Add the second register to the CalleeSaveDisableRegs list.
2752      if (ShouldDisableCalleeSavedRegister)
2753        MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
2754    } else {
2755      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
2756    }
2757
2758    // Add nodes to the DAG and add the values into the RetOps list
2759    for (auto &Reg : RegsToPass) {
2760      Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, Flag);
2761      Flag = Chain.getValue(1);
2762      RetOps.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
2763    }
2764  }
2765
2766  // Swift calling convention does not require we copy the sret argument
2767  // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
2768
2769  // All x86 ABIs require that for returning structs by value we copy
2770  // the sret argument into %rax/%eax (depending on ABI) for the return.
2771  // We saved the argument into a virtual register in the entry block,
2772  // so now we copy the value out and into %rax/%eax.
2773  //
2774  // Checking Function.hasStructRetAttr() here is insufficient because the IR
2775  // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2776  // false, then an sret argument may be implicitly inserted in the SelDAG. In
2777  // either case FuncInfo->setSRetReturnReg() will have been called.
2778  if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2779    // When we have both sret and another return value, we should use the
2780    // original Chain stored in RetOps[0], instead of the current Chain updated
2781    // in the above loop. If we only have sret, RetOps[0] equals Chain.
2782
2783    // For the case of sret and another return value, we have
2784    //   Chain_0 at the function entry
2785    //   Chain_1 = getCopyToReg(Chain_0) in the above loop
2786    // If we use Chain_1 in getCopyFromReg, we will have
2787    //   Val = getCopyFromReg(Chain_1)
2788    //   Chain_2 = getCopyToReg(Chain_1, Val) from below
2789
2790    // getCopyToReg(Chain_0) will be glued together with
2791    // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
2792    // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
2793    //   Data dependency from Unit B to Unit A due to usage of Val in
2794    //     getCopyToReg(Chain_1, Val)
2795    //   Chain dependency from Unit A to Unit B
2796
2797    // So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
2798    SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
2799                                     getPointerTy(MF.getDataLayout()));
2800
2801    unsigned RetValReg
2802        = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
2803          X86::RAX : X86::EAX;
2804    Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2805    Flag = Chain.getValue(1);
2806
2807    // RAX/EAX now acts like a return value.
2808    RetOps.push_back(
2809        DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
2810
2811    // Add the returned register to the CalleeSaveDisableRegs list.
2812    if (ShouldDisableCalleeSavedRegister)
2813      MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
2814  }
2815
2816  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
2817  const MCPhysReg *I =
2818      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2819  if (I) {
2820    for (; *I; ++I) {
2821      if (X86::GR64RegClass.contains(*I))
2822        RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2823      else
2824        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2825    }
2826  }
2827
2828  RetOps[0] = Chain;  // Update chain.
2829
2830  // Add the flag if we have it.
2831  if (Flag.getNode())
2832    RetOps.push_back(Flag);
2833
2834  X86ISD::NodeType opcode = X86ISD::RET_FLAG;
2835  if (CallConv == CallingConv::X86_INTR)
2836    opcode = X86ISD::IRET;
2837  return DAG.getNode(opcode, dl, MVT::Other, RetOps);
2838}
2839
2840bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2841  if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
2842    return false;
2843
2844  SDValue TCChain = Chain;
2845  SDNode *Copy = *N->use_begin();
2846  if (Copy->getOpcode() == ISD::CopyToReg) {
2847    // If the copy has a glue operand, we conservatively assume it isn't safe to
2848    // perform a tail call.
2849    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2850      return false;
2851    TCChain = Copy->getOperand(0);
2852  } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2853    return false;
2854
2855  bool HasRet = false;
2856  for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2857       UI != UE; ++UI) {
2858    if (UI->getOpcode() != X86ISD::RET_FLAG)
2859      return false;
2860    // If we are returning more than one value, we can definitely
2861    // not make a tail call; see PR19530.
2862    if (UI->getNumOperands() > 4)
2863      return false;
2864    if (UI->getNumOperands() == 4 &&
2865        UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2866      return false;
2867    HasRet = true;
2868  }
2869
2870  if (!HasRet)
2871    return false;
2872
2873  Chain = TCChain;
2874  return true;
2875}
2876
2877EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
2878                                           ISD::NodeType ExtendKind) const {
2879  MVT ReturnMVT = MVT::i32;
2880
2881  bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
2882  if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
2883    // The ABI does not require i1, i8 or i16 to be extended.
2884    //
2885    // On Darwin, there is code in the wild relying on Clang's old behaviour of
2886    // always extending i8/i16 return values, so keep doing that for now.
2887    // (PR26665).
2888    ReturnMVT = MVT::i8;
2889  }
2890
2891  EVT MinVT = getRegisterType(Context, ReturnMVT);
2892  return VT.bitsLT(MinVT) ? MinVT : VT;
2893}
2894
2895/// Reads two 32 bit registers and creates a 64 bit mask value.
2896/// \param VA The current 32 bit value that needs to be assigned.
2897/// \param NextVA The next 32 bit value that needs to be assigned.
2898/// \param Root The parent DAG node.
2899/// \param [in,out] InFlag Represents the SDValue in the parent DAG node for
2900///                        glue purposes. In case the DAG is already using a
2901///                        physical register instead of a virtual one, we
2902///                        should glue our new SDValue to the InFlag SDValue.
2903/// \return a new 64 bit SDValue.
2904static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
2905                                SDValue &Root, SelectionDAG &DAG,
2906                                const SDLoc &Dl, const X86Subtarget &Subtarget,
2907                                SDValue *InFlag = nullptr) {
2908  assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
2909  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2910  assert(VA.getValVT() == MVT::v64i1 &&
2911         "Expecting first location of 64 bit width type");
2912  assert(NextVA.getValVT() == VA.getValVT() &&
2913         "The locations should have the same type");
2914  assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2915         "The values should reside in two registers");
2916
2917  SDValue Lo, Hi;
2918  SDValue ArgValueLo, ArgValueHi;
2919
2920  MachineFunction &MF = DAG.getMachineFunction();
2921  const TargetRegisterClass *RC = &X86::GR32RegClass;
2922
2923  // Read a 32 bit value from the registers.
2924  if (nullptr == InFlag) {
2925    // When no physical register is present,
2926    // create an intermediate virtual register.
2927    unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2928    ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2929    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
2930    ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2931  } else {
2932    // When a physical register is available read the value from it and glue
2933    // the reads together.
2934    ArgValueLo =
2935      DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag);
2936    *InFlag = ArgValueLo.getValue(2);
2937    ArgValueHi =
2938      DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag);
2939    *InFlag = ArgValueHi.getValue(2);
2940  }
2941
2942  // Convert the i32 type into v32i1 type.
2943  Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);
2944
2945  // Convert the i32 type into v32i1 type.
2946  Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);
2947
2948  // Concatenate the two values together.
2949  return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi);
2950}
2951
2952/// The function will lower a register of various sizes (8/16/32/64)
2953/// to a mask value of the expected size (v8i1/v16i1/v32i1/v64i1).
2954/// \returns a DAG node containing the operand after lowering to a mask type.
2955static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
2956                               const EVT &ValLoc, const SDLoc &Dl,
2957                               SelectionDAG &DAG) {
2958  SDValue ValReturned = ValArg;
2959
2960  if (ValVT == MVT::v1i1)
2961    return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);
2962
2963  if (ValVT == MVT::v64i1) {
2964    // On a 32 bit machine this case is handled by getv64i1Argument.
2965    assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
2966    // On a 64 bit machine there is no need to truncate the value, only bitcast it.
2967  } else {
2968    MVT maskLen;
2969    switch (ValVT.getSimpleVT().SimpleTy) {
2970    case MVT::v8i1:
2971      maskLen = MVT::i8;
2972      break;
2973    case MVT::v16i1:
2974      maskLen = MVT::i16;
2975      break;
2976    case MVT::v32i1:
2977      maskLen = MVT::i32;
2978      break;
2979    default:
2980      llvm_unreachable("Expecting a vector of i1 types");
2981    }
2982
2983    ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
2984  }
2985  return DAG.getBitcast(ValVT, ValReturned);
2986}
2987
2988/// Lower the result values of a call into the
2989/// appropriate copies out of appropriate physical registers.
2990///
2991SDValue X86TargetLowering::LowerCallResult(
2992    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
2993    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2994    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
2995    uint32_t *RegMask) const {
2996
2997  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2998  // Assign locations to each value returned by this call.
2999  SmallVector<CCValAssign, 16> RVLocs;
3000  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
3001                 *DAG.getContext());
3002  CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3003
3004  // Copy all of the result registers out of their specified physreg.
3005  for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
3006       ++I, ++InsIndex) {
3007    CCValAssign &VA = RVLocs[I];
3008    EVT CopyVT = VA.getLocVT();
3009
3010    // In some calling conventions we need to remove the used registers
3011    // from the register mask.
3012    if (RegMask) {
3013      for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true);
3014           SubRegs.isValid(); ++SubRegs)
3015        RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
3016    }
3017
3018    // Report an error if there was an attempt to return FP values via XMM
3019    // registers.
3020    if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
3021      errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
3022      if (VA.getLocReg() == X86::XMM1)
3023        VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
3024      else
3025        VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3026    } else if (!Subtarget.hasSSE2() &&
3027               X86::FR64XRegClass.contains(VA.getLocReg()) &&
3028               CopyVT == MVT::f64) {
3029      errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
3030      if (VA.getLocReg() == X86::XMM1)
3031        VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
3032      else
3033        VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3034    }
3035
3036    // If we prefer to use the value in xmm registers, copy it out as f80 and
3037    // use a truncate to move it from fp stack reg to xmm reg.
3038    bool RoundAfterCopy = false;
3039    if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
3040        isScalarFPTypeInSSEReg(VA.getValVT())) {
3041      if (!Subtarget.hasX87())
3042        report_fatal_error("X87 register return with X87 disabled");
3043      CopyVT = MVT::f80;
3044      RoundAfterCopy = (CopyVT != VA.getLocVT());
3045    }
3046
3047    SDValue Val;
3048    if (VA.needsCustom()) {
3049      assert(VA.getValVT() == MVT::v64i1 &&
3050             "Currently the only custom case is when we split v64i1 to 2 regs");
3051      Val =
3052          getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
3053    } else {
3054      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag)
3055                  .getValue(1);
3056      Val = Chain.getValue(0);
3057      InFlag = Chain.getValue(2);
3058    }
3059
3060    if (RoundAfterCopy)
3061      Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
3062                        // This truncation won't change the value.
3063                        DAG.getIntPtrConstant(1, dl));
3064
3065    if (VA.isExtInLoc() && (VA.getValVT().getScalarType() == MVT::i1)) {
3066      if (VA.getValVT().isVector() &&
3067          ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
3068           (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
3069        // promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
3070        Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
3071      } else
3072        Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
3073    }
3074
3075    if (VA.getLocInfo() == CCValAssign::BCvt)
3076      Val = DAG.getBitcast(VA.getValVT(), Val);
3077
3078    InVals.push_back(Val);
3079  }
3080
3081  return Chain;
3082}
3083
3084//===----------------------------------------------------------------------===//
3085//                C & StdCall & Fast Calling Convention implementation
3086//===----------------------------------------------------------------------===//
3087//  The StdCall calling convention is the standard for many Windows API
3088//  routines. It differs from the C calling convention only slightly: the
3089//  callee cleans up the stack rather than the caller, and symbols are
3090//  decorated with an argument-size suffix. It doesn't support vector arguments.
3091//  For info on fast calling convention see Fast Calling Convention (tail call)
3092//  implementation LowerX86_32FastCCCallTo.
3093
3094/// callIsStructReturn - Determines whether a call uses struct return
3095/// semantics.
3096enum StructReturnType {
3097  NotStructReturn,
3098  RegStructReturn,
3099  StackStructReturn
3100};
3101static StructReturnType
3102callIsStructReturn(ArrayRef<ISD::OutputArg> Outs, bool IsMCU) {
3103  if (Outs.empty())
3104    return NotStructReturn;
3105
3106  const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
3107  if (!Flags.isSRet())
3108    return NotStructReturn;
3109  if (Flags.isInReg() || IsMCU)
3110    return RegStructReturn;
3111  return StackStructReturn;
3112}
3113
3114/// Determines whether a function uses struct return semantics.
3115static StructReturnType
3116argsAreStructReturn(ArrayRef<ISD::InputArg> Ins, bool IsMCU) {
3117  if (Ins.empty())
3118    return NotStructReturn;
3119
3120  const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
3121  if (!Flags.isSRet())
3122    return NotStructReturn;
3123  if (Flags.isInReg() || IsMCU)
3124    return RegStructReturn;
3125  return StackStructReturn;
3126}
3127
3128/// Make a copy of an aggregate at address specified by "Src" to address
3129/// "Dst" with size and alignment information specified by the specific
3130/// parameter attribute. The copy will be passed as a byval function parameter.
3131static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
3132                                         SDValue Chain, ISD::ArgFlagsTy Flags,
3133                                         SelectionDAG &DAG, const SDLoc &dl) {
3134  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
3135
3136  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
3137                       /*isVolatile*/false, /*AlwaysInline=*/true,
3138                       /*isTailCall*/false,
3139                       MachinePointerInfo(), MachinePointerInfo());
3140}
3141
3142/// Return true if the calling convention is one that we can guarantee TCO for.
3143static bool canGuaranteeTCO(CallingConv::ID CC) {
3144  return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
3145          CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
3146          CC == CallingConv::HHVM || CC == CallingConv::Tail);
3147}
3148
3149/// Return true if we might ever do TCO for calls with this calling convention.
3150static bool mayTailCallThisCC(CallingConv::ID CC) {
3151  switch (CC) {
3152  // C calling conventions:
3153  case CallingConv::C:
3154  case CallingConv::Win64:
3155  case CallingConv::X86_64_SysV:
3156  // Callee pop conventions:
3157  case CallingConv::X86_ThisCall:
3158  case CallingConv::X86_StdCall:
3159  case CallingConv::X86_VectorCall:
3160  case CallingConv::X86_FastCall:
3161  // Swift:
3162  case CallingConv::Swift:
3163    return true;
3164  default:
3165    return canGuaranteeTCO(CC);
3166  }
3167}
3168
3169/// Return true if the function is being made into a tailcall target by
3170/// changing its ABI.
3171static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
3172  return (GuaranteedTailCallOpt && canGuaranteeTCO(CC)) || CC == CallingConv::Tail;
3173}
3174
3175bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
3176  if (!CI->isTailCall())
3177    return false;
3178
3179  ImmutableCallSite CS(CI);
3180  CallingConv::ID CalleeCC = CS.getCallingConv();
3181  if (!mayTailCallThisCC(CalleeCC))
3182    return false;
3183
3184  return true;
3185}
3186
3187SDValue
3188X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
3189                                    const SmallVectorImpl<ISD::InputArg> &Ins,
3190                                    const SDLoc &dl, SelectionDAG &DAG,
3191                                    const CCValAssign &VA,
3192                                    MachineFrameInfo &MFI, unsigned i) const {
3193  // Create the nodes corresponding to a load from this parameter slot.
3194  ISD::ArgFlagsTy Flags = Ins[i].Flags;
3195  bool AlwaysUseMutable = shouldGuaranteeTCO(
3196      CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
3197  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
3198  EVT ValVT;
3199  MVT PtrVT = getPointerTy(DAG.getDataLayout());
3200
3201  // If the value is passed by pointer, the address is passed instead of the
3202  // value itself. No need to extend if the mask value and the location share
3203  // the same absolute size.
3204  bool ExtendedInMem =
3205      VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
3206      VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
3207
3208  if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
3209    ValVT = VA.getLocVT();
3210  else
3211    ValVT = VA.getValVT();
3212
3213  // FIXME: For now, all byval parameter objects are marked mutable. This can be
3214  // changed with more analysis.
3215  // In case of tail call optimization, mark all arguments mutable, since they
3216  // could be overwritten by the lowering of arguments in case of a tail call.
3217  if (Flags.isByVal()) {
3218    unsigned Bytes = Flags.getByValSize();
3219    if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
3220
3221    // FIXME: For now, all byval parameter objects are marked as aliasing. This
3222    // can be improved with deeper analysis.
3223    int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
3224                                   /*isAliased=*/true);
3225    return DAG.getFrameIndex(FI, PtrVT);
3226  }
3227
3228  // This is an argument in memory. We might be able to perform copy elision.
3229  // If the argument is passed directly in memory without any extension, then we
3230  // can perform copy elision. Large vector types, for example, may be passed
3231  // indirectly by pointer.
3232  if (Flags.isCopyElisionCandidate() &&
3233      VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem) {
3234    EVT ArgVT = Ins[i].ArgVT;
3235    SDValue PartAddr;
3236    if (Ins[i].PartOffset == 0) {
3237      // If this is a one-part value or the first part of a multi-part value,
3238      // create a stack object for the entire argument value type and return a
3239      // load from our portion of it. This assumes that if the first part of an
3240      // argument is in memory, the rest will also be in memory.
3241      int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
3242                                     /*IsImmutable=*/false);
3243      PartAddr = DAG.getFrameIndex(FI, PtrVT);
3244      return DAG.getLoad(
3245          ValVT, dl, Chain, PartAddr,
3246          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3247    } else {
3248      // This is not the first piece of an argument in memory. See if there is
3249      // already a fixed stack object including this offset. If so, assume it
3250      // was created by the PartOffset == 0 branch above and create a load from
3251      // the appropriate offset into it.
3252      int64_t PartBegin = VA.getLocMemOffset();
3253      int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
3254      int FI = MFI.getObjectIndexBegin();
3255      for (; MFI.isFixedObjectIndex(FI); ++FI) {
3256        int64_t ObjBegin = MFI.getObjectOffset(FI);
3257        int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
3258        if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
3259          break;
3260      }
3261      if (MFI.isFixedObjectIndex(FI)) {
3262        SDValue Addr =
3263            DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
3264                        DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
3265        return DAG.getLoad(
3266            ValVT, dl, Chain, Addr,
3267            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI,
3268                                              Ins[i].PartOffset));
3269      }
3270    }
3271  }
3272
3273  int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
3274                                 VA.getLocMemOffset(), isImmutable);
3275
3276  // Set SExt or ZExt flag.
3277  if (VA.getLocInfo() == CCValAssign::ZExt) {
3278    MFI.setObjectZExt(FI, true);
3279  } else if (VA.getLocInfo() == CCValAssign::SExt) {
3280    MFI.setObjectSExt(FI, true);
3281  }
3282
3283  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3284  SDValue Val = DAG.getLoad(
3285      ValVT, dl, Chain, FIN,
3286      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3287  return ExtendedInMem
3288             ? (VA.getValVT().isVector()
3289                    ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
3290                    : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
3291             : Val;
3292}
3293
3294// FIXME: Get this from tablegen.
3295static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
3296                                                const X86Subtarget &Subtarget) {
3297  assert(Subtarget.is64Bit());
3298
3299  if (Subtarget.isCallingConvWin64(CallConv)) {
3300    static const MCPhysReg GPR64ArgRegsWin64[] = {
3301      X86::RCX, X86::RDX, X86::R8,  X86::R9
3302    };
3303    return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
3304  }
3305
3306  static const MCPhysReg GPR64ArgRegs64Bit[] = {
3307    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
3308  };
3309  return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
3310}
3311
3312// FIXME: Get this from tablegen.
3313static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
3314                                                CallingConv::ID CallConv,
3315                                                const X86Subtarget &Subtarget) {
3316  assert(Subtarget.is64Bit());
3317  if (Subtarget.isCallingConvWin64(CallConv)) {
3318    // The XMM registers which might contain var arg parameters are shadowed
3319    // in their paired GPRs, so we only need to save the GPRs to their home
3320    // slots.
3321    // TODO: __vectorcall will change this.
3322    return None;
3323  }
3324
3325  const Function &F = MF.getFunction();
3326  bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
3327  bool isSoftFloat = Subtarget.useSoftFloat();
3328  assert(!(isSoftFloat && NoImplicitFloatOps) &&
3329         "SSE register cannot be used when SSE is disabled!");
3330  if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
3331    // Kernel mode asks for SSE to be disabled, so there are no XMM argument
3332    // registers.
3333    return None;
3334
3335  static const MCPhysReg XMMArgRegs64Bit[] = {
3336    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3337    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3338  };
3339  return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
3340}
3341
3342#ifndef NDEBUG
3343static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
3344  return std::is_sorted(ArgLocs.begin(), ArgLocs.end(),
3345                        [](const CCValAssign &A, const CCValAssign &B) -> bool {
3346                          return A.getValNo() < B.getValNo();
3347                        });
3348}
3349#endif
3350
3351SDValue X86TargetLowering::LowerFormalArguments(
3352    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3353    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3354    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3355  MachineFunction &MF = DAG.getMachineFunction();
3356  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3357  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
3358
3359  const Function &F = MF.getFunction();
3360  if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
3361      F.getName() == "main")
3362    FuncInfo->setForceFramePointer(true);
3363
3364  MachineFrameInfo &MFI = MF.getFrameInfo();
3365  bool Is64Bit = Subtarget.is64Bit();
3366  bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
3367
3368  assert(
3369      !(isVarArg && canGuaranteeTCO(CallConv)) &&
3370      "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
3371
3372  // Assign locations to all of the incoming arguments.
3373  SmallVector<CCValAssign, 16> ArgLocs;
3374  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
3375
3376  // Allocate shadow area for Win64.
3377  if (IsWin64)
3378    CCInfo.AllocateStack(32, 8);
3379
3380  CCInfo.AnalyzeArguments(Ins, CC_X86);
3381
3382  // In vectorcall calling convention a second pass is required for the HVA
3383  // types.
3384  if (CallingConv::X86_VectorCall == CallConv) {
3385    CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
3386  }
3387
3388  // The next loop assumes that the locations are in the same order as the
3389  // input arguments.
3390  assert(isSortedByValueNo(ArgLocs) &&
3391         "Argument Location list must be sorted before lowering");
3392
3393  SDValue ArgValue;
3394  for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
3395       ++I, ++InsIndex) {
3396    assert(InsIndex < Ins.size() && "Invalid Ins index");
3397    CCValAssign &VA = ArgLocs[I];
3398
3399    if (VA.isRegLoc()) {
3400      EVT RegVT = VA.getLocVT();
3401      if (VA.needsCustom()) {
3402        assert(
3403            VA.getValVT() == MVT::v64i1 &&
3404            "Currently the only custom case is when we split v64i1 to 2 regs");
3405
3406        // In the regcall calling convention on a 32 bit arch, v64i1 values
3407        // are split up into two registers.
3408        ArgValue =
3409            getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
3410      } else {
3411        const TargetRegisterClass *RC;
3412        if (RegVT == MVT::i8)
3413          RC = &X86::GR8RegClass;
3414        else if (RegVT == MVT::i16)
3415          RC = &X86::GR16RegClass;
3416        else if (RegVT == MVT::i32)
3417          RC = &X86::GR32RegClass;
3418        else if (Is64Bit && RegVT == MVT::i64)
3419          RC = &X86::GR64RegClass;
3420        else if (RegVT == MVT::f32)
3421          RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
3422        else if (RegVT == MVT::f64)
3423          RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
3424        else if (RegVT == MVT::f80)
3425          RC = &X86::RFP80RegClass;
3426        else if (RegVT == MVT::f128)
3427          RC = &X86::VR128RegClass;
3428        else if (RegVT.is512BitVector())
3429          RC = &X86::VR512RegClass;
3430        else if (RegVT.is256BitVector())
3431          RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
3432        else if (RegVT.is128BitVector())
3433          RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
3434        else if (RegVT == MVT::x86mmx)
3435          RC = &X86::VR64RegClass;
3436        else if (RegVT == MVT::v1i1)
3437          RC = &X86::VK1RegClass;
3438        else if (RegVT == MVT::v8i1)
3439          RC = &X86::VK8RegClass;
3440        else if (RegVT == MVT::v16i1)
3441          RC = &X86::VK16RegClass;
3442        else if (RegVT == MVT::v32i1)
3443          RC = &X86::VK32RegClass;
3444        else if (RegVT == MVT::v64i1)
3445          RC = &X86::VK64RegClass;
3446        else
3447          llvm_unreachable("Unknown argument type!");
3448
3449        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3450        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
3451      }
3452
3453      // If this is an 8 or 16-bit value, it is really passed promoted to 32
3454      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
3455      // right size.
3456      if (VA.getLocInfo() == CCValAssign::SExt)
3457        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
3458                               DAG.getValueType(VA.getValVT()));
3459      else if (VA.getLocInfo() == CCValAssign::ZExt)
3460        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
3461                               DAG.getValueType(VA.getValVT()));
3462      else if (VA.getLocInfo() == CCValAssign::BCvt)
3463        ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
3464
3465      if (VA.isExtInLoc()) {
3466        // Handle MMX values passed in XMM regs.
3467        if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
3468          ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
3469        else if (VA.getValVT().isVector() &&
3470                 VA.getValVT().getScalarType() == MVT::i1 &&
3471                 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
3472                  (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
3473          // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
3474          ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
3475        } else
3476          ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3477      }
3478    } else {
3479      assert(VA.isMemLoc());
3480      ArgValue =
3481          LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
3482    }
3483
3484    // If value is passed via pointer - do a load.
3485    if (VA.getLocInfo() == CCValAssign::Indirect && !Ins[I].Flags.isByVal())
3486      ArgValue =
3487          DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
3488
3489    InVals.push_back(ArgValue);
3490  }
3491
3492  for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
3493    // Swift calling convention does not require we copy the sret argument
3494    // into %rax/%eax for the return. We don't set SRetReturnReg for Swift.
3495    if (CallConv == CallingConv::Swift)
3496      continue;
3497
3498    // All x86 ABIs require that for returning structs by value we copy the
3499    // sret argument into %rax/%eax (depending on ABI) for the return. Save
3500    // the argument into a virtual register so that we can access it from the
3501    // return points.
3502    if (Ins[I].Flags.isSRet()) {
3503      unsigned Reg = FuncInfo->getSRetReturnReg();
3504      if (!Reg) {
3505        MVT PtrTy = getPointerTy(DAG.getDataLayout());
3506        Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
3507        FuncInfo->setSRetReturnReg(Reg);
3508      }
3509      SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
3510      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
3511      break;
3512    }
3513  }
3514
3515  unsigned StackSize = CCInfo.getNextStackOffset();
3516  // Align stack specially for tail calls.
3517  if (shouldGuaranteeTCO(CallConv,
3518                         MF.getTarget().Options.GuaranteedTailCallOpt))
3519    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
3520
3521  // If the function takes a variable number of arguments, make a frame index for
3522  // the start of the first vararg value... for expansion of llvm.va_start. We
3523  // can skip this if there are no va_start calls.
3524  if (MFI.hasVAStart() &&
3525      (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
3526                   CallConv != CallingConv::X86_ThisCall))) {
3527    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
3528  }
3529
3530  // Figure out if XMM registers are in use.
3531  assert(!(Subtarget.useSoftFloat() &&
3532           F.hasFnAttribute(Attribute::NoImplicitFloat)) &&
3533         "SSE register cannot be used when SSE is disabled!");
3534
3535  // 64-bit calling conventions support varargs and register parameters, so we
3536  // have to do extra work to spill them in the prologue.
3537  if (Is64Bit && isVarArg && MFI.hasVAStart()) {
3538    // Find the first unallocated argument GPR and XMM registers.
3539    ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
3540    ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
3541    unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
3542    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
3543    assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
3544           "SSE register cannot be used when SSE is disabled!");
3545
3546    // Gather all the live in physical registers.
3547    SmallVector<SDValue, 6> LiveGPRs;
3548    SmallVector<SDValue, 8> LiveXMMRegs;
3549    SDValue ALVal;
3550    for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
3551      unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
3552      LiveGPRs.push_back(
3553          DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
3554    }
3555    if (!ArgXMMs.empty()) {
3556      unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
3557      ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
3558      for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
3559        unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
3560        LiveXMMRegs.push_back(
3561            DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
3562      }
3563    }
3564
3565    if (IsWin64) {
3566      // Get to the caller-allocated home save location.  Add 8 to account
3567      // for the return address.
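      // (The Win64 ABI requires the caller to reserve 32 bytes of shadow/home
      // space for RCX, RDX, R8 and R9 immediately above the return address.)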
3568      int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
3569      FuncInfo->setRegSaveFrameIndex(
3570          MFI.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
3571      // Fix up the vararg frame index to point at the shadow area (4 x i64).
3572      if (NumIntRegs < 4)
3573        FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
3574    } else {
3575      // For X86-64, if there are vararg parameters that are passed via
3576      // registers, then we must store them to their spots on the stack so
3577      // they may be loaded by dereferencing the result of va_next.
3578      FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
3579      FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
3580      FuncInfo->setRegSaveFrameIndex(MFI.CreateStackObject(
3581          ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
3582    }
3583
3584    // Store the integer parameter registers.
3585    SmallVector<SDValue, 8> MemOps;
3586    SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
3587                                      getPointerTy(DAG.getDataLayout()));
3588    unsigned Offset = FuncInfo->getVarArgsGPOffset();
3589    for (SDValue Val : LiveGPRs) {
3590      SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3591                                RSFIN, DAG.getIntPtrConstant(Offset, dl));
3592      SDValue Store =
3593          DAG.getStore(Val.getValue(1), dl, Val, FIN,
3594                       MachinePointerInfo::getFixedStack(
3595                           DAG.getMachineFunction(),
3596                           FuncInfo->getRegSaveFrameIndex(), Offset));
3597      MemOps.push_back(Store);
3598      Offset += 8;
3599    }
3600
3601    if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
3602      // Now store the XMM (fp + vector) parameter registers.
3603      SmallVector<SDValue, 12> SaveXMMOps;
3604      SaveXMMOps.push_back(Chain);
3605      SaveXMMOps.push_back(ALVal);
3606      SaveXMMOps.push_back(DAG.getIntPtrConstant(
3607                             FuncInfo->getRegSaveFrameIndex(), dl));
3608      SaveXMMOps.push_back(DAG.getIntPtrConstant(
3609                             FuncInfo->getVarArgsFPOffset(), dl));
3610      SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
3611                        LiveXMMRegs.end());
3612      MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
3613                                   MVT::Other, SaveXMMOps));
3614    }
3615
3616    if (!MemOps.empty())
3617      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3618  }
3619
3620  if (isVarArg && MFI.hasMustTailInVarArgFunc()) {
3621    // Find the largest legal vector type.
3622    MVT VecVT = MVT::Other;
3623    // FIXME: Only some x86_32 calling conventions support AVX512.
3624    if (Subtarget.useAVX512Regs() &&
3625        (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
3626                     CallConv == CallingConv::Intel_OCL_BI)))
3627      VecVT = MVT::v16f32;
3628    else if (Subtarget.hasAVX())
3629      VecVT = MVT::v8f32;
3630    else if (Subtarget.hasSSE2())
3631      VecVT = MVT::v4f32;
3632
3633    // We forward some GPRs and some vector types.
3634    SmallVector<MVT, 2> RegParmTypes;
3635    MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
3636    RegParmTypes.push_back(IntVT);
3637    if (VecVT != MVT::Other)
3638      RegParmTypes.push_back(VecVT);
3639
3640    // Compute the set of forwarded registers. The rest are scratch.
3641    SmallVectorImpl<ForwardedRegister> &Forwards =
3642        FuncInfo->getForwardedMustTailRegParms();
3643    CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
3644
3645    // Forward AL for SysV x86_64 targets, since it is used for varargs.
3646    if (Is64Bit && !IsWin64 && !CCInfo.isAllocated(X86::AL)) {
3647      unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
3648      Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
3649    }
3650
3651    // Copy all forwards from physical to virtual registers.
3652    for (ForwardedRegister &FR : Forwards) {
3653      // FIXME: Can we use a less constrained schedule?
3654      SDValue RegVal = DAG.getCopyFromReg(Chain, dl, FR.VReg, FR.VT);
3655      FR.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(FR.VT));
3656      Chain = DAG.getCopyToReg(Chain, dl, FR.VReg, RegVal);
3657    }
3658  }
3659
3660  // Some CCs need callee pop.
3661  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3662                       MF.getTarget().Options.GuaranteedTailCallOpt)) {
3663    FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
3664  } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
3665    // X86 interrupts must pop the error code (and the alignment padding) if
3666    // present.
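    // (On x86-64 this is an 8-byte error code plus 8 bytes of alignment padding;
    // on x86-32 it is a 4-byte error code.)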
3667    FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
3668  } else {
3669    FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
3670    // If this is an sret function, the return should pop the hidden pointer.
3671    if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
3672        !Subtarget.getTargetTriple().isOSMSVCRT() &&
3673        argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
3674      FuncInfo->setBytesToPopOnReturn(4);
3675  }
3676
3677  if (!Is64Bit) {
3678    // RegSaveFrameIndex is X86-64 only.
3679    FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
3680    if (CallConv == CallingConv::X86_FastCall ||
3681        CallConv == CallingConv::X86_ThisCall)
3682      // fastcc functions can't have varargs.
3683      FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
3684  }
3685
3686  FuncInfo->setArgumentStackSize(StackSize);
3687
3688  if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
3689    EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
3690    if (Personality == EHPersonality::CoreCLR) {
3691      assert(Is64Bit);
3692      // TODO: Add a mechanism to frame lowering that will allow us to indicate
3693      // that we'd prefer this slot be allocated towards the bottom of the frame
3694      // (i.e. near the stack pointer after allocating the frame).  Every
3695      // funclet needs a copy of this slot in its (mostly empty) frame, and the
3696      // offset from the bottom of this and each funclet's frame must be the
3697      // same, so the size of funclets' (mostly empty) frames is dictated by
3698      // how far this slot is from the bottom (since they allocate just enough
3699      // space to accommodate holding this slot at the correct offset).
3700      int PSPSymFI = MFI.CreateStackObject(8, 8, /*isSS=*/false);
3701      EHInfo->PSPSymFrameIdx = PSPSymFI;
3702    }
3703  }
3704
3705  if (CallConv == CallingConv::X86_RegCall ||
3706      F.hasFnAttribute("no_caller_saved_registers")) {
3707    MachineRegisterInfo &MRI = MF.getRegInfo();
3708    for (std::pair<unsigned, unsigned> Pair : MRI.liveins())
3709      MRI.disableCalleeSavedRegister(Pair.first);
3710  }
3711
3712  return Chain;
3713}
3714
3715SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
3716                                            SDValue Arg, const SDLoc &dl,
3717                                            SelectionDAG &DAG,
3718                                            const CCValAssign &VA,
3719                                            ISD::ArgFlagsTy Flags) const {
3720  unsigned LocMemOffset = VA.getLocMemOffset();
3721  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
3722  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3723                       StackPtr, PtrOff);
3724  if (Flags.isByVal())
3725    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
3726
3727  return DAG.getStore(
3728      Chain, dl, Arg, PtrOff,
3729      MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
3730}
3731
3732/// Emit a load of return address if tail call
3733/// optimization is performed and it is required.
3734SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
3735    SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
3736    bool Is64Bit, int FPDiff, const SDLoc &dl) const {
3737  // Adjust the Return address stack slot.
3738  EVT VT = getPointerTy(DAG.getDataLayout());
3739  OutRetAddr = getReturnAddressFrameIndex(DAG);
3740
3741  // Load the "old" Return address.
3742  OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
3743  return SDValue(OutRetAddr.getNode(), 1);
3744}
3745
3746/// Emit a store of the return address if tail call
3747/// optimization is performed and it is required (FPDiff!=0).
3748static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
3749                                        SDValue Chain, SDValue RetAddrFrIdx,
3750                                        EVT PtrVT, unsigned SlotSize,
3751                                        int FPDiff, const SDLoc &dl) {
3752  // Store the return address to the appropriate stack slot.
3753  if (!FPDiff) return Chain;
3754  // Calculate the new stack slot for the return address.
3755  int NewReturnAddrFI =
3756    MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
3757                                         false);
3758  SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
3759  Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
3760                       MachinePointerInfo::getFixedStack(
3761                           DAG.getMachineFunction(), NewReturnAddrFI));
3762  return Chain;
3763}
3764
3765/// Returns a vector_shuffle mask for a movs{s|d} or movd
3766/// operation of the specified width.
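/// For example, for a 4-element vector the mask is <4, 1, 2, 3>: element 0 is
/// taken from V2 and the remaining elements from V1.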
3767static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
3768                       SDValue V2) {
3769  unsigned NumElems = VT.getVectorNumElements();
3770  SmallVector<int, 8> Mask;
3771  Mask.push_back(NumElems);
3772  for (unsigned i = 1; i != NumElems; ++i)
3773    Mask.push_back(i);
3774  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
3775}
3776
3777SDValue
3778X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3779                             SmallVectorImpl<SDValue> &InVals) const {
3780  SelectionDAG &DAG                     = CLI.DAG;
3781  SDLoc &dl                             = CLI.DL;
3782  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3783  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
3784  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
3785  SDValue Chain                         = CLI.Chain;
3786  SDValue Callee                        = CLI.Callee;
3787  CallingConv::ID CallConv              = CLI.CallConv;
3788  bool &isTailCall                      = CLI.IsTailCall;
3789  bool isVarArg                         = CLI.IsVarArg;
3790
3791  MachineFunction &MF = DAG.getMachineFunction();
3792  bool Is64Bit        = Subtarget.is64Bit();
3793  bool IsWin64        = Subtarget.isCallingConvWin64(CallConv);
3794  StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
3795  bool IsSibcall      = false;
3796  bool IsGuaranteeTCO = MF.getTarget().Options.GuaranteedTailCallOpt ||
3797      CallConv == CallingConv::Tail;
3798  X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
3799  const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction());
3800  const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
3801  bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
3802                 (Fn && Fn->hasFnAttribute("no_caller_saved_registers"));
3803  const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CS.getInstruction());
3804  bool HasNoCfCheck =
3805      (CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck());
3806  const Module *M = MF.getMMI().getModule();
3807  Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
3808
3809  MachineFunction::CallSiteInfo CSInfo;
3810
3811  if (CallConv == CallingConv::X86_INTR)
3812    report_fatal_error("X86 interrupts may not be called directly");
3813
3814  if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO) {
3815    // If we are using a GOT, disable tail calls to external symbols with
3816    // default visibility. Tail calling such a symbol requires using a GOT
3817    // relocation, which forces early binding of the symbol. This breaks code
3818    // that require lazy function symbol resolution. Using musttail or
3819    // GuaranteedTailCallOpt will override this.
3820    GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3821    if (!G || (!G->getGlobal()->hasLocalLinkage() &&
3822               G->getGlobal()->hasDefaultVisibility()))
3823      isTailCall = false;
3824  }
3825
3826  bool IsMustTail = CLI.CS && CLI.CS.isMustTailCall();
3827  if (IsMustTail) {
3828    // Force this to be a tail call.  The verifier rules are enough to ensure
3829    // that we can lower this successfully without moving the return address
3830    // around.
3831    isTailCall = true;
3832  } else if (isTailCall) {
3833    // Check if it's really possible to do a tail call.
3834    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
3835                    isVarArg, SR != NotStructReturn,
3836                    MF.getFunction().hasStructRetAttr(), CLI.RetTy,
3837                    Outs, OutVals, Ins, DAG);
3838
3839    // Sibcalls are automatically detected tailcalls which do not require
3840    // ABI changes.
3841    if (!IsGuaranteeTCO && isTailCall)
3842      IsSibcall = true;
3843
3844    if (isTailCall)
3845      ++NumTailCalls;
3846  }
3847
3848  assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
3849         "Var args not supported with calling convention fastcc, ghc or hipe");
3850
3851  // Analyze operands of the call, assigning locations to each operand.
3852  SmallVector<CCValAssign, 16> ArgLocs;
3853  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
3854
3855  // Allocate shadow area for Win64.
3856  if (IsWin64)
3857    CCInfo.AllocateStack(32, 8);
3858
3859  CCInfo.AnalyzeArguments(Outs, CC_X86);
3860
3861  // In vectorcall calling convention a second pass is required for the HVA
3862  // types.
3863  if (CallingConv::X86_VectorCall == CallConv) {
3864    CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
3865  }
3866
3867  // Get a count of how many bytes are to be pushed on the stack.
3868  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
3869  if (IsSibcall)
3870    // This is a sibcall. The memory operands are already in place in the
3871    // caller's incoming argument area (the caller's own caller's stack).
3872    NumBytes = 0;
3873  else if (IsGuaranteeTCO && canGuaranteeTCO(CallConv))
3874    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
3875
3876  int FPDiff = 0;
3877  if (isTailCall && !IsSibcall && !IsMustTail) {
3878    // Lower arguments at fp - stackoffset + fpdiff.
3879    unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
3880
3881    FPDiff = NumBytesCallerPushed - NumBytes;
3882
3883    // Record how far the return-address stack slot has to be moved.
3884    // Only update the delta if the new one is more negative than the previous.
3885    if (FPDiff < X86Info->getTCReturnAddrDelta())
3886      X86Info->setTCReturnAddrDelta(FPDiff);
3887  }
3888
3889  unsigned NumBytesToPush = NumBytes;
3890  unsigned NumBytesToPop = NumBytes;
3891
3892  // If we have an inalloca argument, all stack space has already been allocated
3893  // for us and will be right at the top of the stack.  We don't support multiple
3894  // arguments passed in memory when using inalloca.
3895  if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
3896    NumBytesToPush = 0;
3897    if (!ArgLocs.back().isMemLoc())
3898      report_fatal_error("cannot use inalloca attribute on a register "
3899                         "parameter");
3900    if (ArgLocs.back().getLocMemOffset() != 0)
3901      report_fatal_error("any parameter with the inalloca attribute must be "
3902                         "the only memory argument");
3903  }
3904
3905  if (!IsSibcall && !IsMustTail)
3906    Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
3907                                 NumBytes - NumBytesToPush, dl);
3908
3909  SDValue RetAddrFrIdx;
3910  // Load return address for tail calls.
3911  if (isTailCall && FPDiff)
3912    Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
3913                                    Is64Bit, FPDiff, dl);
3914
3915  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
3916  SmallVector<SDValue, 8> MemOpChains;
3917  SDValue StackPtr;
3918
3919  // The next loop assumes that the locations are in the same order as the
3920  // input arguments.
3921  assert(isSortedByValueNo(ArgLocs) &&
3922         "Argument Location list must be sorted before lowering");
3923
3924  // Walk the register/memloc assignments, inserting copies/loads.  In the case
3925  // of tail call optimization, arguments are handled later.
3926  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
3927  for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
3928       ++I, ++OutIndex) {
3929    assert(OutIndex < Outs.size() && "Invalid Out index");
3930    // Skip inalloca arguments, they have already been written.
3931    ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
3932    if (Flags.isInAlloca())
3933      continue;
3934
3935    CCValAssign &VA = ArgLocs[I];
3936    EVT RegVT = VA.getLocVT();
3937    SDValue Arg = OutVals[OutIndex];
3938    bool isByVal = Flags.isByVal();
3939
3940    // Promote the value if needed.
3941    switch (VA.getLocInfo()) {
3942    default: llvm_unreachable("Unknown loc info!");
3943    case CCValAssign::Full: break;
3944    case CCValAssign::SExt:
3945      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
3946      break;
3947    case CCValAssign::ZExt:
3948      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
3949      break;
3950    case CCValAssign::AExt:
3951      if (Arg.getValueType().isVector() &&
3952          Arg.getValueType().getVectorElementType() == MVT::i1)
3953        Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
3954      else if (RegVT.is128BitVector()) {
3955        // Special case: passing MMX values in XMM registers.
3956        Arg = DAG.getBitcast(MVT::i64, Arg);
3957        Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
3958        Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
3959      } else
3960        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
3961      break;
3962    case CCValAssign::BCvt:
3963      Arg = DAG.getBitcast(RegVT, Arg);
3964      break;
3965    case CCValAssign::Indirect: {
3966      if (isByVal) {
3967        // Memcpy the argument to a temporary stack slot to prevent
3968        // the caller from seeing any modifications the callee may make
3969        // as guaranteed by the `byval` attribute.
3970        int FrameIdx = MF.getFrameInfo().CreateStackObject(
3971            Flags.getByValSize(), std::max(16, (int)Flags.getByValAlign()),
3972            false);
3973        SDValue StackSlot =
3974            DAG.getFrameIndex(FrameIdx, getPointerTy(DAG.getDataLayout()));
3975        Chain =
3976            CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl);
3977        // From now on treat this as a regular pointer
3978        Arg = StackSlot;
3979        isByVal = false;
3980      } else {
3981        // Store the argument.
3982        SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
3983        int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
3984        Chain = DAG.getStore(
3985            Chain, dl, Arg, SpillSlot,
3986            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3987        Arg = SpillSlot;
3988      }
3989      break;
3990    }
3991    }
3992
3993    if (VA.needsCustom()) {
3994      assert(VA.getValVT() == MVT::v64i1 &&
3995             "Currently the only custom case is when we split v64i1 to 2 regs");
3996      // Split v64i1 value into two registers
3997      Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
3998    } else if (VA.isRegLoc()) {
3999      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
4000      const TargetOptions &Options = DAG.getTarget().Options;
4001      if (Options.EnableDebugEntryValues)
4002        CSInfo.emplace_back(VA.getLocReg(), I);
4003      if (isVarArg && IsWin64) {
4004        // Win64 ABI requires argument XMM reg to be copied to the corresponding
4005        // shadow reg if callee is a varargs function.
4006        unsigned ShadowReg = 0;
4007        switch (VA.getLocReg()) {
4008        case X86::XMM0: ShadowReg = X86::RCX; break;
4009        case X86::XMM1: ShadowReg = X86::RDX; break;
4010        case X86::XMM2: ShadowReg = X86::R8; break;
4011        case X86::XMM3: ShadowReg = X86::R9; break;
4012        }
4013        if (ShadowReg)
4014          RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
4015      }
4016    } else if (!IsSibcall && (!isTailCall || isByVal)) {
4017      assert(VA.isMemLoc());
4018      if (!StackPtr.getNode())
4019        StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4020                                      getPointerTy(DAG.getDataLayout()));
4021      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
4022                                             dl, DAG, VA, Flags));
4023    }
4024  }
4025
4026  if (!MemOpChains.empty())
4027    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
4028
4029  if (Subtarget.isPICStyleGOT()) {
4030    // ELF / PIC requires GOT in the EBX register before function calls via PLT
4031    // GOT pointer.
4032    if (!isTailCall) {
4033      RegsToPass.push_back(std::make_pair(
4034          unsigned(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
4035                                          getPointerTy(DAG.getDataLayout()))));
4036    } else {
4037      // If we are tail calling and generating PIC/GOT-style code, load the
4038      // address of the callee into ECX. The value in ecx is used as target of
4039      // the tail jump. This is done to circumvent the ebx/callee-saved problem
4040      // for tail calls on PIC/GOT architectures. Normally we would just put the
4041      // address of GOT into ebx and then call target@PLT. But for tail calls
4042      // ebx would be restored (since ebx is callee saved) before jumping to the
4043      // target@PLT.
4044
4045      // Note: The actual moving to ECX is done further down.
4046      GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4047      if (G && !G->getGlobal()->hasLocalLinkage() &&
4048          G->getGlobal()->hasDefaultVisibility())
4049        Callee = LowerGlobalAddress(Callee, DAG);
4050      else if (isa<ExternalSymbolSDNode>(Callee))
4051        Callee = LowerExternalSymbol(Callee, DAG);
4052    }
4053  }
4054
4055  if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
4056    // From AMD64 ABI document:
4057    // For calls that may call functions that use varargs or stdargs
4058    // (prototype-less calls or calls to functions containing ellipsis (...) in
4059    // the declaration) %al is used as a hidden argument to specify the number
4060    // of SSE registers used. The contents of %al do not need to match exactly
4061    // the number of registers, but must be an upper bound on the number of SSE
4062    // registers used and is in the range 0 - 8 inclusive.
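    // For example, a variadic call passing two doubles in XMM0 and XMM1 would
    // set %al to 2; any upper bound up to 8 would also be acceptable.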
4063
4064    // Count the number of XMM registers allocated.
4065    static const MCPhysReg XMMArgRegs[] = {
4066      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
4067      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
4068    };
4069    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
4070    assert((Subtarget.hasSSE1() || !NumXMMRegs)
4071           && "SSE registers cannot be used when SSE is disabled");
4072
4073    RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
4074                                        DAG.getConstant(NumXMMRegs, dl,
4075                                                        MVT::i8)));
4076  }
4077
4078  if (isVarArg && IsMustTail) {
4079    const auto &Forwards = X86Info->getForwardedMustTailRegParms();
4080    for (const auto &F : Forwards) {
4081      SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
4082      RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
4083    }
4084  }
4085
4086  // For tail calls lower the arguments to the 'real' stack slots.  Sibcalls
4087  // don't need this because the eligibility check rejects calls that require
4088  // shuffling arguments passed in memory.
4089  if (!IsSibcall && isTailCall) {
4090    // Force all the incoming stack arguments to be loaded from the stack
4091    // before any new outgoing arguments are stored to the stack, because the
4092    // outgoing stack slots may alias the incoming argument stack slots, and
4093    // the alias isn't otherwise explicit. This is slightly more conservative
4094    // than necessary, because it means that each store effectively depends
4095    // on every argument instead of just those arguments it would clobber.
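    // For example, if an incoming argument lives at [esp+4] and an outgoing
    // argument is stored to that same slot, the load must be emitted first.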
4096    SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
4097
4098    SmallVector<SDValue, 8> MemOpChains2;
4099    SDValue FIN;
4100    int FI = 0;
4101    for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
4102         ++I, ++OutsIndex) {
4103      CCValAssign &VA = ArgLocs[I];
4104
4105      if (VA.isRegLoc()) {
4106        if (VA.needsCustom()) {
4107          assert((CallConv == CallingConv::X86_RegCall) &&
4108                 "Expecting custom case only in regcall calling convention");
4109          // This means that we are in a special case where one argument was
4110          // passed through two register locations - skip the next location.
4111          ++I;
4112        }
4113
4114        continue;
4115      }
4116
4117      assert(VA.isMemLoc());
4118      SDValue Arg = OutVals[OutsIndex];
4119      ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
4120      // Skip inalloca arguments.  They don't require any work.
4121      if (Flags.isInAlloca())
4122        continue;
4123      // Create frame index.
4124      int32_t Offset = VA.getLocMemOffset()+FPDiff;
4125      uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
4126      FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4127      FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
4128
4129      if (Flags.isByVal()) {
4130        // Copy relative to framepointer.
4131        SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
4132        if (!StackPtr.getNode())
4133          StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4134                                        getPointerTy(DAG.getDataLayout()));
4135        Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
4136                             StackPtr, Source);
4137
4138        MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
4139                                                         ArgChain,
4140                                                         Flags, DAG, dl));
4141      } else {
4142        // Store relative to framepointer.
4143        MemOpChains2.push_back(DAG.getStore(
4144            ArgChain, dl, Arg, FIN,
4145            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4146      }
4147    }
4148
4149    if (!MemOpChains2.empty())
4150      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4151
4152    // Store the return address to the appropriate stack slot.
4153    Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
4154                                     getPointerTy(DAG.getDataLayout()),
4155                                     RegInfo->getSlotSize(), FPDiff, dl);
4156  }
4157
4158  // Build a sequence of copy-to-reg nodes chained together with token chain
4159  // and flag operands which copy the outgoing args into registers.
4160  SDValue InFlag;
4161  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4162    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4163                             RegsToPass[i].second, InFlag);
4164    InFlag = Chain.getValue(1);
4165  }
4166
4167  if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
4168    assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
4169    // In the 64-bit large code model, we have to make all calls
4170    // through a register, since the call instruction's 32-bit
4171    // pc-relative offset may not be large enough to hold the whole
4172    // address.
4173  } else if (Callee->getOpcode() == ISD::GlobalAddress ||
4174             Callee->getOpcode() == ISD::ExternalSymbol) {
4175    // Lower direct calls to global addresses and external symbols. Setting
4176    // ForCall to true here has the effect of removing WrapperRIP when possible
4177    // to allow direct calls to be selected without first materializing the
4178    // address into a register.
4179    Callee = LowerGlobalOrExternal(Callee, DAG, /*ForCall=*/true);
4180  } else if (Subtarget.isTarget64BitILP32() &&
4181             Callee->getValueType(0) == MVT::i32) {
4182    // Zero-extend the 32-bit Callee address into a 64-bit one, per the x32 ABI.
4183    Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
4184  }
4185
4186  // Returns a chain & a flag for retval copy to use.
4187  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
4188  SmallVector<SDValue, 8> Ops;
4189
4190  if (!IsSibcall && isTailCall && !IsMustTail) {
4191    Chain = DAG.getCALLSEQ_END(Chain,
4192                               DAG.getIntPtrConstant(NumBytesToPop, dl, true),
4193                               DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4194    InFlag = Chain.getValue(1);
4195  }
4196
4197  Ops.push_back(Chain);
4198  Ops.push_back(Callee);
4199
4200  if (isTailCall)
4201    Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32));
4202
4203  // Add argument registers to the end of the list so that they are known live
4204  // into the call.
4205  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
4206    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
4207                                  RegsToPass[i].second.getValueType()));
4208
4209  // Add a register mask operand representing the call-preserved registers.
4210  // If HasNCSR is set (the no_caller_saved_registers attribute is present), we
4211  // query the mask for the X86_INTR calling convention, since it preserves the
4212  // same set of registers (same CSR mask).
4213  const uint32_t *Mask = RegInfo->getCallPreservedMask(
4214      MF, HasNCSR ? (CallingConv::ID)CallingConv::X86_INTR : CallConv);
4215  assert(Mask && "Missing call preserved mask for calling convention");
4216
4217  // If this is an invoke in a 32-bit function using a funclet-based
4218  // personality, assume the function clobbers all registers. If an exception
4219  // is thrown, the runtime will not restore CSRs.
4220  // FIXME: Model this more precisely so that we can register allocate across
4221  // the normal edge and spill and fill across the exceptional edge.
4222  if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) {
4223    const Function &CallerFn = MF.getFunction();
4224    EHPersonality Pers =
4225        CallerFn.hasPersonalityFn()
4226            ? classifyEHPersonality(CallerFn.getPersonalityFn())
4227            : EHPersonality::Unknown;
4228    if (isFuncletEHPersonality(Pers))
4229      Mask = RegInfo->getNoPreservedMask();
4230  }
4231
4232  // Define a new register mask from the existing mask.
4233  uint32_t *RegMask = nullptr;
4234
4235  // In some calling conventions we need to remove the used physical registers
4236  // from the reg mask.
4237  if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
4238    const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4239
4240    // Allocate a new Reg Mask and copy Mask.
4241    RegMask = MF.allocateRegMask();
4242    unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
4243    memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
4244
4245    // Make sure all sub registers of the argument registers are reset
4246    // in the RegMask.
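    // (In a register mask a set bit means the register is preserved across the
    // call, so clearing a bit marks that register as clobbered.)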
4247    for (auto const &RegPair : RegsToPass)
4248      for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true);
4249           SubRegs.isValid(); ++SubRegs)
4250        RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
4251
4252    // Create the RegMask Operand according to our updated mask.
4253    Ops.push_back(DAG.getRegisterMask(RegMask));
4254  } else {
4255    // Create the RegMask Operand according to the static mask.
4256    Ops.push_back(DAG.getRegisterMask(Mask));
4257  }
4258
4259  if (InFlag.getNode())
4260    Ops.push_back(InFlag);
4261
4262  if (isTailCall) {
4263    // We used to do:
4264    //// If this is the first return lowered for this function, add the regs
4265    //// to the liveout set for the function.
4266    // This isn't right, although it's probably harmless on x86; liveouts
4267    // should be computed from returns not tail calls.  Consider a void
4268    // function making a tail call to a function returning int.
4269    MF.getFrameInfo().setHasTailCall();
4270    SDValue Ret = DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
4271    DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
4272    return Ret;
4273  }
4274
4275  if (HasNoCfCheck && IsCFProtectionSupported) {
4276    Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
4277  } else {
4278    Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
4279  }
4280  InFlag = Chain.getValue(1);
4281  DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
4282
4283  // Save heapallocsite metadata.
4284  if (CLI.CS)
4285    if (MDNode *HeapAlloc = CLI.CS->getMetadata("heapallocsite"))
4286      DAG.addHeapAllocSite(Chain.getNode(), HeapAlloc);
4287
4288  // Create the CALLSEQ_END node.
4289  unsigned NumBytesForCalleeToPop;
4290  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
4291                       DAG.getTarget().Options.GuaranteedTailCallOpt))
4292    NumBytesForCalleeToPop = NumBytes;    // Callee pops everything
4293  else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
4294           !Subtarget.getTargetTriple().isOSMSVCRT() &&
4295           SR == StackStructReturn)
4296    // If this is a call to a struct-return function, the callee
4297    // pops the hidden struct pointer, so we have to push it back.
4298    // This is common for Darwin/X86, Linux & Mingw32 targets.
4299    // For MSVC Win32 targets, the caller pops the hidden struct pointer.
4300    NumBytesForCalleeToPop = 4;
4301  else
4302    NumBytesForCalleeToPop = 0;  // Callee pops nothing.
4303
4304  if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
4305    // No need to reset the stack after the call if the call doesn't return. To
4306    // make the MI verifier happy, we'll pretend the callee does it for us.
4307    NumBytesForCalleeToPop = NumBytes;
4308  }
4309
4310  // Returns a flag for retval copy to use.
4311  if (!IsSibcall) {
4312    Chain = DAG.getCALLSEQ_END(Chain,
4313                               DAG.getIntPtrConstant(NumBytesToPop, dl, true),
4314                               DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
4315                                                     true),
4316                               InFlag, dl);
4317    InFlag = Chain.getValue(1);
4318  }
4319
4320  // Handle result values, copying them out of physregs into vregs that we
4321  // return.
4322  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
4323                         InVals, RegMask);
4324}
4325
4326//===----------------------------------------------------------------------===//
4327//                Fast Calling Convention (tail call) implementation
4328//===----------------------------------------------------------------------===//
4329
4330//  Like stdcall, the callee cleans up the arguments, except that ECX is
4331//  reserved for storing the tail-called function's address. Only 2 registers
4332//  are free for argument passing (inreg). Tail call optimization is performed
4333//  provided:
4334//                * tailcallopt is enabled
4335//                * caller/callee are fastcc
4336//  On the X86_64 architecture with GOT-style position-independent code, only
4337//  local (within-module) calls are supported at the moment.
4338//  To keep the stack aligned according to the platform ABI, the function
4339//  GetAlignedArgumentStackSize ensures that the argument delta is always a
4340//  multiple of the stack alignment. (Dynamic linkers such as darwin's dyld
4341//  need this.) If the tail-called callee has more arguments than the caller,
4342//  the caller needs to make sure that there is room to move the RETADDR to.
4343//  This is achieved by reserving an area the size of the argument delta right
4344//  after the original RETADDR, but before the saved frame pointer or the
4345//  spilled registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4).
4346//  stack layout:
4347//    arg1
4348//    arg2
4349//    RETADDR
4350//    [ new RETADDR
4351//      move area ]
4352//    (possible EBP)
4353//    ESI
4354//    EDI
4355//    local1 ..
4356
4357/// Round the stack size up so that, together with the return-address slot, it
4358/// satisfies the stack alignment requirement (e.g. 16n + 12 for 16-byte alignment).
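/// For example, assuming a 4-byte slot and 16-byte alignment, StackSize = 20
/// yields alignTo(20 + 4, 16) - 4 = 32 - 4 = 28 = 16*1 + 12.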
4359unsigned
4360X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
4361                                               SelectionDAG &DAG) const {
4362  const Align StackAlignment(Subtarget.getFrameLowering()->getStackAlignment());
4363  const uint64_t SlotSize = Subtarget.getRegisterInfo()->getSlotSize();
4364  assert(StackSize % SlotSize == 0 &&
4365         "StackSize must be a multiple of SlotSize");
4366  return alignTo(StackSize + SlotSize, StackAlignment) - SlotSize;
4367}
4368
4369/// Return true if the given stack call argument is already available in the
4370/// same (relative) position in the caller's incoming argument stack.
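/// For example, if the caller received a value at offset 4 of its incoming
/// argument area and the tail call passes that same value at outgoing offset 4,
/// this returns true and no copy is needed.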
4371static
4372bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
4373                         MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
4374                         const X86InstrInfo *TII, const CCValAssign &VA) {
4375  unsigned Bytes = Arg.getValueSizeInBits() / 8;
4376
4377  for (;;) {
4378    // Look through nodes that don't alter the bits of the incoming value.
4379    unsigned Op = Arg.getOpcode();
4380    if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
4381      Arg = Arg.getOperand(0);
4382      continue;
4383    }
4384    if (Op == ISD::TRUNCATE) {
4385      const SDValue &TruncInput = Arg.getOperand(0);
4386      if (TruncInput.getOpcode() == ISD::AssertZext &&
4387          cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
4388              Arg.getValueType()) {
4389        Arg = TruncInput.getOperand(0);
4390        continue;
4391      }
4392    }
4393    break;
4394  }
4395
4396  int FI = INT_MAX;
4397  if (Arg.getOpcode() == ISD::CopyFromReg) {
4398    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
4399    if (!Register::isVirtualRegister(VR))
4400      return false;
4401    MachineInstr *Def = MRI->getVRegDef(VR);
4402    if (!Def)
4403      return false;
4404    if (!Flags.isByVal()) {
4405      if (!TII->isLoadFromStackSlot(*Def, FI))
4406        return false;
4407    } else {
4408      unsigned Opcode = Def->getOpcode();
4409      if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
4410           Opcode == X86::LEA64_32r) &&
4411          Def->getOperand(1).isFI()) {
4412        FI = Def->getOperand(1).getIndex();
4413        Bytes = Flags.getByValSize();
4414      } else
4415        return false;
4416    }
4417  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
4418    if (Flags.isByVal())
4419      // ByVal argument is passed in as a pointer but it's now being
4420      // dereferenced. e.g.
4421      // define @foo(%struct.X* %A) {
4422      //   tail call @bar(%struct.X* byval %A)
4423      // }
4424      return false;
4425    SDValue Ptr = Ld->getBasePtr();
4426    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
4427    if (!FINode)
4428      return false;
4429    FI = FINode->getIndex();
4430  } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
4431    FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
4432    FI = FINode->getIndex();
4433    Bytes = Flags.getByValSize();
4434  } else
4435    return false;
4436
4437  assert(FI != INT_MAX);
4438  if (!MFI.isFixedObjectIndex(FI))
4439    return false;
4440
4441  if (Offset != MFI.getObjectOffset(FI))
4442    return false;
4443
4444  // If this is not byval, check that the argument stack object is immutable.
4445  // inalloca and argument copy elision can create mutable argument stack
4446  // objects. Byval objects can be mutated, but a byval call intends to pass the
4447  // mutated memory.
4448  if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
4449    return false;
4450
4451  if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) {
4452    // If the argument location is wider than the argument type, check that any
4453    // extension flags match.
4454    if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
4455        Flags.isSExt() != MFI.isObjectSExt(FI)) {
4456      return false;
4457    }
4458  }
4459
4460  return Bytes == MFI.getObjectSize(FI);
4461}
4462
4463/// Check whether the call is eligible for tail call optimization. Targets
4464/// that want to do tail call optimization should implement this function.
4465bool X86TargetLowering::IsEligibleForTailCallOptimization(
4466    SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
4467    bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy,
4468    const SmallVectorImpl<ISD::OutputArg> &Outs,
4469    const SmallVectorImpl<SDValue> &OutVals,
4470    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4471  if (!mayTailCallThisCC(CalleeCC))
4472    return false;
4473
4474  // If -tailcallopt is specified, make fastcc functions tail-callable.
4475  MachineFunction &MF = DAG.getMachineFunction();
4476  const Function &CallerF = MF.getFunction();
4477
4478  // If the function return type is x86_fp80 and the callee return type is not,
4479  // then the FP_EXTEND of the call result is not a nop. It's not safe to
4480  // perform a tailcall optimization here.
4481  if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
4482    return false;
4483
4484  CallingConv::ID CallerCC = CallerF.getCallingConv();
4485  bool CCMatch = CallerCC == CalleeCC;
4486  bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
4487  bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
4488  bool IsGuaranteeTCO = DAG.getTarget().Options.GuaranteedTailCallOpt ||
4489      CalleeCC == CallingConv::Tail;
4490
4491  // Win64 functions have extra shadow space for argument homing. Don't do the
4492  // sibcall if the caller and callee have mismatched expectations for this
4493  // space.
4494  if (IsCalleeWin64 != IsCallerWin64)
4495    return false;
4496
4497  if (IsGuaranteeTCO) {
4498    if (canGuaranteeTCO(CalleeCC) && CCMatch)
4499      return true;
4500    return false;
4501  }
4502
4503  // Look for obvious safe cases to perform tail call optimization that do not
4504  // require ABI changes. This is what gcc calls sibcall.
4505
4506  // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
4507  // emit a special epilogue.
4508  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4509  if (RegInfo->needsStackRealignment(MF))
4510    return false;
4511
4512  // Also avoid sibcall optimization if either caller or callee uses struct
4513  // return semantics.
4514  if (isCalleeStructRet || isCallerStructRet)
4515    return false;
4516
4517  // Do not sibcall optimize vararg calls unless all arguments are passed via
4518  // registers.
4519  LLVMContext &C = *DAG.getContext();
4520  if (isVarArg && !Outs.empty()) {
4521    // Optimizing for varargs on Win64 is unlikely to be safe without
4522    // additional testing.
4523    if (IsCalleeWin64 || IsCallerWin64)
4524      return false;
4525
4526    SmallVector<CCValAssign, 16> ArgLocs;
4527    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4528
4529    CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4530    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
4531      if (!ArgLocs[i].isRegLoc())
4532        return false;
4533  }
4534
4535  // If the call result is in ST0 / ST1, it needs to be popped off the x87
4536  // stack.  Therefore, if it's not used by the call it is not safe to optimize
4537  // this into a sibcall.
4538  bool Unused = false;
4539  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4540    if (!Ins[i].Used) {
4541      Unused = true;
4542      break;
4543    }
4544  }
4545  if (Unused) {
4546    SmallVector<CCValAssign, 16> RVLocs;
4547    CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
4548    CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
4549    for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4550      CCValAssign &VA = RVLocs[i];
4551      if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
4552        return false;
4553    }
4554  }
4555
4556  // Check that the call results are passed in the same way.
4557  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
4558                                  RetCC_X86, RetCC_X86))
4559    return false;
4560  // The callee has to preserve all registers the caller needs to preserve.
4561  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
4562  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
4563  if (!CCMatch) {
4564    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
4565    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
4566      return false;
4567  }
4568
4569  unsigned StackArgsSize = 0;
4570
4571  // If the callee takes no arguments then go on to check the results of the
4572  // call.
4573  if (!Outs.empty()) {
4574    // Check if stack adjustment is needed. For now, do not do this if any
4575    // argument is passed on the stack.
4576    SmallVector<CCValAssign, 16> ArgLocs;
4577    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4578
4579    // Allocate shadow area for Win64
4580    if (IsCalleeWin64)
4581      CCInfo.AllocateStack(32, 8);
4582
4583    CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4584    StackArgsSize = CCInfo.getNextStackOffset();
4585
4586    if (CCInfo.getNextStackOffset()) {
4587      // Check if the arguments are already laid out in the right way as
4588      // the caller's fixed stack objects.
4589      MachineFrameInfo &MFI = MF.getFrameInfo();
4590      const MachineRegisterInfo *MRI = &MF.getRegInfo();
4591      const X86InstrInfo *TII = Subtarget.getInstrInfo();
4592      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4593        CCValAssign &VA = ArgLocs[i];
4594        SDValue Arg = OutVals[i];
4595        ISD::ArgFlagsTy Flags = Outs[i].Flags;
4596        if (VA.getLocInfo() == CCValAssign::Indirect)
4597          return false;
4598        if (!VA.isRegLoc()) {
4599          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
4600                                   MFI, MRI, TII, VA))
4601            return false;
4602        }
4603      }
4604    }
4605
4606    bool PositionIndependent = isPositionIndependent();
4607    // If the tailcall address may be in a register, then make sure it's
4608    // possible to register allocate for it. In 32-bit, the call address can
4609    // only target EAX, EDX, or ECX since the tail call must be scheduled after
4610    // callee-saved registers are restored. These happen to be the same
4611    // registers used to pass 'inreg' arguments so watch out for those.
4612    if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
4613                                  !isa<ExternalSymbolSDNode>(Callee)) ||
4614                                 PositionIndependent)) {
4615      unsigned NumInRegs = 0;
4616      // In PIC we need an extra register to formulate the address computation
4617      // for the callee.
4618      unsigned MaxInRegs = PositionIndependent ? 2 : 3;
4619
4620      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4621        CCValAssign &VA = ArgLocs[i];
4622        if (!VA.isRegLoc())
4623          continue;
4624        Register Reg = VA.getLocReg();
4625        switch (Reg) {
4626        default: break;
4627        case X86::EAX: case X86::EDX: case X86::ECX:
4628          if (++NumInRegs == MaxInRegs)
4629            return false;
4630          break;
4631        }
4632      }
4633    }
4634
4635    const MachineRegisterInfo &MRI = MF.getRegInfo();
4636    if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
4637      return false;
4638  }
4639
4640  bool CalleeWillPop =
4641      X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
4642                       MF.getTarget().Options.GuaranteedTailCallOpt);
4643
4644  if (unsigned BytesToPop =
4645          MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
4646    // If we have bytes to pop, the callee must pop them.
4647    bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
4648    if (!CalleePopMatches)
4649      return false;
4650  } else if (CalleeWillPop && StackArgsSize > 0) {
4651    // If we don't have bytes to pop, make sure the callee doesn't pop any.
4652    return false;
4653  }
4654
4655  return true;
4656}
4657
4658FastISel *
4659X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
4660                                  const TargetLibraryInfo *libInfo) const {
4661  return X86::createFastISel(funcInfo, libInfo);
4662}
4663
4664//===----------------------------------------------------------------------===//
4665//                           Other Lowering Hooks
4666//===----------------------------------------------------------------------===//
4667
4668static bool MayFoldLoad(SDValue Op) {
4669  return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
4670}
4671
4672static bool MayFoldIntoStore(SDValue Op) {
4673  return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
4674}
4675
4676static bool MayFoldIntoZeroExtend(SDValue Op) {
4677  if (Op.hasOneUse()) {
4678    unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
4679    return (ISD::ZERO_EXTEND == Opcode);
4680  }
4681  return false;
4682}
4683
4684static bool isTargetShuffle(unsigned Opcode) {
4685  switch(Opcode) {
4686  default: return false;
4687  case X86ISD::BLENDI:
4688  case X86ISD::PSHUFB:
4689  case X86ISD::PSHUFD:
4690  case X86ISD::PSHUFHW:
4691  case X86ISD::PSHUFLW:
4692  case X86ISD::SHUFP:
4693  case X86ISD::INSERTPS:
4694  case X86ISD::EXTRQI:
4695  case X86ISD::INSERTQI:
4696  case X86ISD::PALIGNR:
4697  case X86ISD::VSHLDQ:
4698  case X86ISD::VSRLDQ:
4699  case X86ISD::MOVLHPS:
4700  case X86ISD::MOVHLPS:
4701  case X86ISD::MOVSHDUP:
4702  case X86ISD::MOVSLDUP:
4703  case X86ISD::MOVDDUP:
4704  case X86ISD::MOVSS:
4705  case X86ISD::MOVSD:
4706  case X86ISD::UNPCKL:
4707  case X86ISD::UNPCKH:
4708  case X86ISD::VBROADCAST:
4709  case X86ISD::VPERMILPI:
4710  case X86ISD::VPERMILPV:
4711  case X86ISD::VPERM2X128:
4712  case X86ISD::SHUF128:
4713  case X86ISD::VPERMIL2:
4714  case X86ISD::VPERMI:
4715  case X86ISD::VPPERM:
4716  case X86ISD::VPERMV:
4717  case X86ISD::VPERMV3:
4718  case X86ISD::VZEXT_MOVL:
4719    return true;
4720  }
4721}
4722
4723static bool isTargetShuffleVariableMask(unsigned Opcode) {
4724  switch (Opcode) {
4725  default: return false;
4726  // Target Shuffles.
4727  case X86ISD::PSHUFB:
4728  case X86ISD::VPERMILPV:
4729  case X86ISD::VPERMIL2:
4730  case X86ISD::VPPERM:
4731  case X86ISD::VPERMV:
4732  case X86ISD::VPERMV3:
4733    return true;
4734  // 'Faux' Target Shuffles.
4735  case ISD::OR:
4736  case ISD::AND:
4737  case X86ISD::ANDNP:
4738    return true;
4739  }
4740}
4741
4742SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
4743  MachineFunction &MF = DAG.getMachineFunction();
4744  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4745  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
4746  int ReturnAddrIndex = FuncInfo->getRAIndex();
4747
4748  if (ReturnAddrIndex == 0) {
4749    // Set up a frame object for the return address.
4750    unsigned SlotSize = RegInfo->getSlotSize();
4751    ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
4752                                                          -(int64_t)SlotSize,
4753                                                          false);
4754    FuncInfo->setRAIndex(ReturnAddrIndex);
4755  }
4756
4757  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
4758}
4759
4760bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
4761                                       bool hasSymbolicDisplacement) {
4762  // Offset should fit into 32 bit immediate field.
4763  if (!isInt<32>(Offset))
4764    return false;
4765
4766  // If we don't have a symbolic displacement, we don't have any extra
4767  // restrictions.
4768  if (!hasSymbolicDisplacement)
4769    return true;
4770
4771  // FIXME: Some tweaks might be needed for medium code model.
4772  if (M != CodeModel::Small && M != CodeModel::Kernel)
4773    return false;
4774
4775  // For the small code model we assume that the latest object is 16MB before the
4776  // end of the 31-bit boundary. We may also accept pretty large negative constants,
4777  // knowing that all objects are in the positive half of the address space.
4778  if (M == CodeModel::Small && Offset < 16*1024*1024)
4779    return true;
4780
4781  // For the kernel code model we know that all objects reside in the negative
4782  // half of the 32-bit address space. We cannot accept negative offsets, since
4783  // they may be just out of range, but we can accept pretty large positive ones.
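  // (The kernel is linked in the top 2GB of the address space, so symbol
  // addresses sign-extend from 32 bits; a non-negative offset keeps the
  // displacement within that range, while a negative one might fall below it.)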
4784  if (M == CodeModel::Kernel && Offset >= 0)
4785    return true;
4786
4787  return false;
4788}
4789
4790/// Determines whether the callee is required to pop its own arguments.
4791/// Callee pop is necessary to support tail calls.
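/// For example, a 32-bit stdcall function taking two i32 arguments returns with
/// 'ret 8', popping its own 8 bytes of stack arguments.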
4792bool X86::isCalleePop(CallingConv::ID CallingConv,
4793                      bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
4794  // If GuaranteeTCO is true, we force some calls to be callee pop so that we
4795  // can guarantee TCO.
4796  if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
4797    return true;
4798
4799  switch (CallingConv) {
4800  default:
4801    return false;
4802  case CallingConv::X86_StdCall:
4803  case CallingConv::X86_FastCall:
4804  case CallingConv::X86_ThisCall:
4805  case CallingConv::X86_VectorCall:
4806    return !is64Bit;
4807  }
4808}
4809
4810/// Return true if the condition is a signed comparison operation.
4811static bool isX86CCSigned(unsigned X86CC) {
4812  switch (X86CC) {
4813  default:
4814    llvm_unreachable("Invalid integer condition!");
4815  case X86::COND_E:
4816  case X86::COND_NE:
4817  case X86::COND_B:
4818  case X86::COND_A:
4819  case X86::COND_BE:
4820  case X86::COND_AE:
4821    return false;
4822  case X86::COND_G:
4823  case X86::COND_GE:
4824  case X86::COND_L:
4825  case X86::COND_LE:
4826    return true;
4827  }
4828}
4829
4830static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
4831  switch (SetCCOpcode) {
4832  default: llvm_unreachable("Invalid integer condition!");
4833  case ISD::SETEQ:  return X86::COND_E;
4834  case ISD::SETGT:  return X86::COND_G;
4835  case ISD::SETGE:  return X86::COND_GE;
4836  case ISD::SETLT:  return X86::COND_L;
4837  case ISD::SETLE:  return X86::COND_LE;
4838  case ISD::SETNE:  return X86::COND_NE;
4839  case ISD::SETULT: return X86::COND_B;
4840  case ISD::SETUGT: return X86::COND_A;
4841  case ISD::SETULE: return X86::COND_BE;
4842  case ISD::SETUGE: return X86::COND_AE;
4843  }
4844}
4845
4846/// Do a one-to-one translation of an ISD::CondCode to the X86-specific
4847/// condition code, returning the condition code and the LHS/RHS of the
4848/// comparison to make.
4849static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
4850                               bool isFP, SDValue &LHS, SDValue &RHS,
4851                               SelectionDAG &DAG) {
4852  if (!isFP) {
4853    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4854      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
4855        // X > -1   -> X == 0, jump !sign.
4856        RHS = DAG.getConstant(0, DL, RHS.getValueType());
4857        return X86::COND_NS;
4858      }
4859      if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
4860        // X < 0   -> X == 0, jump on sign.
4861        return X86::COND_S;
4862      }
4863      if (SetCCOpcode == ISD::SETGE && RHSC->isNullValue()) {
4864        // X >= 0   -> X == 0, jump on !sign.
4865        return X86::COND_NS;
4866      }
4867      if (SetCCOpcode == ISD::SETLT && RHSC->isOne()) {
4868        // X < 1   -> X <= 0
4869        RHS = DAG.getConstant(0, DL, RHS.getValueType());
4870        return X86::COND_LE;
4871      }
4872    }
4873
4874    return TranslateIntegerX86CC(SetCCOpcode);
4875  }
4876
4877  // First determine if it is required or is profitable to flip the operands.
4878
4879  // If LHS is a foldable load, but RHS is not, flip the condition.
4880  if (ISD::isNON_EXTLoad(LHS.getNode()) &&
4881      !ISD::isNON_EXTLoad(RHS.getNode())) {
4882    SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
4883    std::swap(LHS, RHS);
4884  }
4885
4886  switch (SetCCOpcode) {
4887  default: break;
4888  case ISD::SETOLT:
4889  case ISD::SETOLE:
4890  case ISD::SETUGT:
4891  case ISD::SETUGE:
4892    std::swap(LHS, RHS);
4893    break;
4894  }
4895
4896  // On a floating point condition, the flags are set as follows:
4897  // ZF  PF  CF   op
4898  //  0 | 0 | 0 | X > Y
4899  //  0 | 0 | 1 | X < Y
4900  //  1 | 0 | 0 | X == Y
4901  //  1 | 1 | 1 | unordered
4902  switch (SetCCOpcode) {
4903  default: llvm_unreachable("Condcode should be pre-legalized away");
4904  case ISD::SETUEQ:
4905  case ISD::SETEQ:   return X86::COND_E;
4906  case ISD::SETOLT:              // flipped
4907  case ISD::SETOGT:
4908  case ISD::SETGT:   return X86::COND_A;
4909  case ISD::SETOLE:              // flipped
4910  case ISD::SETOGE:
4911  case ISD::SETGE:   return X86::COND_AE;
4912  case ISD::SETUGT:              // flipped
4913  case ISD::SETULT:
4914  case ISD::SETLT:   return X86::COND_B;
4915  case ISD::SETUGE:              // flipped
4916  case ISD::SETULE:
4917  case ISD::SETLE:   return X86::COND_BE;
4918  case ISD::SETONE:
4919  case ISD::SETNE:   return X86::COND_NE;
4920  case ISD::SETUO:   return X86::COND_P;
4921  case ISD::SETO:    return X86::COND_NP;
4922  case ISD::SETOEQ:
4923  case ISD::SETUNE:  return X86::COND_INVALID;
4924  }
4925}
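// Worked example (illustrative): for a floating-point 'setolt x, y' the
// operands are swapped above, so the comparison becomes (y, x) and SETOLT maps
// to COND_A; "x < y (ordered)" is thus tested as "y unsigned-above x", which
// matches the CF/ZF encoding produced by UCOMISS/UCOMISD shown in the table.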
4926
4927/// Is there a floating point cmov for the specific X86 condition code?
4928/// Current x86 isa includes the following FP cmov instructions:
4929/// fcmovb, fcomvbe, fcomve, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
4930static bool hasFPCMov(unsigned X86CC) {
4931  switch (X86CC) {
4932  default:
4933    return false;
4934  case X86::COND_B:
4935  case X86::COND_BE:
4936  case X86::COND_E:
4937  case X86::COND_P:
4938  case X86::COND_A:
4939  case X86::COND_AE:
4940  case X86::COND_NE:
4941  case X86::COND_NP:
4942    return true;
4943  }
4944}
4945
4946
4947bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
4948                                           const CallInst &I,
4949                                           MachineFunction &MF,
4950                                           unsigned Intrinsic) const {
4951
4952  const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
4953  if (!IntrData)
4954    return false;
4955
4956  Info.flags = MachineMemOperand::MONone;
4957  Info.offset = 0;
4958
4959  switch (IntrData->Type) {
4960  case TRUNCATE_TO_MEM_VI8:
4961  case TRUNCATE_TO_MEM_VI16:
4962  case TRUNCATE_TO_MEM_VI32: {
4963    Info.opc = ISD::INTRINSIC_VOID;
4964    Info.ptrVal = I.getArgOperand(0);
4965    MVT VT  = MVT::getVT(I.getArgOperand(1)->getType());
4966    MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
4967    if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
4968      ScalarVT = MVT::i8;
4969    else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
4970      ScalarVT = MVT::i16;
4971    else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
4972      ScalarVT = MVT::i32;
4973
4974    Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
4975    Info.align = Align::None();
4976    Info.flags |= MachineMemOperand::MOStore;
4977    break;
4978  }
4979  case GATHER:
4980  case GATHER_AVX2: {
4981    Info.opc = ISD::INTRINSIC_W_CHAIN;
4982    Info.ptrVal = nullptr;
4983    MVT DataVT = MVT::getVT(I.getType());
4984    MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
4985    unsigned NumElts = std::min(DataVT.getVectorNumElements(),
4986                                IndexVT.getVectorNumElements());
4987    Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
4988    Info.align = Align::None();
4989    Info.flags |= MachineMemOperand::MOLoad;
4990    break;
4991  }
4992  case SCATTER: {
4993    Info.opc = ISD::INTRINSIC_VOID;
4994    Info.ptrVal = nullptr;
4995    MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
4996    MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
4997    unsigned NumElts = std::min(DataVT.getVectorNumElements(),
4998                                IndexVT.getVectorNumElements());
4999    Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
5000    Info.align = Align::None();
5001    Info.flags |= MachineMemOperand::MOStore;
5002    break;
5003  }
5004  default:
5005    return false;
5006  }
5007
5008  return true;
5009}
5010
5011/// Returns true if the target can instruction select the
5012/// specified FP immediate natively. If false, the legalizer will
5013/// materialize the FP immediate as a load from a constant pool.
5014bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
5015                                     bool ForCodeSize) const {
5016  for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
5017    if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
5018      return true;
5019  }
5020  return false;
5021}
5022
5023bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
5024                                              ISD::LoadExtType ExtTy,
5025                                              EVT NewVT) const {
5026  assert(cast<LoadSDNode>(Load)->isSimple() && "illegal to narrow");
5027
5028  // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
5029  // relocations must target a movq or addq instruction: don't let the load shrink.
5030  SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
5031  if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
5032    if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
5033      return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
5034
5035  // If this is an (1) AVX vector load with (2) multiple uses and (3) all of
5036  // those uses are extracted directly into a store, then the extract + store
5037  // can be store-folded. Therefore, it's probably not worth splitting the load.
5038  EVT VT = Load->getValueType(0);
5039  if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
5040    for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
5041      // Skip uses of the chain value. Result 0 of the node is the load value.
5042      if (UI.getUse().getResNo() != 0)
5043        continue;
5044
5045      // If this use is not an extract + store, it's probably worth splitting.
5046      if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
5047          UI->use_begin()->getOpcode() != ISD::STORE)
5048        return true;
5049    }
5050    // All non-chain uses are extract + store.
5051    return false;
5052  }
5053
5054  return true;
5055}
5056
5057/// Returns true if it is beneficial to convert a load of a constant
5058/// to just the constant itself.
5059bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
5060                                                          Type *Ty) const {
5061  assert(Ty->isIntegerTy());
5062
5063  unsigned BitSize = Ty->getPrimitiveSizeInBits();
5064  if (BitSize == 0 || BitSize > 64)
5065    return false;
5066  return true;
5067}
5068
5069bool X86TargetLowering::reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
5070  // If we are using XMM registers in the ABI and the condition of the select is
5071  // a floating-point compare and we have blendv or conditional move, then it is
5072  // cheaper to select instead of doing a cross-register move and creating a
5073  // load that depends on the compare result.
5074  bool IsFPSetCC = CmpOpVT.isFloatingPoint() && CmpOpVT != MVT::f128;
5075  return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
5076}
5077
5078bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
5079  // TODO: It might be a win to ease or lift this restriction, but the generic
5080  // folds in DAGCombiner conflict with vector folds for an AVX512 target.
5081  if (VT.isVector() && Subtarget.hasAVX512())
5082    return false;
5083
5084  return true;
5085}
5086
5087bool X86TargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
5088                                               SDValue C) const {
5089  // TODO: We handle scalars using custom code, but generic combining could make
5090  // that unnecessary.
5091  APInt MulC;
5092  if (!ISD::isConstantSplatVector(C.getNode(), MulC))
5093    return false;
5094
5095  // Find the type this will be legalized to. Otherwise we might prematurely
5096  // convert this to shl+add/sub and then still have to type legalize those ops.
5097  // Another choice would be to defer the decision for illegal types until
5098  // after type legalization. But constant splat vectors of i64 can't make it
5099  // through type legalization on 32-bit targets so we would need to special
5100  // case vXi64.
5101  while (getTypeAction(Context, VT) != TypeLegal)
5102    VT = getTypeToTransformTo(Context, VT);
5103
5104  // If vector multiply is legal, assume that's faster than shl + add/sub.
5105  // TODO: Multiply is a complex op with higher latency and lower throughput in
5106  //       most implementations, so this check could be loosened based on type
5107  //       and/or a CPU attribute.
5108  if (isOperationLegal(ISD::MUL, VT))
5109    return false;
5110
5111  // shl+add, shl+sub, shl+add+neg
5112  return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
5113         (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
5114}
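// Worked example (illustrative): a splat multiply by 17 satisfies
// (MulC - 1).isPowerOf2(), so x * 17 can become (x << 4) + x; a multiply by 15
// satisfies (MulC + 1).isPowerOf2() and becomes (x << 4) - x. The decomposition
// is only reported when the legalized vector multiply itself is not legal.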
5115
5116bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
5117                                                unsigned Index) const {
5118  if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
5119    return false;
5120
5121  // Mask vectors support all subregister combinations and operations that
5122  // extract half of vector.
5123  if (ResVT.getVectorElementType() == MVT::i1)
5124    return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
5125                          (Index == ResVT.getVectorNumElements()));
5126
5127  return (Index % ResVT.getVectorNumElements()) == 0;
5128}
5129
5130bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
5131  unsigned Opc = VecOp.getOpcode();
5132
5133  // Assume target opcodes can't be scalarized.
5134  // TODO - do we have any exceptions?
5135  if (Opc >= ISD::BUILTIN_OP_END)
5136    return false;
5137
5138  // If the vector op is not supported, try to convert to scalar.
5139  EVT VecVT = VecOp.getValueType();
5140  if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
5141    return true;
5142
5143  // If the vector op is supported, but the scalar op is not, the transform may
5144  // not be worthwhile.
5145  EVT ScalarVT = VecVT.getScalarType();
5146  return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
5147}
5148
5149bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT) const {
5150  // TODO: Allow vectors?
5151  if (VT.isVector())
5152    return false;
5153  return VT.isSimple() || !isOperationExpand(Opcode, VT);
5154}
5155
5156bool X86TargetLowering::isCheapToSpeculateCttz() const {
5157  // Speculate cttz only if we can directly use TZCNT.
5158  return Subtarget.hasBMI();
5159}
5160
5161bool X86TargetLowering::isCheapToSpeculateCtlz() const {
5162  // Speculate ctlz only if we can directly use LZCNT.
5163  return Subtarget.hasLZCNT();
5164}
5165
5166bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
5167                                                const SelectionDAG &DAG,
5168                                                const MachineMemOperand &MMO) const {
5169  if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
5170      BitcastVT.getVectorElementType() == MVT::i1)
5171    return false;
5172
5173  if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
5174    return false;
5175
5176  // If both types are legal vectors, it's always ok to convert them.
5177  if (LoadVT.isVector() && BitcastVT.isVector() &&
5178      isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
5179    return true;
5180
5181  return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG, MMO);
5182}
5183
5184bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
5185                                         const SelectionDAG &DAG) const {
5186  // If the function has the NoImplicitFloat attribute, do not merge stores to a
5187  // width that would require FP/vector registers; cap at the widest integer size.
5188  bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
5189      Attribute::NoImplicitFloat);
5190
5191  if (NoFloat) {
5192    unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
5193    return (MemVT.getSizeInBits() <= MaxIntSize);
5194  }
5195  // Make sure we don't merge greater than our preferred vector
5196  // width.
5197  if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
5198    return false;
5199  return true;
5200}
5201
5202bool X86TargetLowering::isCtlzFast() const {
5203  return Subtarget.hasFastLZCNT();
5204}
5205
5206bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
5207    const Instruction &AndI) const {
5208  return true;
5209}
5210
5211bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
5212  EVT VT = Y.getValueType();
5213
5214  if (VT.isVector())
5215    return false;
5216
5217  if (!Subtarget.hasBMI())
5218    return false;
5219
5220  // There are only 32-bit and 64-bit forms for 'andn'.
5221  if (VT != MVT::i32 && VT != MVT::i64)
5222    return false;
5223
5224  return !isa<ConstantSDNode>(Y);
5225}
5226
5227bool X86TargetLowering::hasAndNot(SDValue Y) const {
5228  EVT VT = Y.getValueType();
5229
5230  if (!VT.isVector())
5231    return hasAndNotCompare(Y);
5232
5233  // Vector.
5234
5235  if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
5236    return false;
5237
5238  if (VT == MVT::v4i32)
5239    return true;
5240
5241  return Subtarget.hasSSE2();
5242}
5243
5244bool X86TargetLowering::hasBitTest(SDValue X, SDValue Y) const {
5245  return X.getValueType().isScalarInteger(); // 'bt'
5246}
5247
5248bool X86TargetLowering::
5249    shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
5250        SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
5251        unsigned OldShiftOpcode, unsigned NewShiftOpcode,
5252        SelectionDAG &DAG) const {
5253  // Does baseline recommend not to perform the fold by default?
5254  if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
5255          X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
5256    return false;
5257  // For scalars this transform is always beneficial.
5258  if (X.getValueType().isScalarInteger())
5259    return true;
5260  // If all the shift amounts are identical, then transform is beneficial even
5261  // with rudimentary SSE2 shifts.
5262  if (DAG.isSplatValue(Y, /*AllowUndefs=*/true))
5263    return true;
5264  // If we have AVX2 with its powerful shift operations, then it's also good.
5265  if (Subtarget.hasAVX2())
5266    return true;
5267  // Pre-AVX2 vector codegen for this pattern is best for variant with 'shl'.
5268  return NewShiftOpcode == ISD::SHL;
5269}
5270
5271bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
5272    const SDNode *N, CombineLevel Level) const {
5273  assert(((N->getOpcode() == ISD::SHL &&
5274           N->getOperand(0).getOpcode() == ISD::SRL) ||
5275          (N->getOpcode() == ISD::SRL &&
5276           N->getOperand(0).getOpcode() == ISD::SHL)) &&
5277         "Expected shift-shift mask");
5278  EVT VT = N->getValueType(0);
5279  if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
5280      (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
5281    // Only fold if the shift values are equal - so it folds to AND.
5282    // TODO - we should fold if either is a non-uniform vector but we don't do
5283    // the fold for non-splats yet.
5284    return N->getOperand(1) == N->getOperand(0).getOperand(1);
5285  }
5286  return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
5287}
5288
5289bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
5290  EVT VT = Y.getValueType();
5291
5292  // For vectors, we don't have a preference, but we probably want a mask.
5293  if (VT.isVector())
5294    return false;
5295
5296  // 64-bit shifts on 32-bit targets produce really bad bloated code.
5297  if (VT == MVT::i64 && !Subtarget.is64Bit())
5298    return false;
5299
5300  return true;
5301}
5302
5303bool X86TargetLowering::shouldExpandShift(SelectionDAG &DAG,
5304                                          SDNode *N) const {
5305  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
5306      !Subtarget.isOSWindows())
5307    return false;
5308  return true;
5309}
5310
5311bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
5312  // Any legal vector type can be splatted more efficiently than
5313  // loading/spilling from memory.
5314  return isTypeLegal(VT);
5315}
5316
5317MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
5318  MVT VT = MVT::getIntegerVT(NumBits);
5319  if (isTypeLegal(VT))
5320    return VT;
5321
5322  // PMOVMSKB can handle this.
5323  if (NumBits == 128 && isTypeLegal(MVT::v16i8))
5324    return MVT::v16i8;
5325
5326  // VPMOVMSKB can handle this.
5327  if (NumBits == 256 && isTypeLegal(MVT::v32i8))
5328    return MVT::v32i8;
5329
5330  // TODO: Allow 64-bit type for 32-bit target.
5331  // TODO: 512-bit types should be allowed, but make sure that those
5332  // cases are handled in combineVectorSizedSetCCEquality().
5333
5334  return MVT::INVALID_SIMPLE_VALUE_TYPE;
5335}
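// Illustrative example: a 256-bit equality compare (e.g. an expanded memcmp of
// two 32-byte blocks) reports MVT::v32i8 when AVX2 is available, so it can be
// lowered with a vector compare plus VPMOVMSKB rather than scalar compares.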
5336
5337/// Val is the undef sentinel value or equal to the specified value.
5338static bool isUndefOrEqual(int Val, int CmpVal) {
5339  return ((Val == SM_SentinelUndef) || (Val == CmpVal));
5340}
5341
5342/// Val is either the undef or zero sentinel value.
5343static bool isUndefOrZero(int Val) {
5344  return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
5345}
5346
5347/// Return true if every element in Mask, beginning from position Pos and ending
5348/// in Pos+Size is the undef sentinel value.
5349static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
5350  return llvm::all_of(Mask.slice(Pos, Size),
5351                      [](int M) { return M == SM_SentinelUndef; });
5352}
5353
5354/// Return true if the mask creates a vector whose lower half is undefined.
5355static bool isUndefLowerHalf(ArrayRef<int> Mask) {
5356  unsigned NumElts = Mask.size();
5357  return isUndefInRange(Mask, 0, NumElts / 2);
5358}
5359
5360/// Return true if the mask creates a vector whose upper half is undefined.
5361static bool isUndefUpperHalf(ArrayRef<int> Mask) {
5362  unsigned NumElts = Mask.size();
5363  return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
5364}
5365
5366/// Return true if Val falls within the specified range [Low, Hi).
5367static bool isInRange(int Val, int Low, int Hi) {
5368  return (Val >= Low && Val < Hi);
5369}
5370
5371/// Return true if the value of any element in Mask falls within the specified
5372/// range [Low, Hi).
5373static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
5374  return llvm::any_of(Mask, [Low, Hi](int M) { return isInRange(M, Low, Hi); });
5375}
5376
5377/// Return true if Val is undef or if its value falls within the
5378/// specified range [Low, Hi).
5379static bool isUndefOrInRange(int Val, int Low, int Hi) {
5380  return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
5381}
5382
5383/// Return true if every element in Mask is undef or if its value
5384/// falls within the specified range [Low, Hi).
5385static bool isUndefOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
5386  return llvm::all_of(
5387      Mask, [Low, Hi](int M) { return isUndefOrInRange(M, Low, Hi); });
5388}
5389
5390/// Return true if Val is undef, zero or if its value falls within the
5391/// specified range [Low, Hi).
5392static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
5393  return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
5394}
5395
5396/// Return true if every element in Mask is undef, zero or if its value
5397/// falls within the specified range [Low, Hi).
5398static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
5399  return llvm::all_of(
5400      Mask, [Low, Hi](int M) { return isUndefOrZeroOrInRange(M, Low, Hi); });
5401}
5402
5403/// Return true if every element in Mask, beginning
5404/// from position Pos and ending in Pos + Size, falls within the specified
5405/// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
5406static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
5407                                       unsigned Size, int Low, int Step = 1) {
5408  for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
5409    if (!isUndefOrEqual(Mask[i], Low))
5410      return false;
5411  return true;
5412}
5413
5414/// Return true if every element in Mask, beginning
5415/// from position Pos and ending in Pos+Size, falls within the specified
5416/// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step), or is undef or zero.
5417static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5418                                             unsigned Size, int Low,
5419                                             int Step = 1) {
5420  for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
5421    if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
5422      return false;
5423  return true;
5424}
5425
5426/// Return true if every element in Mask, beginning
5427/// from position Pos and ending in Pos+Size is undef or is zero.
5428static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5429                                 unsigned Size) {
5430  return llvm::all_of(Mask.slice(Pos, Size),
5431                      [](int M) { return isUndefOrZero(M); });
5432}
5433
5434/// Helper function to test whether a shuffle mask could be
5435/// simplified by widening the elements being shuffled.
5436///
5437/// Appends the mask for wider elements in WidenedMask if valid. Otherwise
5438/// leaves it in an unspecified state.
5439///
5440/// NOTE: This must handle normal vector shuffle masks and *target* vector
5441/// shuffle masks. The latter have the special property of a '-2' representing
5442/// a zero-ed lane of a vector.
5443static bool canWidenShuffleElements(ArrayRef<int> Mask,
5444                                    SmallVectorImpl<int> &WidenedMask) {
5445  WidenedMask.assign(Mask.size() / 2, 0);
5446  for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
5447    int M0 = Mask[i];
5448    int M1 = Mask[i + 1];
5449
5450    // If both elements are undef, it's trivial.
5451    if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
5452      WidenedMask[i / 2] = SM_SentinelUndef;
5453      continue;
5454    }
5455
5456    // Check for an undef mask and a mask value properly aligned to fit with
5457    // a pair of values. If we find such a case, use the non-undef mask's value.
5458    if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
5459      WidenedMask[i / 2] = M1 / 2;
5460      continue;
5461    }
5462    if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
5463      WidenedMask[i / 2] = M0 / 2;
5464      continue;
5465    }
5466
5467    // When zeroing, we need to spread the zeroing across both lanes to widen.
5468    if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
5469      if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
5470          (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
5471        WidenedMask[i / 2] = SM_SentinelZero;
5472        continue;
5473      }
5474      return false;
5475    }
5476
5477    // Finally check if the two mask values are adjacent and aligned with
5478    // a pair.
5479    if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
5480      WidenedMask[i / 2] = M0 / 2;
5481      continue;
5482    }
5483
5484    // Otherwise we can't safely widen the elements used in this shuffle.
5485    return false;
5486  }
5487  assert(WidenedMask.size() == Mask.size() / 2 &&
5488         "Incorrect size of mask after widening the elements!");
5489
5490  return true;
5491}
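// Worked example (illustrative): the v4i32 mask <0, 1, 6, 7> widens to the
// v2i64 mask <0, 3>, since each pair is adjacent and starts on an even index;
// the mask <1, 2, 4, 5> fails because its first pair straddles two wide
// elements.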
5492
5493static bool canWidenShuffleElements(ArrayRef<int> Mask,
5494                                    const APInt &Zeroable,
5495                                    bool V2IsZero,
5496                                    SmallVectorImpl<int> &WidenedMask) {
5497  // Create an alternative mask with info about zeroable elements.
5498  // Here we do not set undef elements as zeroable.
5499  SmallVector<int, 64> ZeroableMask(Mask.begin(), Mask.end());
5500  if (V2IsZero) {
5501    assert(!Zeroable.isNullValue() && "V2's non-undef elements are used?!");
5502    for (int i = 0, Size = Mask.size(); i != Size; ++i)
5503      if (Mask[i] != SM_SentinelUndef && Zeroable[i])
5504        ZeroableMask[i] = SM_SentinelZero;
5505  }
5506  return canWidenShuffleElements(ZeroableMask, WidenedMask);
5507}
5508
5509static bool canWidenShuffleElements(ArrayRef<int> Mask) {
5510  SmallVector<int, 32> WidenedMask;
5511  return canWidenShuffleElements(Mask, WidenedMask);
5512}
5513
5514/// Returns true if Elt is a constant zero or a floating point constant +0.0.
5515bool X86::isZeroNode(SDValue Elt) {
5516  return isNullConstant(Elt) || isNullFPConstant(Elt);
5517}
5518
5519// Build a vector of constants.
5520// Use an UNDEF node if MaskElt == -1.
5521// Split 64-bit constants in 32-bit mode.
5522static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
5523                              const SDLoc &dl, bool IsMask = false) {
5524
5525  SmallVector<SDValue, 32>  Ops;
5526  bool Split = false;
5527
5528  MVT ConstVecVT = VT;
5529  unsigned NumElts = VT.getVectorNumElements();
5530  bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5531  if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5532    ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5533    Split = true;
5534  }
5535
5536  MVT EltVT = ConstVecVT.getVectorElementType();
5537  for (unsigned i = 0; i < NumElts; ++i) {
5538    bool IsUndef = Values[i] < 0 && IsMask;
5539    SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
5540      DAG.getConstant(Values[i], dl, EltVT);
5541    Ops.push_back(OpNode);
5542    if (Split)
5543      Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
5544                    DAG.getConstant(0, dl, EltVT));
5545  }
5546  SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5547  if (Split)
5548    ConstsNode = DAG.getBitcast(VT, ConstsNode);
5549  return ConstsNode;
5550}
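// Illustrative example: building the v2i64 mask constant <1, 3> on a 32-bit
// target (where i64 is not legal) emits the v4i32 build_vector <1, 0, 3, 0>
// and bitcasts it back to v2i64, giving the little-endian 64-bit values.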
5551
5552static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs,
5553                              MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5554  assert(Bits.size() == Undefs.getBitWidth() &&
5555         "Unequal constant and undef arrays");
5556  SmallVector<SDValue, 32> Ops;
5557  bool Split = false;
5558
5559  MVT ConstVecVT = VT;
5560  unsigned NumElts = VT.getVectorNumElements();
5561  bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5562  if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5563    ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5564    Split = true;
5565  }
5566
5567  MVT EltVT = ConstVecVT.getVectorElementType();
5568  for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
5569    if (Undefs[i]) {
5570      Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
5571      continue;
5572    }
5573    const APInt &V = Bits[i];
5574    assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
5575    if (Split) {
5576      Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
5577      Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
5578    } else if (EltVT == MVT::f32) {
5579      APFloat FV(APFloat::IEEEsingle(), V);
5580      Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5581    } else if (EltVT == MVT::f64) {
5582      APFloat FV(APFloat::IEEEdouble(), V);
5583      Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5584    } else {
5585      Ops.push_back(DAG.getConstant(V, dl, EltVT));
5586    }
5587  }
5588
5589  SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5590  return DAG.getBitcast(VT, ConstsNode);
5591}
5592
5593/// Returns a vector of specified type with all zero elements.
5594static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
5595                             SelectionDAG &DAG, const SDLoc &dl) {
5596  assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
5597          VT.getVectorElementType() == MVT::i1) &&
5598         "Unexpected vector type");
5599
5600  // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
5601  // type. This ensures they get CSE'd. But if the integer type is not
5602  // available, use a floating-point +0.0 instead.
5603  SDValue Vec;
5604  if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
5605    Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
5606  } else if (VT.isFloatingPoint()) {
5607    Vec = DAG.getConstantFP(+0.0, dl, VT);
5608  } else if (VT.getVectorElementType() == MVT::i1) {
5609    assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
5610           "Unexpected vector type");
5611    Vec = DAG.getConstant(0, dl, VT);
5612  } else {
5613    unsigned Num32BitElts = VT.getSizeInBits() / 32;
5614    Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
5615  }
5616  return DAG.getBitcast(VT, Vec);
5617}
5618
5619static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
5620                                const SDLoc &dl, unsigned vectorWidth) {
5621  EVT VT = Vec.getValueType();
5622  EVT ElVT = VT.getVectorElementType();
5623  unsigned Factor = VT.getSizeInBits()/vectorWidth;
5624  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
5625                                  VT.getVectorNumElements()/Factor);
5626
5627  // Extract the relevant vectorWidth bits.  Generate an EXTRACT_SUBVECTOR
5628  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
5629  assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5630
5631  // This is the index of the first element of the vectorWidth-bit chunk
5632  // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
5633  IdxVal &= ~(ElemsPerChunk - 1);
5634
5635  // If the input is a buildvector just emit a smaller one.
5636  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
5637    return DAG.getBuildVector(ResultVT, dl,
5638                              Vec->ops().slice(IdxVal, ElemsPerChunk));
5639
5640  SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5641  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
5642}
5643
5644/// Generate a DAG to grab 128-bits from a vector > 128 bits.  This
5645/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
5646/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
5647/// instructions or a simple subregister reference. Idx is an index in the
5648/// 128 bits we want.  It need not be aligned to a 128-bit boundary.  That makes
5649/// lowering EXTRACT_VECTOR_ELT operations easier.
5650static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
5651                                   SelectionDAG &DAG, const SDLoc &dl) {
5652  assert((Vec.getValueType().is256BitVector() ||
5653          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
5654  return extractSubVector(Vec, IdxVal, DAG, dl, 128);
5655}
5656
5657/// Generate a DAG to grab 256-bits from a 512-bit vector.
5658static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
5659                                   SelectionDAG &DAG, const SDLoc &dl) {
5660  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
5661  return extractSubVector(Vec, IdxVal, DAG, dl, 256);
5662}
5663
5664static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5665                               SelectionDAG &DAG, const SDLoc &dl,
5666                               unsigned vectorWidth) {
5667  assert((vectorWidth == 128 || vectorWidth == 256) &&
5668         "Unsupported vector width");
5669  // Inserting an UNDEF subvector leaves Result unchanged.
5670  if (Vec.isUndef())
5671    return Result;
5672  EVT VT = Vec.getValueType();
5673  EVT ElVT = VT.getVectorElementType();
5674  EVT ResultVT = Result.getValueType();
5675
5676  // Insert the relevant vectorWidth bits.
5677  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
5678  assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5679
5680  // This is the index of the first element of the vectorWidth-bit chunk
5681  // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
5682  IdxVal &= ~(ElemsPerChunk - 1);
5683
5684  SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5685  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
5686}
5687
5688/// Generate a DAG to put 128-bits into a vector > 128 bits.  This
5689/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
5690/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
5691/// simple superregister reference.  Idx is an index in the 128 bits
5692/// we want.  It need not be aligned to a 128-bit boundary.  That makes
5693/// lowering INSERT_VECTOR_ELT operations easier.
5694static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5695                                  SelectionDAG &DAG, const SDLoc &dl) {
5696  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
5697  return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
5698}
5699
5700/// Widen a vector to a larger size with the same scalar type, with the new
5701/// elements either zero or undef.
5702static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
5703                              const X86Subtarget &Subtarget, SelectionDAG &DAG,
5704                              const SDLoc &dl) {
5705  assert(Vec.getValueSizeInBits() < VT.getSizeInBits() &&
5706         Vec.getValueType().getScalarType() == VT.getScalarType() &&
5707         "Unsupported vector widening type");
5708  SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
5709                                : DAG.getUNDEF(VT);
5710  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
5711                     DAG.getIntPtrConstant(0, dl));
5712}
5713
5714/// Widen a vector to a larger size with the same scalar type, with the new
5715/// elements either zero or undef.
5716static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
5717                              const X86Subtarget &Subtarget, SelectionDAG &DAG,
5718                              const SDLoc &dl, unsigned WideSizeInBits) {
5719  assert(Vec.getValueSizeInBits() < WideSizeInBits &&
5720         (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
5721         "Unsupported vector widening type");
5722  unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
5723  MVT SVT = Vec.getSimpleValueType().getScalarType();
5724  MVT VT = MVT::getVectorVT(SVT, WideNumElts);
5725  return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
5726}
5727
5728// Helper function to collect subvector ops that are concatenated together,
5729// either by ISD::CONCAT_VECTORS or an ISD::INSERT_SUBVECTOR series.
5730// The subvectors in Ops are guaranteed to be the same type.
5731static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops) {
5732  assert(Ops.empty() && "Expected an empty ops vector");
5733
5734  if (N->getOpcode() == ISD::CONCAT_VECTORS) {
5735    Ops.append(N->op_begin(), N->op_end());
5736    return true;
5737  }
5738
5739  if (N->getOpcode() == ISD::INSERT_SUBVECTOR &&
5740      isa<ConstantSDNode>(N->getOperand(2))) {
5741    SDValue Src = N->getOperand(0);
5742    SDValue Sub = N->getOperand(1);
5743    const APInt &Idx = N->getConstantOperandAPInt(2);
5744    EVT VT = Src.getValueType();
5745    EVT SubVT = Sub.getValueType();
5746
5747    // TODO - Handle more general insert_subvector chains.
5748    if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2) &&
5749        Idx == (VT.getVectorNumElements() / 2) &&
5750        Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
5751        Src.getOperand(1).getValueType() == SubVT &&
5752        isNullConstant(Src.getOperand(2))) {
5753      Ops.push_back(Src.getOperand(1));
5754      Ops.push_back(Sub);
5755      return true;
5756    }
5757  }
5758
5759  return false;
5760}
5761
5762// Helper for splitting operands of an operation to the legal target size and
5763// applying a function to each part.
5764// Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
5765// 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
5766// deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
5767// The argument Builder is a function that will be applied on each split part:
5768// SDValue Builder(SelectionDAG&G, SDLoc, ArrayRef<SDValue>)
5769template <typename F>
5770SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
5771                         const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
5772                         F Builder, bool CheckBWI = true) {
5773  assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
5774  unsigned NumSubs = 1;
5775  if ((CheckBWI && Subtarget.useBWIRegs()) ||
5776      (!CheckBWI && Subtarget.useAVX512Regs())) {
5777    if (VT.getSizeInBits() > 512) {
5778      NumSubs = VT.getSizeInBits() / 512;
5779      assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
5780    }
5781  } else if (Subtarget.hasAVX2()) {
5782    if (VT.getSizeInBits() > 256) {
5783      NumSubs = VT.getSizeInBits() / 256;
5784      assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
5785    }
5786  } else {
5787    if (VT.getSizeInBits() > 128) {
5788      NumSubs = VT.getSizeInBits() / 128;
5789      assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
5790    }
5791  }
5792
5793  if (NumSubs == 1)
5794    return Builder(DAG, DL, Ops);
5795
5796  SmallVector<SDValue, 4> Subs;
5797  for (unsigned i = 0; i != NumSubs; ++i) {
5798    SmallVector<SDValue, 2> SubOps;
5799    for (SDValue Op : Ops) {
5800      EVT OpVT = Op.getValueType();
5801      unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
5802      unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
5803      SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
5804    }
5805    Subs.push_back(Builder(DAG, DL, SubOps));
5806  }
5807  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
5808}
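// Illustrative usage sketch: a caller lowering a 512-bit integer operation on
// an AVX2-only target can pass its 512-bit operands here; they are split into
// two 256-bit halves, Builder is invoked once per half, and the results are
// rejoined with ISD::CONCAT_VECTORS.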
5809
5810/// Insert i1-subvector to i1-vector.
5811static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
5812                                const X86Subtarget &Subtarget) {
5813
5814  SDLoc dl(Op);
5815  SDValue Vec = Op.getOperand(0);
5816  SDValue SubVec = Op.getOperand(1);
5817  SDValue Idx = Op.getOperand(2);
5818
5819  if (!isa<ConstantSDNode>(Idx))
5820    return SDValue();
5821
5822  // Inserting undef is a nop. We can just return the original vector.
5823  if (SubVec.isUndef())
5824    return Vec;
5825
5826  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
5827  if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
5828    return Op;
5829
5830  MVT OpVT = Op.getSimpleValueType();
5831  unsigned NumElems = OpVT.getVectorNumElements();
5832
5833  SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
5834
5835  // Extend to natively supported kshift.
5836  MVT WideOpVT = OpVT;
5837  if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
5838    WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
5839
5840  // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
5841  // if necessary.
5842  if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
5843    // May need to promote to a legal type.
5844    Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5845                     DAG.getConstant(0, dl, WideOpVT),
5846                     SubVec, Idx);
5847    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5848  }
5849
5850  MVT SubVecVT = SubVec.getSimpleValueType();
5851  unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
5852
5853  assert(IdxVal + SubVecNumElems <= NumElems &&
5854         IdxVal % SubVecVT.getSizeInBits() == 0 &&
5855         "Unexpected index value in INSERT_SUBVECTOR");
5856
5857  SDValue Undef = DAG.getUNDEF(WideOpVT);
5858
5859  if (IdxVal == 0) {
5860    // Zero lower bits of the Vec
5861    SDValue ShiftBits = DAG.getTargetConstant(SubVecNumElems, dl, MVT::i8);
5862    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
5863                      ZeroIdx);
5864    Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
5865    Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
5866    // Merge them together, SubVec should be zero extended.
5867    SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5868                         DAG.getConstant(0, dl, WideOpVT),
5869                         SubVec, ZeroIdx);
5870    Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5871    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5872  }
5873
5874  SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5875                       Undef, SubVec, ZeroIdx);
5876
5877  if (Vec.isUndef()) {
5878    assert(IdxVal != 0 && "Unexpected index");
5879    SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5880                         DAG.getTargetConstant(IdxVal, dl, MVT::i8));
5881    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5882  }
5883
5884  if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
5885    assert(IdxVal != 0 && "Unexpected index");
5886    NumElems = WideOpVT.getVectorNumElements();
5887    unsigned ShiftLeft = NumElems - SubVecNumElems;
5888    unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
5889    SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5890                         DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
5891    if (ShiftRight != 0)
5892      SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
5893                           DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
5894    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5895  }
5896
5897  // Simple case when we put the subvector in the upper part.
5898  if (IdxVal + SubVecNumElems == NumElems) {
5899    SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5900                         DAG.getTargetConstant(IdxVal, dl, MVT::i8));
5901    if (SubVecNumElems * 2 == NumElems) {
5902      // Special case: use a legal zero-extending insert_subvector. This allows
5903      // isel to optimize when bits are known zero.
5904      Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
5905      Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5906                        DAG.getConstant(0, dl, WideOpVT),
5907                        Vec, ZeroIdx);
5908    } else {
5909      // Otherwise use explicit shifts to zero the bits.
5910      Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5911                        Undef, Vec, ZeroIdx);
5912      NumElems = WideOpVT.getVectorNumElements();
5913      SDValue ShiftBits = DAG.getTargetConstant(NumElems - IdxVal, dl, MVT::i8);
5914      Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
5915      Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
5916    }
5917    Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5918    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5919  }
5920
5921  // Inserting into the middle is more complicated.
5922
5923  NumElems = WideOpVT.getVectorNumElements();
5924
5925  // Widen the vector if needed.
5926  Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
5927
5928  unsigned ShiftLeft = NumElems - SubVecNumElems;
5929  unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
5930
5931  // Do an optimization for the most frequently used types.
5932  if (WideOpVT != MVT::v64i1 || Subtarget.is64Bit()) {
5933    APInt Mask0 = APInt::getBitsSet(NumElems, IdxVal, IdxVal + SubVecNumElems);
5934    Mask0.flipAllBits();
5935    SDValue CMask0 = DAG.getConstant(Mask0, dl, MVT::getIntegerVT(NumElems));
5936    SDValue VMask0 = DAG.getNode(ISD::BITCAST, dl, WideOpVT, CMask0);
5937    Vec = DAG.getNode(ISD::AND, dl, WideOpVT, Vec, VMask0);
5938    SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5939                         DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
5940    SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
5941                         DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
5942    Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5943
5944    // Reduce to original width if needed.
5945    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5946  }
5947
5948  // Clear the upper bits of the subvector and move it to its insert position.
5949  SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5950                       DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
5951  SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
5952                       DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
5953
5954  // Isolate the bits below the insertion point.
5955  unsigned LowShift = NumElems - IdxVal;
5956  SDValue Low = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec,
5957                            DAG.getTargetConstant(LowShift, dl, MVT::i8));
5958  Low = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Low,
5959                    DAG.getTargetConstant(LowShift, dl, MVT::i8));
5960
5961  // Isolate the bits after the last inserted bit.
5962  unsigned HighShift = IdxVal + SubVecNumElems;
5963  SDValue High = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
5964                            DAG.getTargetConstant(HighShift, dl, MVT::i8));
5965  High = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, High,
5966                    DAG.getTargetConstant(HighShift, dl, MVT::i8));
5967
5968  // Now OR all 3 pieces together.
5969  Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Low, High);
5970  SubVec = DAG.getNode(ISD::OR, dl, WideOpVT, SubVec, Vec);
5971
5972  // Reduce to original width if needed.
5973  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5974}
5975
5976static SDValue concatSubVectors(SDValue V1, SDValue V2, SelectionDAG &DAG,
5977                                const SDLoc &dl) {
5978  assert(V1.getValueType() == V2.getValueType() && "subvector type mismatch");
5979  EVT SubVT = V1.getValueType();
5980  EVT SubSVT = SubVT.getScalarType();
5981  unsigned SubNumElts = SubVT.getVectorNumElements();
5982  unsigned SubVectorWidth = SubVT.getSizeInBits();
5983  EVT VT = EVT::getVectorVT(*DAG.getContext(), SubSVT, 2 * SubNumElts);
5984  SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, SubVectorWidth);
5985  return insertSubVector(V, V2, SubNumElts, DAG, dl, SubVectorWidth);
5986}
5987
5988/// Returns a vector of specified type with all bits set.
5989/// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
5990/// Then bitcast to their original type, ensuring they get CSE'd.
5991static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5992  assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
5993         "Expected a 128/256/512-bit vector type");
5994
5995  APInt Ones = APInt::getAllOnesValue(32);
5996  unsigned NumElts = VT.getSizeInBits() / 32;
5997  SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
5998  return DAG.getBitcast(VT, Vec);
5999}
6000
6001// Convert *_EXTEND to *_EXTEND_VECTOR_INREG opcode.
6002static unsigned getOpcode_EXTEND_VECTOR_INREG(unsigned Opcode) {
6003  switch (Opcode) {
6004  case ISD::ANY_EXTEND:
6005  case ISD::ANY_EXTEND_VECTOR_INREG:
6006    return ISD::ANY_EXTEND_VECTOR_INREG;
6007  case ISD::ZERO_EXTEND:
6008  case ISD::ZERO_EXTEND_VECTOR_INREG:
6009    return ISD::ZERO_EXTEND_VECTOR_INREG;
6010  case ISD::SIGN_EXTEND:
6011  case ISD::SIGN_EXTEND_VECTOR_INREG:
6012    return ISD::SIGN_EXTEND_VECTOR_INREG;
6013  }
6014  llvm_unreachable("Unknown opcode");
6015}
6016
6017static SDValue getExtendInVec(unsigned Opcode, const SDLoc &DL, EVT VT,
6018                              SDValue In, SelectionDAG &DAG) {
6019  EVT InVT = In.getValueType();
6020  assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
6021  assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
6022          ISD::ZERO_EXTEND == Opcode) &&
6023         "Unknown extension opcode");
6024
6025  // For 256-bit vectors, we only need the lower (128-bit) input half.
6026  // For 512-bit vectors, we only need the lower input half or quarter.
6027  if (InVT.getSizeInBits() > 128) {
6028    assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
6029           "Expected VTs to be the same size!");
6030    unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
6031    In = extractSubVector(In, 0, DAG, DL,
6032                          std::max(128U, (unsigned)VT.getSizeInBits() / Scale));
6033    InVT = In.getValueType();
6034  }
6035
6036  if (VT.getVectorNumElements() != InVT.getVectorNumElements())
6037    Opcode = getOpcode_EXTEND_VECTOR_INREG(Opcode);
6038
6039  return DAG.getNode(Opcode, DL, VT, In);
6040}
6041
6042// Match (xor X, -1) -> X.
6043// Match extract_subvector(xor X, -1) -> extract_subvector(X).
6044// Match concat_vectors(xor X, -1, xor Y, -1) -> concat_vectors(X, Y).
6045static SDValue IsNOT(SDValue V, SelectionDAG &DAG) {
6046  V = peekThroughBitcasts(V);
6047  if (V.getOpcode() == ISD::XOR &&
6048      ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()))
6049    return V.getOperand(0);
6050  if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6051      (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
6052    if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
6053      Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
6054      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
6055                         Not, V.getOperand(1));
6056    }
6057  }
6058  SmallVector<SDValue, 2> CatOps;
6059  if (collectConcatOps(V.getNode(), CatOps)) {
6060    for (SDValue &CatOp : CatOps) {
6061      SDValue NotCat = IsNOT(CatOp, DAG);
6062      if (!NotCat) return SDValue();
6063      CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
6064    }
6065    return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
6066  }
6067  return SDValue();
6068}
6069
6070/// Returns a vector_shuffle node for an unpackl operation.
6071static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
6072                          SDValue V1, SDValue V2) {
6073  SmallVector<int, 8> Mask;
6074  createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
6075  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
6076}
6077
6078/// Returns a vector_shuffle node for an unpackh operation.
6079static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
6080                          SDValue V1, SDValue V2) {
6081  SmallVector<int, 8> Mask;
6082  createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
6083  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
6084}
6085
6086/// Return a vector_shuffle of the specified vector and a zero or undef vector.
6087/// This produces a shuffle where the low element of V2 is swizzled into the
6088/// zero/undef vector, landing at element Idx.
6089/// This produces a shuffle mask like 4,1,2,3 (idx=0) or  0,1,2,4 (idx=3).
6090static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
6091                                           bool IsZero,
6092                                           const X86Subtarget &Subtarget,
6093                                           SelectionDAG &DAG) {
6094  MVT VT = V2.getSimpleValueType();
6095  SDValue V1 = IsZero
6096    ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
6097  int NumElems = VT.getVectorNumElements();
6098  SmallVector<int, 16> MaskVec(NumElems);
6099  for (int i = 0; i != NumElems; ++i)
6100    // If this is the insertion idx, put the low elt of V2 here.
6101    MaskVec[i] = (i == Idx) ? NumElems : i;
6102  return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
6103}
6104
6105static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
6106  if (!Load || !ISD::isNormalLoad(Load))
6107    return nullptr;
6108
6109  SDValue Ptr = Load->getBasePtr();
6110  if (Ptr->getOpcode() == X86ISD::Wrapper ||
6111      Ptr->getOpcode() == X86ISD::WrapperRIP)
6112    Ptr = Ptr->getOperand(0);
6113
6114  auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
6115  if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
6116    return nullptr;
6117
6118  return CNode->getConstVal();
6119}
6120
6121static const Constant *getTargetConstantFromNode(SDValue Op) {
6122  Op = peekThroughBitcasts(Op);
6123  return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
6124}
6125
6126const Constant *
6127X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
6128  assert(LD && "Unexpected null LoadSDNode");
6129  return getTargetConstantFromNode(LD);
6130}
6131
6132// Extract raw constant bits from constant pools.
6133static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
6134                                          APInt &UndefElts,
6135                                          SmallVectorImpl<APInt> &EltBits,
6136                                          bool AllowWholeUndefs = true,
6137                                          bool AllowPartialUndefs = true) {
6138  assert(EltBits.empty() && "Expected an empty EltBits vector");
6139
6140  Op = peekThroughBitcasts(Op);
6141
6142  EVT VT = Op.getValueType();
6143  unsigned SizeInBits = VT.getSizeInBits();
6144  assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
6145  unsigned NumElts = SizeInBits / EltSizeInBits;
6146
6147  // Bitcast a source array of element bits to the target size.
6148  auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
6149    unsigned NumSrcElts = UndefSrcElts.getBitWidth();
6150    unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
6151    assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
6152           "Constant bit sizes don't match");
6153
6154    // Don't split if we don't allow undef bits.
6155    bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
6156    if (UndefSrcElts.getBoolValue() && !AllowUndefs)
6157      return false;
6158
6159    // If we're already the right size, don't bother bitcasting.
6160    if (NumSrcElts == NumElts) {
6161      UndefElts = UndefSrcElts;
6162      EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
6163      return true;
6164    }
6165
6166    // Extract all the undef/constant element data and pack into single bitsets.
6167    APInt UndefBits(SizeInBits, 0);
6168    APInt MaskBits(SizeInBits, 0);
6169
6170    for (unsigned i = 0; i != NumSrcElts; ++i) {
6171      unsigned BitOffset = i * SrcEltSizeInBits;
6172      if (UndefSrcElts[i])
6173        UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
6174      MaskBits.insertBits(SrcEltBits[i], BitOffset);
6175    }
6176
6177    // Split the undef/constant single bitset data into the target elements.
6178    UndefElts = APInt(NumElts, 0);
6179    EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
6180
6181    for (unsigned i = 0; i != NumElts; ++i) {
6182      unsigned BitOffset = i * EltSizeInBits;
6183      APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
6184
6185      // Only treat an element as UNDEF if all bits are UNDEF.
6186      if (UndefEltBits.isAllOnesValue()) {
6187        if (!AllowWholeUndefs)
6188          return false;
6189        UndefElts.setBit(i);
6190        continue;
6191      }
6192
6193      // If only some bits are UNDEF then treat them as zero (or bail if not
6194      // supported).
6195      if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
6196        return false;
6197
6198      EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
6199    }
6200    return true;
6201  };
6202
6203  // Collect constant bits and insert into mask/undef bit masks.
6204  auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
6205                                unsigned UndefBitIndex) {
6206    if (!Cst)
6207      return false;
6208    if (isa<UndefValue>(Cst)) {
6209      Undefs.setBit(UndefBitIndex);
6210      return true;
6211    }
6212    if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
6213      Mask = CInt->getValue();
6214      return true;
6215    }
6216    if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
6217      Mask = CFP->getValueAPF().bitcastToAPInt();
6218      return true;
6219    }
6220    return false;
6221  };
6222
6223  // Handle UNDEFs.
6224  if (Op.isUndef()) {
6225    APInt UndefSrcElts = APInt::getAllOnesValue(NumElts);
6226    SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
6227    return CastBitData(UndefSrcElts, SrcEltBits);
6228  }
6229
6230  // Extract scalar constant bits.
6231  if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
6232    APInt UndefSrcElts = APInt::getNullValue(1);
6233    SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
6234    return CastBitData(UndefSrcElts, SrcEltBits);
6235  }
6236  if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
6237    APInt UndefSrcElts = APInt::getNullValue(1);
6238    APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
6239    SmallVector<APInt, 64> SrcEltBits(1, RawBits);
6240    return CastBitData(UndefSrcElts, SrcEltBits);
6241  }
6242
6243  // Extract constant bits from build vector.
6244  if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
6245    unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6246    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6247
6248    APInt UndefSrcElts(NumSrcElts, 0);
6249    SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6250    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
6251      const SDValue &Src = Op.getOperand(i);
6252      if (Src.isUndef()) {
6253        UndefSrcElts.setBit(i);
6254        continue;
6255      }
6256      auto *Cst = cast<ConstantSDNode>(Src);
6257      SrcEltBits[i] = Cst->getAPIntValue().zextOrTrunc(SrcEltSizeInBits);
6258    }
6259    return CastBitData(UndefSrcElts, SrcEltBits);
6260  }
6261  if (ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode())) {
6262    unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6263    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6264
6265    APInt UndefSrcElts(NumSrcElts, 0);
6266    SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6267    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
6268      const SDValue &Src = Op.getOperand(i);
6269      if (Src.isUndef()) {
6270        UndefSrcElts.setBit(i);
6271        continue;
6272      }
6273      auto *Cst = cast<ConstantFPSDNode>(Src);
6274      APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
6275      SrcEltBits[i] = RawBits.zextOrTrunc(SrcEltSizeInBits);
6276    }
6277    return CastBitData(UndefSrcElts, SrcEltBits);
6278  }
6279
6280  // Extract constant bits from constant pool vector.
6281  if (auto *Cst = getTargetConstantFromNode(Op)) {
6282    Type *CstTy = Cst->getType();
6283    unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
6284    if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
6285      return false;
6286
6287    unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
6288    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6289
6290    APInt UndefSrcElts(NumSrcElts, 0);
6291    SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6292    for (unsigned i = 0; i != NumSrcElts; ++i)
6293      if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
6294                               UndefSrcElts, i))
6295        return false;
6296
6297    return CastBitData(UndefSrcElts, SrcEltBits);
6298  }
6299
6300  // Extract constant bits from a broadcasted constant pool scalar.
6301  if (Op.getOpcode() == X86ISD::VBROADCAST &&
6302      EltSizeInBits <= VT.getScalarSizeInBits()) {
6303    if (auto *Broadcast = getTargetConstantFromNode(Op.getOperand(0))) {
6304      unsigned SrcEltSizeInBits = Broadcast->getType()->getScalarSizeInBits();
6305      unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6306
6307      APInt UndefSrcElts(NumSrcElts, 0);
6308      SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
6309      if (CollectConstantBits(Broadcast, SrcEltBits[0], UndefSrcElts, 0)) {
6310        if (UndefSrcElts[0])
6311          UndefSrcElts.setBits(0, NumSrcElts);
6312        SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
6313        return CastBitData(UndefSrcElts, SrcEltBits);
6314      }
6315    }
6316  }
6317
6318  if (Op.getOpcode() == X86ISD::VBROADCAST_LOAD &&
6319      EltSizeInBits <= VT.getScalarSizeInBits()) {
6320    auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
6321    if (MemIntr->getMemoryVT().getScalarSizeInBits() != VT.getScalarSizeInBits())
6322      return false;
6323
6324    SDValue Ptr = MemIntr->getBasePtr();
6325    if (Ptr->getOpcode() == X86ISD::Wrapper ||
6326        Ptr->getOpcode() == X86ISD::WrapperRIP)
6327      Ptr = Ptr->getOperand(0);
6328
6329    auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
6330    if (!CNode || CNode->isMachineConstantPoolEntry() ||
6331        CNode->getOffset() != 0)
6332      return false;
6333
6334    if (const Constant *C = CNode->getConstVal()) {
6335      unsigned SrcEltSizeInBits = C->getType()->getScalarSizeInBits();
6336      unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6337
6338      APInt UndefSrcElts(NumSrcElts, 0);
6339      SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
6340      if (CollectConstantBits(C, SrcEltBits[0], UndefSrcElts, 0)) {
6341        if (UndefSrcElts[0])
6342          UndefSrcElts.setBits(0, NumSrcElts);
6343        SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
6344        return CastBitData(UndefSrcElts, SrcEltBits);
6345      }
6346    }
6347  }
6348
6349  // Extract constant bits from a subvector broadcast.
6350  if (Op.getOpcode() == X86ISD::SUBV_BROADCAST) {
6351    SmallVector<APInt, 16> SubEltBits;
6352    if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6353                                      UndefElts, SubEltBits, AllowWholeUndefs,
6354                                      AllowPartialUndefs)) {
6355      UndefElts = APInt::getSplat(NumElts, UndefElts);
6356      while (EltBits.size() < NumElts)
6357        EltBits.append(SubEltBits.begin(), SubEltBits.end());
6358      return true;
6359    }
6360  }
6361
6362  // Extract a rematerialized scalar constant insertion.
6363  if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
6364      Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
6365      isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
6366    unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6367    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6368
6369    APInt UndefSrcElts(NumSrcElts, 0);
6370    SmallVector<APInt, 64> SrcEltBits;
6371    auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
6372    SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
6373    SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
6374    return CastBitData(UndefSrcElts, SrcEltBits);
6375  }
6376
6377  // Insert constant bits from base and sub-vector sources.
6378  if (Op.getOpcode() == ISD::INSERT_SUBVECTOR &&
6379      isa<ConstantSDNode>(Op.getOperand(2))) {
6380    // TODO - support insert_subvector through bitcasts.
6381    if (EltSizeInBits != VT.getScalarSizeInBits())
6382      return false;
6383
6384    APInt UndefSubElts;
6385    SmallVector<APInt, 32> EltSubBits;
6386    if (getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
6387                                      UndefSubElts, EltSubBits,
6388                                      AllowWholeUndefs, AllowPartialUndefs) &&
6389        getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6390                                      UndefElts, EltBits, AllowWholeUndefs,
6391                                      AllowPartialUndefs)) {
6392      unsigned BaseIdx = Op.getConstantOperandVal(2);
6393      UndefElts.insertBits(UndefSubElts, BaseIdx);
6394      for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
6395        EltBits[BaseIdx + i] = EltSubBits[i];
6396      return true;
6397    }
6398  }
6399
6400  // Extract constant bits from a subvector's source.
6401  if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6402      isa<ConstantSDNode>(Op.getOperand(1))) {
6403    // TODO - support extract_subvector through bitcasts.
6404    if (EltSizeInBits != VT.getScalarSizeInBits())
6405      return false;
6406
6407    if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6408                                      UndefElts, EltBits, AllowWholeUndefs,
6409                                      AllowPartialUndefs)) {
6410      EVT SrcVT = Op.getOperand(0).getValueType();
6411      unsigned NumSrcElts = SrcVT.getVectorNumElements();
6412      unsigned NumSubElts = VT.getVectorNumElements();
6413      unsigned BaseIdx = Op.getConstantOperandVal(1);
6414      UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
6415      if ((BaseIdx + NumSubElts) != NumSrcElts)
6416        EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
6417      if (BaseIdx != 0)
6418        EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
6419      return true;
6420    }
6421  }
6422
6423  // Extract constant bits from shuffle node sources.
6424  if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
6425    // TODO - support shuffle through bitcasts.
6426    if (EltSizeInBits != VT.getScalarSizeInBits())
6427      return false;
6428
6429    ArrayRef<int> Mask = SVN->getMask();
6430    if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
6431        llvm::any_of(Mask, [](int M) { return M < 0; }))
6432      return false;
6433
6434    APInt UndefElts0, UndefElts1;
6435    SmallVector<APInt, 32> EltBits0, EltBits1;
6436    if (isAnyInRange(Mask, 0, NumElts) &&
6437        !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6438                                       UndefElts0, EltBits0, AllowWholeUndefs,
6439                                       AllowPartialUndefs))
6440      return false;
6441    if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
6442        !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
6443                                       UndefElts1, EltBits1, AllowWholeUndefs,
6444                                       AllowPartialUndefs))
6445      return false;
6446
6447    UndefElts = APInt::getNullValue(NumElts);
6448    for (int i = 0; i != (int)NumElts; ++i) {
6449      int M = Mask[i];
6450      if (M < 0) {
6451        UndefElts.setBit(i);
6452        EltBits.push_back(APInt::getNullValue(EltSizeInBits));
6453      } else if (M < (int)NumElts) {
6454        if (UndefElts0[M])
6455          UndefElts.setBit(i);
6456        EltBits.push_back(EltBits0[M]);
6457      } else {
6458        if (UndefElts1[M - NumElts])
6459          UndefElts.setBit(i);
6460        EltBits.push_back(EltBits1[M - NumElts]);
6461      }
6462    }
6463    return true;
6464  }
6465
6466  return false;
6467}
6468
6469namespace llvm {
6470namespace X86 {
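// Returns true if every defined (non-undef) element of Op has the same
// constant value and returns that value in SplatVal. Fails if the constant
// bits cannot be extracted, no element is defined, or any element is only
// partially defined.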
6471bool isConstantSplat(SDValue Op, APInt &SplatVal) {
6472  APInt UndefElts;
6473  SmallVector<APInt, 16> EltBits;
6474  if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
6475                                    UndefElts, EltBits, true, false)) {
6476    int SplatIndex = -1;
6477    for (int i = 0, e = EltBits.size(); i != e; ++i) {
6478      if (UndefElts[i])
6479        continue;
6480      if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
6481        SplatIndex = -1;
6482        break;
6483      }
6484      SplatIndex = i;
6485    }
6486    if (0 <= SplatIndex) {
6487      SplatVal = EltBits[SplatIndex];
6488      return true;
6489    }
6490  }
6491
6492  return false;
6493}
6494} // namespace X86
6495} // namespace llvm
6496
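// Decode a constant shuffle control operand (e.g. a PSHUFB or VPERMV mask
// vector) into raw per-element indices plus a bitmask of undef elements.
// Wholly undef elements are allowed; partially undef elements are rejected.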
6497static bool getTargetShuffleMaskIndices(SDValue MaskNode,
6498                                        unsigned MaskEltSizeInBits,
6499                                        SmallVectorImpl<uint64_t> &RawMask,
6500                                        APInt &UndefElts) {
6501  // Extract the raw target constant bits.
6502  SmallVector<APInt, 64> EltBits;
6503  if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
6504                                     EltBits, /* AllowWholeUndefs */ true,
6505                                     /* AllowPartialUndefs */ false))
6506    return false;
6507
6508  // Insert the extracted elements into the mask.
6509  for (APInt Elt : EltBits)
6510    RawMask.push_back(Elt.getZExtValue());
6511
6512  return true;
6513}
6514
6515/// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
6516/// Note: This ignores saturation, so inputs must be checked first.
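/// For example, a binary v16i8 pack produces the mask
/// <0,2,4,...,14,16,18,...,30> over the two inputs viewed as v16i8, i.e. it
/// keeps the low byte of each 16-bit source element.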
6517static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
6518                                  bool Unary) {
6519  assert(Mask.empty() && "Expected an empty shuffle mask vector");
6520  unsigned NumElts = VT.getVectorNumElements();
6521  unsigned NumLanes = VT.getSizeInBits() / 128;
6522  unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
6523  unsigned Offset = Unary ? 0 : NumElts;
6524
6525  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
6526    for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
6527      Mask.push_back(Elt + (Lane * NumEltsPerLane));
6528    for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
6529      Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
6530  }
6531}
6532
6533// Split the demanded elts of a PACKSS/PACKUS node between its operands.
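// For example, for a single-lane (128-bit) pack with 16 result bytes, a
// demanded result element 3 maps to element 3 of the LHS and a demanded
// result element 11 maps to element 3 of the RHS.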
6534static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
6535                                APInt &DemandedLHS, APInt &DemandedRHS) {
6536  int NumLanes = VT.getSizeInBits() / 128;
6537  int NumElts = DemandedElts.getBitWidth();
6538  int NumInnerElts = NumElts / 2;
6539  int NumEltsPerLane = NumElts / NumLanes;
6540  int NumInnerEltsPerLane = NumInnerElts / NumLanes;
6541
6542  DemandedLHS = APInt::getNullValue(NumInnerElts);
6543  DemandedRHS = APInt::getNullValue(NumInnerElts);
6544
6545  // Map DemandedElts to the packed operands.
6546  for (int Lane = 0; Lane != NumLanes; ++Lane) {
6547    for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
6548      int OuterIdx = (Lane * NumEltsPerLane) + Elt;
6549      int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
6550      if (DemandedElts[OuterIdx])
6551        DemandedLHS.setBit(InnerIdx);
6552      if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
6553        DemandedRHS.setBit(InnerIdx);
6554    }
6555  }
6556}
6557
6558// Split the demanded elts of a HADD/HSUB node between its operands.
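// For example, for a v4i32 HADD the result elements 0 and 1 are produced from
// LHS element pairs (0,1) and (2,3), while result elements 2 and 3 come from
// the corresponding RHS pairs.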
6559static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
6560                                 APInt &DemandedLHS, APInt &DemandedRHS) {
6561  int NumLanes = VT.getSizeInBits() / 128;
6562  int NumElts = DemandedElts.getBitWidth();
6563  int NumEltsPerLane = NumElts / NumLanes;
6564  int HalfEltsPerLane = NumEltsPerLane / 2;
6565
6566  DemandedLHS = APInt::getNullValue(NumElts);
6567  DemandedRHS = APInt::getNullValue(NumElts);
6568
6569  // Map DemandedElts to the horizontal operands.
6570  for (int Idx = 0; Idx != NumElts; ++Idx) {
6571    if (!DemandedElts[Idx])
6572      continue;
6573    int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
6574    int LocalIdx = Idx % NumEltsPerLane;
6575    if (LocalIdx < HalfEltsPerLane) {
6576      DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
6577      DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
6578    } else {
6579      LocalIdx -= HalfEltsPerLane;
6580      DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
6581      DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
6582    }
6583  }
6584}
6585
6586/// Calculates the shuffle mask corresponding to the target-specific opcode.
6587/// If the mask could be calculated, returns it in \p Mask, returns the shuffle
6588/// operands in \p Ops, and returns true.
6589/// Sets \p IsUnary to true if only one source is used. Note that this will set
6590/// IsUnary for shuffles which use a single input multiple times, and in those
6591/// cases it will adjust the mask to only have indices within that single input.
6592/// It is an error to call this with non-empty Mask/Ops vectors.
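/// For example, an X86ISD::UNPCKL node with two distinct v4i32 operands
/// decodes to Ops = {Op0, Op1} and Mask = <0, 4, 1, 5>.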
6593static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
6594                                 SmallVectorImpl<SDValue> &Ops,
6595                                 SmallVectorImpl<int> &Mask, bool &IsUnary) {
6596  unsigned NumElems = VT.getVectorNumElements();
6597  unsigned MaskEltSize = VT.getScalarSizeInBits();
6598  SmallVector<uint64_t, 32> RawMask;
6599  APInt RawUndefs;
6600  SDValue ImmN;
6601
6602  assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
6603  assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
6604
6605  IsUnary = false;
6606  bool IsFakeUnary = false;
6607  switch (N->getOpcode()) {
6608  case X86ISD::BLENDI:
6609    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6610    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6611    ImmN = N->getOperand(N->getNumOperands() - 1);
6612    DecodeBLENDMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6613    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6614    break;
6615  case X86ISD::SHUFP:
6616    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6617    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6618    ImmN = N->getOperand(N->getNumOperands() - 1);
6619    DecodeSHUFPMask(NumElems, MaskEltSize,
6620                    cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6621    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6622    break;
6623  case X86ISD::INSERTPS:
6624    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6625    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6626    ImmN = N->getOperand(N->getNumOperands() - 1);
6627    DecodeINSERTPSMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6628    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6629    break;
6630  case X86ISD::EXTRQI:
6631    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6632    if (isa<ConstantSDNode>(N->getOperand(1)) &&
6633        isa<ConstantSDNode>(N->getOperand(2))) {
6634      int BitLen = N->getConstantOperandVal(1);
6635      int BitIdx = N->getConstantOperandVal(2);
6636      DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
6637      IsUnary = true;
6638    }
6639    break;
6640  case X86ISD::INSERTQI:
6641    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6642    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6643    if (isa<ConstantSDNode>(N->getOperand(2)) &&
6644        isa<ConstantSDNode>(N->getOperand(3))) {
6645      int BitLen = N->getConstantOperandVal(2);
6646      int BitIdx = N->getConstantOperandVal(3);
6647      DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
6648      IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6649    }
6650    break;
6651  case X86ISD::UNPCKH:
6652    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6653    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6654    DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
6655    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6656    break;
6657  case X86ISD::UNPCKL:
6658    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6659    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6660    DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
6661    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6662    break;
6663  case X86ISD::MOVHLPS:
6664    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6665    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6666    DecodeMOVHLPSMask(NumElems, Mask);
6667    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6668    break;
6669  case X86ISD::MOVLHPS:
6670    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6671    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6672    DecodeMOVLHPSMask(NumElems, Mask);
6673    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6674    break;
6675  case X86ISD::PALIGNR:
6676    assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6677    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6678    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6679    ImmN = N->getOperand(N->getNumOperands() - 1);
6680    DecodePALIGNRMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6681                      Mask);
6682    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6683    Ops.push_back(N->getOperand(1));
6684    Ops.push_back(N->getOperand(0));
6685    break;
6686  case X86ISD::VSHLDQ:
6687    assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6688    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6689    ImmN = N->getOperand(N->getNumOperands() - 1);
6690    DecodePSLLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6691                     Mask);
6692    IsUnary = true;
6693    break;
6694  case X86ISD::VSRLDQ:
6695    assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6696    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6697    ImmN = N->getOperand(N->getNumOperands() - 1);
6698    DecodePSRLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6699                     Mask);
6700    IsUnary = true;
6701    break;
6702  case X86ISD::PSHUFD:
6703  case X86ISD::VPERMILPI:
6704    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6705    ImmN = N->getOperand(N->getNumOperands() - 1);
6706    DecodePSHUFMask(NumElems, MaskEltSize,
6707                    cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6708    IsUnary = true;
6709    break;
6710  case X86ISD::PSHUFHW:
6711    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6712    ImmN = N->getOperand(N->getNumOperands() - 1);
6713    DecodePSHUFHWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6714                      Mask);
6715    IsUnary = true;
6716    break;
6717  case X86ISD::PSHUFLW:
6718    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6719    ImmN = N->getOperand(N->getNumOperands() - 1);
6720    DecodePSHUFLWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6721                      Mask);
6722    IsUnary = true;
6723    break;
6724  case X86ISD::VZEXT_MOVL:
6725    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6726    DecodeZeroMoveLowMask(NumElems, Mask);
6727    IsUnary = true;
6728    break;
6729  case X86ISD::VBROADCAST: {
6730    SDValue N0 = N->getOperand(0);
6731    // See if we're broadcasting from index 0 of an EXTRACT_SUBVECTOR. If so,
6732    // add the pre-extracted value to the Ops vector.
6733    if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6734        N0.getOperand(0).getValueType() == VT &&
6735        N0.getConstantOperandVal(1) == 0)
6736      Ops.push_back(N0.getOperand(0));
6737
6738    // We only decode broadcasts of same-sized vectors, unless the broadcast
6739    // came from an extract from the original width. If we found one, we
6740    // pushed it the Ops vector above.
6741    if (N0.getValueType() == VT || !Ops.empty()) {
6742      DecodeVectorBroadcast(NumElems, Mask);
6743      IsUnary = true;
6744      break;
6745    }
6746    return false;
6747  }
6748  case X86ISD::VPERMILPV: {
6749    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6750    IsUnary = true;
6751    SDValue MaskNode = N->getOperand(1);
6752    if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6753                                    RawUndefs)) {
6754      DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
6755      break;
6756    }
6757    return false;
6758  }
6759  case X86ISD::PSHUFB: {
6760    assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6761    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6762    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6763    IsUnary = true;
6764    SDValue MaskNode = N->getOperand(1);
6765    if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
6766      DecodePSHUFBMask(RawMask, RawUndefs, Mask);
6767      break;
6768    }
6769    return false;
6770  }
6771  case X86ISD::VPERMI:
6772    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6773    ImmN = N->getOperand(N->getNumOperands() - 1);
6774    DecodeVPERMMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6775    IsUnary = true;
6776    break;
6777  case X86ISD::MOVSS:
6778  case X86ISD::MOVSD:
6779    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6780    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6781    DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
6782    break;
6783  case X86ISD::VPERM2X128:
6784    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6785    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6786    ImmN = N->getOperand(N->getNumOperands() - 1);
6787    DecodeVPERM2X128Mask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6788                         Mask);
6789    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6790    break;
6791  case X86ISD::SHUF128:
6792    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6793    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6794    ImmN = N->getOperand(N->getNumOperands() - 1);
6795    decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize,
6796                              cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6797    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6798    break;
6799  case X86ISD::MOVSLDUP:
6800    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6801    DecodeMOVSLDUPMask(NumElems, Mask);
6802    IsUnary = true;
6803    break;
6804  case X86ISD::MOVSHDUP:
6805    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6806    DecodeMOVSHDUPMask(NumElems, Mask);
6807    IsUnary = true;
6808    break;
6809  case X86ISD::MOVDDUP:
6810    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6811    DecodeMOVDDUPMask(NumElems, Mask);
6812    IsUnary = true;
6813    break;
6814  case X86ISD::VPERMIL2: {
6815    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6816    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6817    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6818    SDValue MaskNode = N->getOperand(2);
6819    SDValue CtrlNode = N->getOperand(3);
6820    if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
6821      unsigned CtrlImm = CtrlOp->getZExtValue();
6822      if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6823                                      RawUndefs)) {
6824        DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
6825                            Mask);
6826        break;
6827      }
6828    }
6829    return false;
6830  }
6831  case X86ISD::VPPERM: {
6832    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6833    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6834    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6835    SDValue MaskNode = N->getOperand(2);
6836    if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
6837      DecodeVPPERMMask(RawMask, RawUndefs, Mask);
6838      break;
6839    }
6840    return false;
6841  }
6842  case X86ISD::VPERMV: {
6843    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6844    IsUnary = true;
6845    // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
6846    Ops.push_back(N->getOperand(1));
6847    SDValue MaskNode = N->getOperand(0);
6848    if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6849                                    RawUndefs)) {
6850      DecodeVPERMVMask(RawMask, RawUndefs, Mask);
6851      break;
6852    }
6853    return false;
6854  }
6855  case X86ISD::VPERMV3: {
6856    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6857    assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
6858    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
6859    // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
6860    Ops.push_back(N->getOperand(0));
6861    Ops.push_back(N->getOperand(2));
6862    SDValue MaskNode = N->getOperand(1);
6863    if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6864                                    RawUndefs)) {
6865      DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
6866      break;
6867    }
6868    return false;
6869  }
6870  default: llvm_unreachable("unknown target shuffle node");
6871  }
6872
6873  // Empty mask indicates the decode failed.
6874  if (Mask.empty())
6875    return false;
6876
6877  // Check if we're getting a shuffle mask with zero'd elements.
6878  if (!AllowSentinelZero)
6879    if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
6880      return false;
6881
6882  // If we have a fake unary shuffle, the shuffle mask is spread across two
6883  // inputs that are actually the same node. Re-map the mask to always point
6884  // into the first input.
6885  if (IsFakeUnary)
6886    for (int &M : Mask)
6887      if (M >= (int)Mask.size())
6888        M -= Mask.size();
6889
6890  // If we didn't already add operands in the opcode-specific code, default to
6891  // adding 1 or 2 operands starting at 0.
6892  if (Ops.empty()) {
6893    Ops.push_back(N->getOperand(0));
6894    if (!IsUnary || IsFakeUnary)
6895      Ops.push_back(N->getOperand(1));
6896  }
6897
6898  return true;
6899}
6900
6901/// Compute whether each element of a shuffle is zeroable.
6902///
6903/// A "zeroable" vector shuffle element is one which can be lowered to zero.
6904/// Either it is an undef element in the shuffle mask, the element of the input
6905/// referenced is undef, or the element of the input referenced is known to be
6906/// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
6907/// as many lanes with this technique as possible to simplify the remaining
6908/// shuffle.
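/// For example, shuffling a v4i32 V1 with an all-zeros V2 using the mask
/// <0, 5, 2, 7> makes elements 1 and 3 zeroable.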
6909static void computeZeroableShuffleElements(ArrayRef<int> Mask,
6910                                           SDValue V1, SDValue V2,
6911                                           APInt &KnownUndef, APInt &KnownZero) {
6912  int Size = Mask.size();
6913  KnownUndef = KnownZero = APInt::getNullValue(Size);
6914
6915  V1 = peekThroughBitcasts(V1);
6916  V2 = peekThroughBitcasts(V2);
6917
6918  bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
6919  bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
6920
6921  int VectorSizeInBits = V1.getValueSizeInBits();
6922  int ScalarSizeInBits = VectorSizeInBits / Size;
6923  assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
6924
6925  for (int i = 0; i < Size; ++i) {
6926    int M = Mask[i];
6927    // Handle the easy cases.
6928    if (M < 0) {
6929      KnownUndef.setBit(i);
6930      continue;
6931    }
6932    if ((M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
6933      KnownZero.setBit(i);
6934      continue;
6935    }
6936
6937    // Determine shuffle input and normalize the mask.
6938    SDValue V = M < Size ? V1 : V2;
6939    M %= Size;
6940
6941    // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
6942    if (V.getOpcode() != ISD::BUILD_VECTOR)
6943      continue;
6944
6945    // If the BUILD_VECTOR has fewer elements than the mask, then the
6946    // bitcasted portion of the (larger) source element must be UNDEF/ZERO.
6947    if ((Size % V.getNumOperands()) == 0) {
6948      int Scale = Size / V->getNumOperands();
6949      SDValue Op = V.getOperand(M / Scale);
6950      if (Op.isUndef())
6951        KnownUndef.setBit(i);
6952      if (X86::isZeroNode(Op))
6953        KnownZero.setBit(i);
6954      else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
6955        APInt Val = Cst->getAPIntValue();
6956        Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
6957        if (Val == 0)
6958          KnownZero.setBit(i);
6959      } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
6960        APInt Val = Cst->getValueAPF().bitcastToAPInt();
6961        Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
6962        if (Val == 0)
6963          KnownZero.setBit(i);
6964      }
6965      continue;
6966    }
6967
6968    // If the BUILD_VECTOR has more elements than the mask, then all of the
6969    // (smaller) source elements must be UNDEF or ZERO.
6970    if ((V.getNumOperands() % Size) == 0) {
6971      int Scale = V->getNumOperands() / Size;
6972      bool AllUndef = true;
6973      bool AllZero = true;
6974      for (int j = 0; j < Scale; ++j) {
6975        SDValue Op = V.getOperand((M * Scale) + j);
6976        AllUndef &= Op.isUndef();
6977        AllZero &= X86::isZeroNode(Op);
6978      }
6979      if (AllUndef)
6980        KnownUndef.setBit(i);
6981      if (AllZero)
6982        KnownZero.setBit(i);
6983      continue;
6984    }
6985  }
6986}
6987
6988/// Decode a target shuffle mask and inputs and see if any values are
6989/// known to be undef or zero from their inputs.
6990/// Returns true if the target shuffle mask was decoded.
6991/// FIXME: Merge this with computeZeroableShuffleElements?
6992static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
6993                                         SmallVectorImpl<SDValue> &Ops,
6994                                         APInt &KnownUndef, APInt &KnownZero) {
6995  bool IsUnary;
6996  if (!isTargetShuffle(N.getOpcode()))
6997    return false;
6998
6999  MVT VT = N.getSimpleValueType();
7000  if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
7001    return false;
7002
7003  int Size = Mask.size();
7004  SDValue V1 = Ops[0];
7005  SDValue V2 = IsUnary ? V1 : Ops[1];
7006  KnownUndef = KnownZero = APInt::getNullValue(Size);
7007
7008  V1 = peekThroughBitcasts(V1);
7009  V2 = peekThroughBitcasts(V2);
7010
7011  assert((VT.getSizeInBits() % Size) == 0 &&
7012         "Illegal split of shuffle value type");
7013  unsigned EltSizeInBits = VT.getSizeInBits() / Size;
7014
7015  // Extract known constant input data.
7016  APInt UndefSrcElts[2];
7017  SmallVector<APInt, 32> SrcEltBits[2];
7018  bool IsSrcConstant[2] = {
7019      getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
7020                                    SrcEltBits[0], true, false),
7021      getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
7022                                    SrcEltBits[1], true, false)};
7023
7024  for (int i = 0; i < Size; ++i) {
7025    int M = Mask[i];
7026
7027    // Already decoded as SM_SentinelZero / SM_SentinelUndef.
7028    if (M < 0) {
7029      assert(isUndefOrZero(M) && "Unknown shuffle sentinel value!");
7030      if (SM_SentinelUndef == M)
7031        KnownUndef.setBit(i);
7032      if (SM_SentinelZero == M)
7033        KnownZero.setBit(i);
7034      continue;
7035    }
7036
7037    // Determine shuffle input and normalize the mask.
7038    unsigned SrcIdx = M / Size;
7039    SDValue V = M < Size ? V1 : V2;
7040    M %= Size;
7041
7042    // We are referencing an UNDEF input.
7043    if (V.isUndef()) {
7044      KnownUndef.setBit(i);
7045      continue;
7046    }
7047
7048    // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
7049    // TODO: We currently only set UNDEF for integer types - floats use the same
7050    // registers as vectors and many of the scalar folded loads rely on the
7051    // SCALAR_TO_VECTOR pattern.
7052    if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
7053        (Size % V.getValueType().getVectorNumElements()) == 0) {
7054      int Scale = Size / V.getValueType().getVectorNumElements();
7055      int Idx = M / Scale;
7056      if (Idx != 0 && !VT.isFloatingPoint())
7057        KnownUndef.setBit(i);
7058      else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
7059        KnownZero.setBit(i);
7060      continue;
7061    }
7062
7063    // Attempt to extract from the source's constant bits.
7064    if (IsSrcConstant[SrcIdx]) {
7065      if (UndefSrcElts[SrcIdx][M])
7066        KnownUndef.setBit(i);
7067      else if (SrcEltBits[SrcIdx][M] == 0)
7068        KnownZero.setBit(i);
7069    }
7070  }
7071
7072  assert(VT.getVectorNumElements() == (unsigned)Size &&
7073         "Different mask size from vector size!");
7074  return true;
7075}
7076
7077// Replace target shuffle mask elements with known undef/zero sentinels.
7078static void resolveTargetShuffleFromZeroables(SmallVectorImpl<int> &Mask,
7079                                              const APInt &KnownUndef,
7080                                              const APInt &KnownZero,
7081                                              bool ResolveKnownZeros = true) {
7082  unsigned NumElts = Mask.size();
7083  assert(KnownUndef.getBitWidth() == NumElts &&
7084         KnownZero.getBitWidth() == NumElts && "Shuffle mask size mismatch");
7085
7086  for (unsigned i = 0; i != NumElts; ++i) {
7087    if (KnownUndef[i])
7088      Mask[i] = SM_SentinelUndef;
7089    else if (ResolveKnownZeros && KnownZero[i])
7090      Mask[i] = SM_SentinelZero;
7091  }
7092}
7093
7094// Extract target shuffle mask sentinel elements to known undef/zero bitmasks.
7095static void resolveZeroablesFromTargetShuffle(const SmallVectorImpl<int> &Mask,
7096                                              APInt &KnownUndef,
7097                                              APInt &KnownZero) {
7098  unsigned NumElts = Mask.size();
7099  KnownUndef = KnownZero = APInt::getNullValue(NumElts);
7100
7101  for (unsigned i = 0; i != NumElts; ++i) {
7102    int M = Mask[i];
7103    if (SM_SentinelUndef == M)
7104      KnownUndef.setBit(i);
7105    if (SM_SentinelZero == M)
7106      KnownZero.setBit(i);
7107  }
7108}
7109
7110// Forward declaration (for getFauxShuffleMask recursive check).
7111// TODO: Use DemandedElts variant.
7112static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
7113                                   SmallVectorImpl<int> &Mask,
7114                                   SelectionDAG &DAG, unsigned Depth,
7115                                   bool ResolveKnownElts);
7116
7117// Attempt to decode ops that could be represented as a shuffle mask.
7118// The decoded shuffle mask may contain a different number of elements to the
7119// destination value type.
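// For example, an AND with a constant whose bytes are all 0x00 or 0xFF decodes
// to a per-byte shuffle that keeps the other operand's byte where the constant
// byte is 0xFF and yields zero where it is 0x00.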
7120static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
7121                               SmallVectorImpl<int> &Mask,
7122                               SmallVectorImpl<SDValue> &Ops,
7123                               SelectionDAG &DAG, unsigned Depth,
7124                               bool ResolveKnownElts) {
7125  Mask.clear();
7126  Ops.clear();
7127
7128  MVT VT = N.getSimpleValueType();
7129  unsigned NumElts = VT.getVectorNumElements();
7130  unsigned NumSizeInBits = VT.getSizeInBits();
7131  unsigned NumBitsPerElt = VT.getScalarSizeInBits();
7132  if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
7133    return false;
7134  assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");
7135
7136  unsigned Opcode = N.getOpcode();
7137  switch (Opcode) {
7138  case ISD::VECTOR_SHUFFLE: {
7139    // Don't treat ISD::VECTOR_SHUFFLE as a target shuffle so decode it here.
7140    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
7141    if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
7142      Mask.append(ShuffleMask.begin(), ShuffleMask.end());
7143      Ops.push_back(N.getOperand(0));
7144      Ops.push_back(N.getOperand(1));
7145      return true;
7146    }
7147    return false;
7148  }
7149  case ISD::AND:
7150  case X86ISD::ANDNP: {
7151    // Attempt to decode as a per-byte mask.
7152    APInt UndefElts;
7153    SmallVector<APInt, 32> EltBits;
7154    SDValue N0 = N.getOperand(0);
7155    SDValue N1 = N.getOperand(1);
7156    bool IsAndN = (X86ISD::ANDNP == Opcode);
7157    uint64_t ZeroMask = IsAndN ? 255 : 0;
7158    if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
7159      return false;
7160    for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
7161      if (UndefElts[i]) {
7162        Mask.push_back(SM_SentinelUndef);
7163        continue;
7164      }
7165      const APInt &ByteBits = EltBits[i];
7166      if (ByteBits != 0 && ByteBits != 255)
7167        return false;
7168      Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
7169    }
7170    Ops.push_back(IsAndN ? N1 : N0);
7171    return true;
7172  }
7173  case ISD::OR: {
7174    // Inspect each operand at the byte level. We can merge these into a blend
7175    // shuffle mask if, for each byte, at least one operand is known zero.
7176    KnownBits Known0 =
7177        DAG.computeKnownBits(N.getOperand(0), DemandedElts, Depth + 1);
7178    KnownBits Known1 =
7179        DAG.computeKnownBits(N.getOperand(1), DemandedElts, Depth + 1);
7180    if (Known0.One.isNullValue() && Known1.One.isNullValue()) {
7181      bool IsByteMask = true;
7182      unsigned NumSizeInBytes = NumSizeInBits / 8;
7183      unsigned NumBytesPerElt = NumBitsPerElt / 8;
7184      APInt ZeroMask = APInt::getNullValue(NumBytesPerElt);
7185      APInt SelectMask = APInt::getNullValue(NumBytesPerElt);
7186      for (unsigned i = 0; i != NumBytesPerElt && IsByteMask; ++i) {
7187        unsigned LHS = Known0.Zero.extractBits(8, i * 8).getZExtValue();
7188        unsigned RHS = Known1.Zero.extractBits(8, i * 8).getZExtValue();
7189        if (LHS == 255 && RHS == 0)
7190          SelectMask.setBit(i);
7191        else if (LHS == 255 && RHS == 255)
7192          ZeroMask.setBit(i);
7193        else if (!(LHS == 0 && RHS == 255))
7194          IsByteMask = false;
7195      }
7196      if (IsByteMask) {
7197        for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt) {
7198          for (unsigned j = 0; j != NumBytesPerElt; ++j) {
7199            unsigned Ofs = (SelectMask[j] ? NumSizeInBytes : 0);
7200            int Idx = (ZeroMask[j] ? (int)SM_SentinelZero : (i + j + Ofs));
7201            Mask.push_back(Idx);
7202          }
7203        }
7204        Ops.push_back(N.getOperand(0));
7205        Ops.push_back(N.getOperand(1));
7206        return true;
7207      }
7208    }
7209
7210    // Handle the OR(SHUFFLE,SHUFFLE) case where, for each element, one source
7211    // is zero and the other provides a valid shuffle index.
7212    SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
7213    SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
7214    if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
7215      return false;
7216    SmallVector<int, 64> SrcMask0, SrcMask1;
7217    SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
7218    if (!getTargetShuffleInputs(N0, SrcInputs0, SrcMask0, DAG, Depth + 1,
7219                                true) ||
7220        !getTargetShuffleInputs(N1, SrcInputs1, SrcMask1, DAG, Depth + 1,
7221                                true))
7222      return false;
7223    size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
7224    SmallVector<int, 64> Mask0, Mask1;
7225    scaleShuffleMask<int>(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
7226    scaleShuffleMask<int>(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
7227    for (size_t i = 0; i != MaskSize; ++i) {
7228      if (Mask0[i] == SM_SentinelUndef && Mask1[i] == SM_SentinelUndef)
7229        Mask.push_back(SM_SentinelUndef);
7230      else if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
7231        Mask.push_back(SM_SentinelZero);
7232      else if (Mask1[i] == SM_SentinelZero)
7233        Mask.push_back(Mask0[i]);
7234      else if (Mask0[i] == SM_SentinelZero)
7235        Mask.push_back(Mask1[i] + (int)(MaskSize * SrcInputs0.size()));
7236      else
7237        return false;
7238    }
7239    Ops.append(SrcInputs0.begin(), SrcInputs0.end());
7240    Ops.append(SrcInputs1.begin(), SrcInputs1.end());
7241    return true;
7242  }
7243  case ISD::INSERT_SUBVECTOR: {
7244    SDValue Src = N.getOperand(0);
7245    SDValue Sub = N.getOperand(1);
7246    EVT SubVT = Sub.getValueType();
7247    unsigned NumSubElts = SubVT.getVectorNumElements();
7248    if (!isa<ConstantSDNode>(N.getOperand(2)) ||
7249        !N->isOnlyUserOf(Sub.getNode()))
7250      return false;
7251    uint64_t InsertIdx = N.getConstantOperandVal(2);
7252    // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
7253    if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
7254        Sub.getOperand(0).getValueType() == VT &&
7255        isa<ConstantSDNode>(Sub.getOperand(1))) {
7256      uint64_t ExtractIdx = Sub.getConstantOperandVal(1);
7257      for (int i = 0; i != (int)NumElts; ++i)
7258        Mask.push_back(i);
7259      for (int i = 0; i != (int)NumSubElts; ++i)
7260        Mask[InsertIdx + i] = NumElts + ExtractIdx + i;
7261      Ops.push_back(Src);
7262      Ops.push_back(Sub.getOperand(0));
7263      return true;
7264    }
7265    // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
7266    SmallVector<int, 64> SubMask;
7267    SmallVector<SDValue, 2> SubInputs;
7268    if (!getTargetShuffleInputs(peekThroughOneUseBitcasts(Sub), SubInputs,
7269                                SubMask, DAG, Depth + 1, ResolveKnownElts))
7270      return false;
7271    if (SubMask.size() != NumSubElts) {
7272      assert(((SubMask.size() % NumSubElts) == 0 ||
7273              (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
7274      if ((NumSubElts % SubMask.size()) == 0) {
7275        int Scale = NumSubElts / SubMask.size();
7276        SmallVector<int, 64> ScaledSubMask;
7277        scaleShuffleMask<int>(Scale, SubMask, ScaledSubMask);
7278        SubMask = ScaledSubMask;
7279      } else {
7280        int Scale = SubMask.size() / NumSubElts;
7281        NumSubElts = SubMask.size();
7282        NumElts *= Scale;
7283        InsertIdx *= Scale;
7284      }
7285    }
7286    Ops.push_back(Src);
7287    for (SDValue &SubInput : SubInputs) {
7288      EVT SubSVT = SubInput.getValueType().getScalarType();
7289      EVT AltVT = EVT::getVectorVT(*DAG.getContext(), SubSVT,
7290                                   NumSizeInBits / SubSVT.getSizeInBits());
7291      Ops.push_back(DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), AltVT,
7292                                DAG.getUNDEF(AltVT), SubInput,
7293                                DAG.getIntPtrConstant(0, SDLoc(N))));
7294    }
7295    for (int i = 0; i != (int)NumElts; ++i)
7296      Mask.push_back(i);
7297    for (int i = 0; i != (int)NumSubElts; ++i) {
7298      int M = SubMask[i];
7299      if (0 <= M) {
7300        int InputIdx = M / NumSubElts;
7301        M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
7302      }
7303      Mask[i + InsertIdx] = M;
7304    }
7305    return true;
7306  }
7307  case ISD::SCALAR_TO_VECTOR: {
7308    // Match against a scalar_to_vector of an extract from a vector,
7309    // for PEXTRW/PEXTRB we must handle the implicit zext of the scalar.
7310    SDValue N0 = N.getOperand(0);
7311    SDValue SrcExtract;
7312
7313    if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7314         N0.getOperand(0).getValueType() == VT) ||
7315        (N0.getOpcode() == X86ISD::PEXTRW &&
7316         N0.getOperand(0).getValueType() == MVT::v8i16) ||
7317        (N0.getOpcode() == X86ISD::PEXTRB &&
7318         N0.getOperand(0).getValueType() == MVT::v16i8)) {
7319      SrcExtract = N0;
7320    }
7321
7322    if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
7323      return false;
7324
7325    SDValue SrcVec = SrcExtract.getOperand(0);
7326    EVT SrcVT = SrcVec.getValueType();
7327    unsigned NumSrcElts = SrcVT.getVectorNumElements();
7328    unsigned NumZeros = (NumBitsPerElt / SrcVT.getScalarSizeInBits()) - 1;
7329
7330    unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
7331    if (NumSrcElts <= SrcIdx)
7332      return false;
7333
7334    Ops.push_back(SrcVec);
7335    Mask.push_back(SrcIdx);
7336    Mask.append(NumZeros, SM_SentinelZero);
7337    Mask.append(NumSrcElts - Mask.size(), SM_SentinelUndef);
7338    return true;
7339  }
7340  case X86ISD::PINSRB:
7341  case X86ISD::PINSRW: {
7342    SDValue InVec = N.getOperand(0);
7343    SDValue InScl = N.getOperand(1);
7344    SDValue InIndex = N.getOperand(2);
7345    if (!isa<ConstantSDNode>(InIndex) ||
7346        cast<ConstantSDNode>(InIndex)->getAPIntValue().uge(NumElts))
7347      return false;
7348    uint64_t InIdx = N.getConstantOperandVal(2);
7349
7350    // Attempt to recognise a PINSR*(VEC, 0, Idx) shuffle pattern.
7351    if (X86::isZeroNode(InScl)) {
7352      Ops.push_back(InVec);
7353      for (unsigned i = 0; i != NumElts; ++i)
7354        Mask.push_back(i == InIdx ? SM_SentinelZero : (int)i);
7355      return true;
7356    }
7357
7358    // Attempt to recognise a PINSR*(PEXTR*) shuffle pattern.
7359    // TODO: Expand this to support INSERT_VECTOR_ELT/etc.
7360    unsigned ExOp =
7361        (X86ISD::PINSRB == Opcode ? X86ISD::PEXTRB : X86ISD::PEXTRW);
7362    if (InScl.getOpcode() != ExOp)
7363      return false;
7364
7365    SDValue ExVec = InScl.getOperand(0);
7366    SDValue ExIndex = InScl.getOperand(1);
7367    if (!isa<ConstantSDNode>(ExIndex) ||
7368        cast<ConstantSDNode>(ExIndex)->getAPIntValue().uge(NumElts))
7369      return false;
7370    uint64_t ExIdx = InScl.getConstantOperandVal(1);
7371
7372    Ops.push_back(InVec);
7373    Ops.push_back(ExVec);
7374    for (unsigned i = 0; i != NumElts; ++i)
7375      Mask.push_back(i == InIdx ? NumElts + ExIdx : i);
7376    return true;
7377  }
7378  case X86ISD::PACKSS:
7379  case X86ISD::PACKUS: {
7380    SDValue N0 = N.getOperand(0);
7381    SDValue N1 = N.getOperand(1);
7382    assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
7383           N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
7384           "Unexpected input value type");
7385
7386    APInt EltsLHS, EltsRHS;
7387    getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);
7388
7389    // If we know input saturation won't happen, we can treat this
7390    // as a truncation shuffle.
7391    if (Opcode == X86ISD::PACKSS) {
7392      if ((!N0.isUndef() &&
7393           DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
7394          (!N1.isUndef() &&
7395           DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
7396        return false;
7397    } else {
7398      APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
7399      if ((!N0.isUndef() &&
7400           !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
7401          (!N1.isUndef() &&
7402           !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
7403        return false;
7404    }
7405
7406    bool IsUnary = (N0 == N1);
7407
7408    Ops.push_back(N0);
7409    if (!IsUnary)
7410      Ops.push_back(N1);
7411
7412    createPackShuffleMask(VT, Mask, IsUnary);
7413    return true;
7414  }
7415  case X86ISD::VSHLI:
7416  case X86ISD::VSRLI: {
7417    uint64_t ShiftVal = N.getConstantOperandVal(1);
7418    // Out of range bit shifts are guaranteed to be zero.
7419    if (NumBitsPerElt <= ShiftVal) {
7420      Mask.append(NumElts, SM_SentinelZero);
7421      return true;
7422    }
7423
7424    // We can only decode 'whole byte' bit shifts as shuffles.
7425    if ((ShiftVal % 8) != 0)
7426      break;
7427
7428    uint64_t ByteShift = ShiftVal / 8;
7429    unsigned NumBytes = NumSizeInBits / 8;
7430    unsigned NumBytesPerElt = NumBitsPerElt / 8;
7431    Ops.push_back(N.getOperand(0));
7432
7433    // Clear mask to all zeros and insert the shifted byte indices.
7434    Mask.append(NumBytes, SM_SentinelZero);
7435
7436    if (X86ISD::VSHLI == Opcode) {
7437      for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
7438        for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
7439          Mask[i + j] = i + j - ByteShift;
7440    } else {
7441      for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
7442        for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
7443          Mask[i + j - ByteShift] = i + j;
7444    }
7445    return true;
7446  }
7447  case X86ISD::VBROADCAST: {
7448    SDValue Src = N.getOperand(0);
7449    MVT SrcVT = Src.getSimpleValueType();
7450    if (!SrcVT.isVector())
7451      return false;
7452
7453    if (NumSizeInBits != SrcVT.getSizeInBits()) {
7454      assert((NumSizeInBits % SrcVT.getSizeInBits()) == 0 &&
7455             "Illegal broadcast type");
7456      SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
7457                               NumSizeInBits / SrcVT.getScalarSizeInBits());
7458      Src = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), SrcVT,
7459                        DAG.getUNDEF(SrcVT), Src,
7460                        DAG.getIntPtrConstant(0, SDLoc(N)));
7461    }
7462
7463    Ops.push_back(Src);
7464    Mask.append(NumElts, 0);
7465    return true;
7466  }
7467  case ISD::ZERO_EXTEND:
7468  case ISD::ANY_EXTEND:
7469  case ISD::ZERO_EXTEND_VECTOR_INREG:
7470  case ISD::ANY_EXTEND_VECTOR_INREG: {
7471    SDValue Src = N.getOperand(0);
7472    EVT SrcVT = Src.getValueType();
7473
7474    // Extended source must be a simple vector.
7475    if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
7476        (SrcVT.getScalarSizeInBits() % 8) != 0)
7477      return false;
7478
7479    unsigned NumSrcBitsPerElt = SrcVT.getScalarSizeInBits();
7480    bool IsAnyExtend =
7481        (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
7482    DecodeZeroExtendMask(NumSrcBitsPerElt, NumBitsPerElt, NumElts, IsAnyExtend,
7483                         Mask);
7484
7485    if (NumSizeInBits != SrcVT.getSizeInBits()) {
7486      assert((NumSizeInBits % SrcVT.getSizeInBits()) == 0 &&
7487             "Illegal zero-extension type");
7488      SrcVT = MVT::getVectorVT(SrcVT.getSimpleVT().getScalarType(),
7489                               NumSizeInBits / NumSrcBitsPerElt);
7490      Src = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), SrcVT,
7491                        DAG.getUNDEF(SrcVT), Src,
7492                        DAG.getIntPtrConstant(0, SDLoc(N)));
7493    }
7494
7495    Ops.push_back(Src);
7496    return true;
7497  }
7498  }
7499
7500  return false;
7501}
7502
7503/// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
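/// For example, with a mask width of 4, Inputs = {A, A} and Mask = {0, 1, 6, 7}
/// become Inputs = {A} and Mask = {0, 1, 2, 3}.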
7504static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
7505                                              SmallVectorImpl<int> &Mask) {
7506  int MaskWidth = Mask.size();
7507  SmallVector<SDValue, 16> UsedInputs;
7508  for (int i = 0, e = Inputs.size(); i < e; ++i) {
7509    int lo = UsedInputs.size() * MaskWidth;
7510    int hi = lo + MaskWidth;
7511
7512    // Strip UNDEF input usage.
7513    if (Inputs[i].isUndef())
7514      for (int &M : Mask)
7515        if ((lo <= M) && (M < hi))
7516          M = SM_SentinelUndef;
7517
7518    // Check for unused inputs.
7519    if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
7520      for (int &M : Mask)
7521        if (lo <= M)
7522          M -= MaskWidth;
7523      continue;
7524    }
7525
7526    // Check for repeated inputs.
7527    bool IsRepeat = false;
7528    for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
7529      if (UsedInputs[j] != Inputs[i])
7530        continue;
7531      for (int &M : Mask)
7532        if (lo <= M)
7533          M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
7534      IsRepeat = true;
7535      break;
7536    }
7537    if (IsRepeat)
7538      continue;
7539
7540    UsedInputs.push_back(Inputs[i]);
7541  }
7542  Inputs = UsedInputs;
7543}
7544
7545/// Calls getTargetShuffleAndZeroables to resolve a target shuffle mask's inputs
7546/// and then sets the SM_SentinelUndef and SM_SentinelZero values.
7547/// Returns true if the target shuffle mask was decoded.
7548static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
7549                                   SmallVectorImpl<SDValue> &Inputs,
7550                                   SmallVectorImpl<int> &Mask,
7551                                   APInt &KnownUndef, APInt &KnownZero,
7552                                   SelectionDAG &DAG, unsigned Depth,
7553                                   bool ResolveKnownElts) {
7554  EVT VT = Op.getValueType();
7555  if (!VT.isSimple() || !VT.isVector())
7556    return false;
7557
7558  if (getTargetShuffleAndZeroables(Op, Mask, Inputs, KnownUndef, KnownZero)) {
7559    if (ResolveKnownElts)
7560      resolveTargetShuffleFromZeroables(Mask, KnownUndef, KnownZero);
7561    return true;
7562  }
7563  if (getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG, Depth,
7564                         ResolveKnownElts)) {
7565    resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
7566    return true;
7567  }
7568  return false;
7569}
7570
7571static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
7572                                   SmallVectorImpl<int> &Mask,
7573                                   SelectionDAG &DAG, unsigned Depth = 0,
7574                                   bool ResolveKnownElts = true) {
7575  EVT VT = Op.getValueType();
7576  if (!VT.isSimple() || !VT.isVector())
7577    return false;
7578
7579  APInt KnownUndef, KnownZero;
7580  unsigned NumElts = Op.getValueType().getVectorNumElements();
7581  APInt DemandedElts = APInt::getAllOnesValue(NumElts);
7582  return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, KnownUndef,
7583                                KnownZero, DAG, Depth, ResolveKnownElts);
7584}
7585
7586/// Returns the scalar element that will make up the ith
7587/// element of the result of the vector shuffle.
7588static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
7589                                   unsigned Depth) {
7590  if (Depth == 6)
7591    return SDValue();  // Limit search depth.
7592
7593  SDValue V = SDValue(N, 0);
7594  EVT VT = V.getValueType();
7595  unsigned Opcode = V.getOpcode();
7596
7597  // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
7598  if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
7599    int Elt = SV->getMaskElt(Index);
7600
7601    if (Elt < 0)
7602      return DAG.getUNDEF(VT.getVectorElementType());
7603
7604    unsigned NumElems = VT.getVectorNumElements();
7605    SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
7606                                         : SV->getOperand(1);
7607    return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
7608  }
7609
7610  // Recurse into target specific vector shuffles to find scalars.
7611  if (isTargetShuffle(Opcode)) {
7612    MVT ShufVT = V.getSimpleValueType();
7613    MVT ShufSVT = ShufVT.getVectorElementType();
7614    int NumElems = (int)ShufVT.getVectorNumElements();
7615    SmallVector<int, 16> ShuffleMask;
7616    SmallVector<SDValue, 16> ShuffleOps;
7617    bool IsUnary;
7618
7619    if (!getTargetShuffleMask(N, ShufVT, true, ShuffleOps, ShuffleMask, IsUnary))
7620      return SDValue();
7621
7622    int Elt = ShuffleMask[Index];
7623    if (Elt == SM_SentinelZero)
7624      return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(N), ShufSVT)
7625                                 : DAG.getConstantFP(+0.0, SDLoc(N), ShufSVT);
7626    if (Elt == SM_SentinelUndef)
7627      return DAG.getUNDEF(ShufSVT);
7628
7629    assert(0 <= Elt && Elt < (2*NumElems) && "Shuffle index out of range");
7630    SDValue NewV = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
7631    return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
7632                               Depth+1);
7633  }
7634
7635  // Recurse into insert_subvector base/sub vector to find scalars.
7636  if (Opcode == ISD::INSERT_SUBVECTOR &&
7637      isa<ConstantSDNode>(N->getOperand(2))) {
7638    SDValue Vec = N->getOperand(0);
7639    SDValue Sub = N->getOperand(1);
7640    EVT SubVT = Sub.getValueType();
7641    unsigned NumSubElts = SubVT.getVectorNumElements();
7642    uint64_t SubIdx = N->getConstantOperandVal(2);
7643
7644    if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
7645      return getShuffleScalarElt(Sub.getNode(), Index - SubIdx, DAG, Depth + 1);
7646    return getShuffleScalarElt(Vec.getNode(), Index, DAG, Depth + 1);
7647  }
7648
7649  // Recurse into extract_subvector src vector to find scalars.
7650  if (Opcode == ISD::EXTRACT_SUBVECTOR &&
7651      isa<ConstantSDNode>(N->getOperand(1))) {
7652    SDValue Src = N->getOperand(0);
7653    uint64_t SrcIdx = N->getConstantOperandVal(1);
7654    return getShuffleScalarElt(Src.getNode(), Index + SrcIdx, DAG, Depth + 1);
7655  }
7656
7657  // Actual nodes that may contain scalar elements
7658  if (Opcode == ISD::BITCAST) {
7659    V = V.getOperand(0);
7660    EVT SrcVT = V.getValueType();
7661    unsigned NumElems = VT.getVectorNumElements();
7662
7663    if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
7664      return SDValue();
7665  }
7666
7667  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
7668    return (Index == 0) ? V.getOperand(0)
7669                        : DAG.getUNDEF(VT.getVectorElementType());
7670
7671  if (V.getOpcode() == ISD::BUILD_VECTOR)
7672    return V.getOperand(Index);
7673
7674  return SDValue();
7675}
7676
7677// Use PINSRB/PINSRW/PINSRD to create a build vector.
7678static SDValue LowerBuildVectorAsInsert(SDValue Op, unsigned NonZeros,
7679                                        unsigned NumNonZero, unsigned NumZero,
7680                                        SelectionDAG &DAG,
7681                                        const X86Subtarget &Subtarget) {
7682  MVT VT = Op.getSimpleValueType();
7683  unsigned NumElts = VT.getVectorNumElements();
7684  assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
7685          ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
7686         "Illegal vector insertion");
7687
7688  SDLoc dl(Op);
7689  SDValue V;
7690  bool First = true;
7691
7692  for (unsigned i = 0; i < NumElts; ++i) {
7693    bool IsNonZero = (NonZeros & (1 << i)) != 0;
7694    if (!IsNonZero)
7695      continue;
7696
7697    // If the build vector contains zeros or our first insertion is not the
7698    // first index, then insert into a zero vector to break any register
7699    // dependency; otherwise use SCALAR_TO_VECTOR.
7700    if (First) {
7701      First = false;
7702      if (NumZero || 0 != i)
7703        V = getZeroVector(VT, Subtarget, DAG, dl);
7704      else {
7705        assert(0 == i && "Expected insertion into zero-index");
7706        V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7707        V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
7708        V = DAG.getBitcast(VT, V);
7709        continue;
7710      }
7711    }
7712    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
7713                    DAG.getIntPtrConstant(i, dl));
7714  }
7715
7716  return V;
7717}
7718
7719/// Custom lower build_vector of v16i8.
7720static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
7721                                     unsigned NumNonZero, unsigned NumZero,
7722                                     SelectionDAG &DAG,
7723                                     const X86Subtarget &Subtarget) {
7724  if (NumNonZero > 8 && !Subtarget.hasSSE41())
7725    return SDValue();
7726
7727  // SSE4.1 - use PINSRB to insert each byte directly.
7728  if (Subtarget.hasSSE41())
7729    return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
7730                                    Subtarget);
7731
7732  SDLoc dl(Op);
7733  SDValue V;
7734
7735  // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
7736  for (unsigned i = 0; i < 16; i += 2) {
7737    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
7738    bool NextIsNonZero = (NonZeros & (1 << (i + 1))) != 0;
7739    if (!ThisIsNonZero && !NextIsNonZero)
7740      continue;
7741
7742    // FIXME: Investigate combining the first 4 bytes as an i32 instead.
7743    SDValue Elt;
7744    if (ThisIsNonZero) {
7745      if (NumZero || NextIsNonZero)
7746        Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7747      else
7748        Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7749    }
7750
7751    if (NextIsNonZero) {
7752      SDValue NextElt = Op.getOperand(i + 1);
7753      if (i == 0 && NumZero)
7754        NextElt = DAG.getZExtOrTrunc(NextElt, dl, MVT::i32);
7755      else
7756        NextElt = DAG.getAnyExtOrTrunc(NextElt, dl, MVT::i32);
7757      NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
7758                            DAG.getConstant(8, dl, MVT::i8));
7759      if (ThisIsNonZero)
7760        Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
7761      else
7762        Elt = NextElt;
7763    }
7764
7765    // If our first insertion is not the first index, then insert into a zero
7766    // vector to break any register dependency; otherwise use SCALAR_TO_VECTOR.
7767    if (!V) {
7768      if (i != 0)
7769        V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
7770      else {
7771        V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
7772        V = DAG.getBitcast(MVT::v8i16, V);
7773        continue;
7774      }
7775    }
7776    Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
7777    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
7778                    DAG.getIntPtrConstant(i / 2, dl));
7779  }
7780
7781  return DAG.getBitcast(MVT::v16i8, V);
7782}
7783
7784/// Custom lower build_vector of v8i16.
7785static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
7786                                     unsigned NumNonZero, unsigned NumZero,
7787                                     SelectionDAG &DAG,
7788                                     const X86Subtarget &Subtarget) {
7789  if (NumNonZero > 4 && !Subtarget.hasSSE41())
7790    return SDValue();
7791
7792  // Use PINSRW to insert each element directly.
7793  return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
7794                                  Subtarget);
7795}
7796
7797/// Custom lower build_vector of v4i32 or v4f32.
7798static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
7799                                     const X86Subtarget &Subtarget) {
7800  // If this is a splat of a pair of elements, use MOVDDUP (unless the target
7801  // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
7802  // Because we're creating a less complicated build vector here, we may enable
7803  // further folding of the MOVDDUP via shuffle transforms.
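  // For example, the v4f32 build vector (a, b, a, b) is rebuilt as
  // (a, b, undef, undef), bitcast to v2f64, and MOVDDUP then duplicates the low
  // 64-bit half holding both a and b, giving (a, b, a, b) again after the
  // bitcast back.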
7804  if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
7805      Op.getOperand(0) == Op.getOperand(2) &&
7806      Op.getOperand(1) == Op.getOperand(3) &&
7807      Op.getOperand(0) != Op.getOperand(1)) {
7808    SDLoc DL(Op);
7809    MVT VT = Op.getSimpleValueType();
7810    MVT EltVT = VT.getVectorElementType();
7811    // Create a new build vector with the first 2 elements followed by undef
7812    // padding, bitcast to v2f64, duplicate, and bitcast back.
7813    SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
7814                       DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
7815    SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
7816    SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
7817    return DAG.getBitcast(VT, Dup);
7818  }
7819
7820  // Find all zeroable elements.
7821  std::bitset<4> Zeroable, Undefs;
7822  for (int i = 0; i < 4; ++i) {
7823    SDValue Elt = Op.getOperand(i);
7824    Undefs[i] = Elt.isUndef();
7825    Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
7826  }
7827  assert(Zeroable.size() - Zeroable.count() > 1 &&
7828         "We expect at least two non-zero elements!");
7829
7830  // We only know how to deal with build_vector nodes where elements are either
7831  // zeroable or extract_vector_elt with constant index.
7832  SDValue FirstNonZero;
7833  unsigned FirstNonZeroIdx;
7834  for (unsigned i = 0; i < 4; ++i) {
7835    if (Zeroable[i])
7836      continue;
7837    SDValue Elt = Op.getOperand(i);
7838    if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7839        !isa<ConstantSDNode>(Elt.getOperand(1)))
7840      return SDValue();
7841    // Make sure that this node is extracting from a 128-bit vector.
7842    MVT VT = Elt.getOperand(0).getSimpleValueType();
7843    if (!VT.is128BitVector())
7844      return SDValue();
7845    if (!FirstNonZero.getNode()) {
7846      FirstNonZero = Elt;
7847      FirstNonZeroIdx = i;
7848    }
7849  }
7850
7851  assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
7852  SDValue V1 = FirstNonZero.getOperand(0);
7853  MVT VT = V1.getSimpleValueType();
7854
7855  // See if this build_vector can be lowered as a blend with zero.
7856  SDValue Elt;
7857  unsigned EltMaskIdx, EltIdx;
7858  int Mask[4];
7859  for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
7860    if (Zeroable[EltIdx]) {
7861      // The zero vector will be on the right hand side.
7862      Mask[EltIdx] = EltIdx+4;
7863      continue;
7864    }
7865
7866    Elt = Op->getOperand(EltIdx);
7867    // By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
7868    EltMaskIdx = Elt.getConstantOperandVal(1);
7869    if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
7870      break;
7871    Mask[EltIdx] = EltIdx;
7872  }
7873
7874  if (EltIdx == 4) {
7875    // Let the shuffle legalizer deal with blend operations.
7876    SDValue VZeroOrUndef = (Zeroable == Undefs)
7877                               ? DAG.getUNDEF(VT)
7878                               : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
7879    if (V1.getSimpleValueType() != VT)
7880      V1 = DAG.getBitcast(VT, V1);
7881    return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
7882  }
7883
7884  // See if we can lower this build_vector to a INSERTPS.
7885  if (!Subtarget.hasSSE41())
7886    return SDValue();
7887
7888  SDValue V2 = Elt.getOperand(0);
7889  if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
7890    V1 = SDValue();
7891
7892  bool CanFold = true;
7893  for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
7894    if (Zeroable[i])
7895      continue;
7896
7897    SDValue Current = Op->getOperand(i);
7898    SDValue SrcVector = Current->getOperand(0);
7899    if (!V1.getNode())
7900      V1 = SrcVector;
7901    CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
7902  }
7903
7904  if (!CanFold)
7905    return SDValue();
7906
7907  assert(V1.getNode() && "Expected at least two non-zero elements!");
7908  if (V1.getSimpleValueType() != MVT::v4f32)
7909    V1 = DAG.getBitcast(MVT::v4f32, V1);
7910  if (V2.getSimpleValueType() != MVT::v4f32)
7911    V2 = DAG.getBitcast(MVT::v4f32, V2);
7912
7913  // Ok, we can emit an INSERTPS instruction.
7914  unsigned ZMask = Zeroable.to_ulong();
7915
7916  unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
7917  assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
7918  SDLoc DL(Op);
7919  SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
7920                               DAG.getIntPtrConstant(InsertPSMask, DL, true));
7921  return DAG.getBitcast(VT, Result);
7922}
7923
7924/// Return a vector logical shift node.
7925static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
7926                         SelectionDAG &DAG, const TargetLowering &TLI,
7927                         const SDLoc &dl) {
7928  assert(VT.is128BitVector() && "Unknown type for VShift");
7929  MVT ShVT = MVT::v16i8;
7930  unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
7931  SrcOp = DAG.getBitcast(ShVT, SrcOp);
7932  assert(NumBits % 8 == 0 && "Only support byte sized shifts");
7933  SDValue ShiftVal = DAG.getTargetConstant(NumBits / 8, dl, MVT::i8);
7934  return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
7935}
7936
7937static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
7938                                      SelectionDAG &DAG) {
7939
7940  // Check if the scalar load can be widened into a vector load, and if
7941  // the address is "base + cst", see if the cst can be "absorbed" into
7942  // the shuffle mask.
7943  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
7944    SDValue Ptr = LD->getBasePtr();
7945    if (!ISD::isNormalLoad(LD) || !LD->isSimple())
7946      return SDValue();
7947    EVT PVT = LD->getValueType(0);
7948    if (PVT != MVT::i32 && PVT != MVT::f32)
7949      return SDValue();
7950
7951    int FI = -1;
7952    int64_t Offset = 0;
7953    if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
7954      FI = FINode->getIndex();
7955      Offset = 0;
7956    } else if (DAG.isBaseWithConstantOffset(Ptr) &&
7957               isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
7958      FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
7959      Offset = Ptr.getConstantOperandVal(1);
7960      Ptr = Ptr.getOperand(0);
7961    } else {
7962      return SDValue();
7963    }
7964
7965    // FIXME: 256-bit vector instructions don't require a strict alignment,
7966    // improve this code to support it better.
7967    unsigned RequiredAlign = VT.getSizeInBits()/8;
7968    SDValue Chain = LD->getChain();
7969    // Make sure the stack object alignment is at least 16 or 32.
7970    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7971    if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
7972      if (MFI.isFixedObjectIndex(FI)) {
7973        // Can't change the alignment. FIXME: It's possible to compute
7974        // the exact stack offset and reference FI + adjusted offset instead,
7975        // if someone *really* cares about this; that's the way to implement it.
7976        return SDValue();
7977      } else {
7978        MFI.setObjectAlignment(FI, RequiredAlign);
7979      }
7980    }
7981
7982    // (Offset % 16 or 32) must be a multiple of 4. The address used is then
7983    // Ptr + (Offset & ~15).
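    // For example, with a 128-bit vector (RequiredAlign == 16) and Offset == 20,
    // StartOffset is 16 and EltNo is (20 - 16) >> 2 == 1, so we widen the load
    // at Ptr + 16 and splat element 1 of it.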
7984    if (Offset < 0)
7985      return SDValue();
7986    if ((Offset % RequiredAlign) & 3)
7987      return SDValue();
7988    int64_t StartOffset = Offset & ~int64_t(RequiredAlign - 1);
7989    if (StartOffset) {
7990      SDLoc DL(Ptr);
7991      Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
7992                        DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
7993    }
7994
7995    int EltNo = (Offset - StartOffset) >> 2;
7996    unsigned NumElems = VT.getVectorNumElements();
7997
7998    EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
7999    SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
8000                             LD->getPointerInfo().getWithOffset(StartOffset));
8001
8002    SmallVector<int, 8> Mask(NumElems, EltNo);
8003
8004    return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
8005  }
8006
8007  return SDValue();
8008}
8009
8010// Recurse to find a LoadSDNode source and the accumulated ByteOffset.
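// For example, (trunc (srl (i64 load %p), 32)) resolves to the i64 load with a
// ByteOffset of 4.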
8011static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
8012  if (ISD::isNON_EXTLoad(Elt.getNode())) {
8013    auto *BaseLd = cast<LoadSDNode>(Elt);
8014    if (!BaseLd->isSimple())
8015      return false;
8016    Ld = BaseLd;
8017    ByteOffset = 0;
8018    return true;
8019  }
8020
8021  switch (Elt.getOpcode()) {
8022  case ISD::BITCAST:
8023  case ISD::TRUNCATE:
8024  case ISD::SCALAR_TO_VECTOR:
8025    return findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset);
8026  case ISD::SRL:
8027    if (isa<ConstantSDNode>(Elt.getOperand(1))) {
8028      uint64_t Idx = Elt.getConstantOperandVal(1);
8029      if ((Idx % 8) == 0 && findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset)) {
8030        ByteOffset += Idx / 8;
8031        return true;
8032      }
8033    }
8034    break;
8035  case ISD::EXTRACT_VECTOR_ELT:
8036    if (isa<ConstantSDNode>(Elt.getOperand(1))) {
8037      SDValue Src = Elt.getOperand(0);
8038      unsigned SrcSizeInBits = Src.getScalarValueSizeInBits();
8039      unsigned DstSizeInBits = Elt.getScalarValueSizeInBits();
8040      if (DstSizeInBits == SrcSizeInBits && (SrcSizeInBits % 8) == 0 &&
8041          findEltLoadSrc(Src, Ld, ByteOffset)) {
8042        uint64_t Idx = Elt.getConstantOperandVal(1);
8043        ByteOffset += Idx * (SrcSizeInBits / 8);
8044        return true;
8045      }
8046    }
8047    break;
8048  }
8049
8050  return false;
8051}
8052
8053/// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
8054/// elements can be replaced by a single large load which has the same value as
8055/// a build_vector or insert_subvector whose loaded operands are 'Elts'.
8056///
8057/// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
8058static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
8059                                        const SDLoc &DL, SelectionDAG &DAG,
8060                                        const X86Subtarget &Subtarget,
8061                                        bool isAfterLegalize) {
8062  if ((VT.getScalarSizeInBits() % 8) != 0)
8063    return SDValue();
8064
8065  unsigned NumElems = Elts.size();
8066
8067  int LastLoadedElt = -1;
8068  APInt LoadMask = APInt::getNullValue(NumElems);
8069  APInt ZeroMask = APInt::getNullValue(NumElems);
8070  APInt UndefMask = APInt::getNullValue(NumElems);
8071
8072  SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
8073  SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
8074
8075  // For each element in the initializer, see if we've found a load, zero or an
8076  // undef.
8077  for (unsigned i = 0; i < NumElems; ++i) {
8078    SDValue Elt = peekThroughBitcasts(Elts[i]);
8079    if (!Elt.getNode())
8080      return SDValue();
8081    if (Elt.isUndef()) {
8082      UndefMask.setBit(i);
8083      continue;
8084    }
8085    if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
8086      ZeroMask.setBit(i);
8087      continue;
8088    }
8089
8090    // Each loaded element must be the correct fractional portion of the
8091    // requested vector load.
8092    unsigned EltSizeInBits = Elt.getValueSizeInBits();
8093    if ((NumElems * EltSizeInBits) != VT.getSizeInBits())
8094      return SDValue();
8095
8096    if (!findEltLoadSrc(Elt, Loads[i], ByteOffsets[i]) || ByteOffsets[i] < 0)
8097      return SDValue();
8098    unsigned LoadSizeInBits = Loads[i]->getValueSizeInBits(0);
8099    if (((ByteOffsets[i] * 8) + EltSizeInBits) > LoadSizeInBits)
8100      return SDValue();
8101
8102    LoadMask.setBit(i);
8103    LastLoadedElt = i;
8104  }
8105  assert((ZeroMask.countPopulation() + UndefMask.countPopulation() +
8106          LoadMask.countPopulation()) == NumElems &&
8107         "Incomplete element masks");
8108
8109  // Handle Special Cases - all undef or undef/zero.
8110  if (UndefMask.countPopulation() == NumElems)
8111    return DAG.getUNDEF(VT);
8112
8113  // FIXME: Should we return this as a BUILD_VECTOR instead?
8114  if ((ZeroMask.countPopulation() + UndefMask.countPopulation()) == NumElems)
8115    return VT.isInteger() ? DAG.getConstant(0, DL, VT)
8116                          : DAG.getConstantFP(0.0, DL, VT);
8117
8118  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8119  int FirstLoadedElt = LoadMask.countTrailingZeros();
8120  SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
8121  EVT EltBaseVT = EltBase.getValueType();
8122  assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
8123         "Register/Memory size mismatch");
8124  LoadSDNode *LDBase = Loads[FirstLoadedElt];
8125  assert(LDBase && "Did not find base load for merging consecutive loads");
8126  unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
8127  unsigned BaseSizeInBytes = BaseSizeInBits / 8;
8128  int LoadSizeInBits = (1 + LastLoadedElt - FirstLoadedElt) * BaseSizeInBits;
8129  assert((BaseSizeInBits % 8) == 0 && "Sub-byte element loads detected");
8130
8131  // TODO: Support offsetting the base load.
8132  if (ByteOffsets[FirstLoadedElt] != 0)
8133    return SDValue();
8134
8135  // Check to see if the element's load is consecutive to the base load
8136  // or offset from a previous (already checked) load.
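  // For example, with 4-byte elements, element i taken from byte offset 4 of a
  // load L is treated as consecutive if element i-1 uses the same load L at
  // offset 0; otherwise we fall back to areNonVolatileConsecutiveLoads.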
8137  auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
8138    LoadSDNode *Ld = Loads[EltIdx];
8139    int64_t ByteOffset = ByteOffsets[EltIdx];
8140    if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
8141      int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
8142      return (0 <= BaseIdx && BaseIdx < (int)NumElems && LoadMask[BaseIdx] &&
8143              Loads[BaseIdx] == Ld && ByteOffsets[BaseIdx] == 0);
8144    }
8145    return DAG.areNonVolatileConsecutiveLoads(Ld, Base, BaseSizeInBytes,
8146                                              EltIdx - FirstLoadedElt);
8147  };
8148
8149  // Consecutive loads can contain UNDEFs but not ZERO elements.
8150  // Consecutive loads with UNDEF and ZERO elements require an
8151  // additional shuffle stage to clear the ZERO elements.
8152  bool IsConsecutiveLoad = true;
8153  bool IsConsecutiveLoadWithZeros = true;
8154  for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
8155    if (LoadMask[i]) {
8156      if (!CheckConsecutiveLoad(LDBase, i)) {
8157        IsConsecutiveLoad = false;
8158        IsConsecutiveLoadWithZeros = false;
8159        break;
8160      }
8161    } else if (ZeroMask[i]) {
8162      IsConsecutiveLoad = false;
8163    }
8164  }
8165
8166  auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
8167    auto MMOFlags = LDBase->getMemOperand()->getFlags();
8168    assert(LDBase->isSimple() &&
8169           "Cannot merge volatile or atomic loads.");
8170    SDValue NewLd =
8171        DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
8172                    LDBase->getPointerInfo(), LDBase->getAlignment(), MMOFlags);
8173    for (auto *LD : Loads)
8174      if (LD)
8175        DAG.makeEquivalentMemoryOrdering(LD, NewLd);
8176    return NewLd;
8177  };
8178
8179  // Check if the base load is entirely dereferenceable.
8180  bool IsDereferenceable = LDBase->getPointerInfo().isDereferenceable(
8181      VT.getSizeInBits() / 8, *DAG.getContext(), DAG.getDataLayout());
8182
8183  // LOAD - all consecutive load/undefs (must start/end with a load or be
8184  // entirely dereferenceable). If we have found an entire vector of loads and
8185  // undefs, then return a large load of the entire vector width starting at the
8186  // base pointer. If the vector contains zeros, then attempt to shuffle those
8187  // elements.
8188  if (FirstLoadedElt == 0 &&
8189      (LastLoadedElt == (int)(NumElems - 1) || IsDereferenceable) &&
8190      (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
8191    if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
8192      return SDValue();
8193
8194    // Don't create 256-bit non-temporal aligned loads without AVX2 as these
8195    // will lower to regular temporal loads and use the cache.
8196    if (LDBase->isNonTemporal() && LDBase->getAlignment() >= 32 &&
8197        VT.is256BitVector() && !Subtarget.hasInt256())
8198      return SDValue();
8199
8200    if (NumElems == 1)
8201      return DAG.getBitcast(VT, Elts[FirstLoadedElt]);
8202
8203    if (!ZeroMask)
8204      return CreateLoad(VT, LDBase);
8205
8206    // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
8207    // vector and a zero vector to clear out the zero elements.
8208    if (!isAfterLegalize && VT.isVector()) {
8209      unsigned NumMaskElts = VT.getVectorNumElements();
8210      if ((NumMaskElts % NumElems) == 0) {
8211        unsigned Scale = NumMaskElts / NumElems;
8212        SmallVector<int, 4> ClearMask(NumMaskElts, -1);
8213        for (unsigned i = 0; i < NumElems; ++i) {
8214          if (UndefMask[i])
8215            continue;
8216          int Offset = ZeroMask[i] ? NumMaskElts : 0;
8217          for (unsigned j = 0; j != Scale; ++j)
8218            ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
8219        }
8220        SDValue V = CreateLoad(VT, LDBase);
8221        SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
8222                                   : DAG.getConstantFP(0.0, DL, VT);
8223        return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
8224      }
8225    }
8226  }
8227
8228  // If the upper half of a ymm/zmm load is undef then just load the lower half.
8229  if (VT.is256BitVector() || VT.is512BitVector()) {
8230    unsigned HalfNumElems = NumElems / 2;
8231    if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnesValue()) {
8232      EVT HalfVT =
8233          EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
8234      SDValue HalfLD =
8235          EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
8236                                   DAG, Subtarget, isAfterLegalize);
8237      if (HalfLD)
8238        return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
8239                           HalfLD, DAG.getIntPtrConstant(0, DL));
8240    }
8241  }
8242
8243  // VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs.
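  // For example, the v4i32 build vector <load i32 *p, load i32 *(p+4), 0, 0>
  // has LoadSizeInBits == 64 and becomes a 64-bit X86ISD::VZEXT_LOAD (typically
  // selected as MOVQ), bitcast back to v4i32.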
8244  if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
8245      (LoadSizeInBits == 32 || LoadSizeInBits == 64) &&
8246      ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
8247    MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSizeInBits)
8248                                      : MVT::getIntegerVT(LoadSizeInBits);
8249    MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSizeInBits);
8250    if (TLI.isTypeLegal(VecVT)) {
8251      SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
8252      SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
8253      SDValue ResNode =
8254          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT,
8255                                  LDBase->getPointerInfo(),
8256                                  LDBase->getAlignment(),
8257                                  MachineMemOperand::MOLoad);
8258      for (auto *LD : Loads)
8259        if (LD)
8260          DAG.makeEquivalentMemoryOrdering(LD, ResNode);
8261      return DAG.getBitcast(VT, ResNode);
8262    }
8263  }
8264
8265  // BROADCAST - match the smallest possible repetition pattern, load that
8266  // scalar/subvector element and then broadcast to the entire vector.
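  // For example, a v8i32 build vector <a,b,a,b,a,b,a,b> fed by two consecutive
  // 32-bit loads repeats every 64 bits, so (types permitting) it can be lowered
  // as a single 64-bit load broadcast to the full vector width.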
8267  if (ZeroMask.isNullValue() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
8268      (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
8269    for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
8270      unsigned RepeatSize = SubElems * BaseSizeInBits;
8271      unsigned ScalarSize = std::min(RepeatSize, 64u);
8272      if (!Subtarget.hasAVX2() && ScalarSize < 32)
8273        continue;
8274
8275      bool Match = true;
8276      SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(EltBaseVT));
8277      for (unsigned i = 0; i != NumElems && Match; ++i) {
8278        if (!LoadMask[i])
8279          continue;
8280        SDValue Elt = peekThroughBitcasts(Elts[i]);
8281        if (RepeatedLoads[i % SubElems].isUndef())
8282          RepeatedLoads[i % SubElems] = Elt;
8283        else
8284          Match &= (RepeatedLoads[i % SubElems] == Elt);
8285      }
8286
8287      // We must have loads at both ends of the repetition.
8288      Match &= !RepeatedLoads.front().isUndef();
8289      Match &= !RepeatedLoads.back().isUndef();
8290      if (!Match)
8291        continue;
8292
8293      EVT RepeatVT =
8294          VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
8295              ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
8296              : EVT::getFloatingPointVT(ScalarSize);
8297      if (RepeatSize > ScalarSize)
8298        RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
8299                                    RepeatSize / ScalarSize);
8300      EVT BroadcastVT =
8301          EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
8302                           VT.getSizeInBits() / ScalarSize);
8303      if (TLI.isTypeLegal(BroadcastVT)) {
8304        if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
8305                RepeatVT, RepeatedLoads, DL, DAG, Subtarget, isAfterLegalize)) {
8306          unsigned Opcode = RepeatSize > ScalarSize ? X86ISD::SUBV_BROADCAST
8307                                                    : X86ISD::VBROADCAST;
8308          SDValue Broadcast = DAG.getNode(Opcode, DL, BroadcastVT, RepeatLoad);
8309          return DAG.getBitcast(VT, Broadcast);
8310        }
8311      }
8312    }
8313  }
8314
8315  return SDValue();
8316}
8317
8318// Combine a vector op (shuffles etc.) that is equal to build_vector load1,
8319// load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
8320// are consecutive, non-overlapping, and in the right order.
8321static SDValue combineToConsecutiveLoads(EVT VT, SDNode *N, const SDLoc &DL,
8322                                         SelectionDAG &DAG,
8323                                         const X86Subtarget &Subtarget,
8324                                         bool isAfterLegalize) {
8325  SmallVector<SDValue, 64> Elts;
8326  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
8327    if (SDValue Elt = getShuffleScalarElt(N, i, DAG, 0)) {
8328      Elts.push_back(Elt);
8329      continue;
8330    }
8331    return SDValue();
8332  }
8333  assert(Elts.size() == VT.getVectorNumElements());
8334  return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
8335                                  isAfterLegalize);
8336}
8337
8338static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
8339                                   unsigned SplatBitSize, LLVMContext &C) {
8340  unsigned ScalarSize = VT.getScalarSizeInBits();
8341  unsigned NumElm = SplatBitSize / ScalarSize;
8342
8343  SmallVector<Constant *, 32> ConstantVec;
8344  for (unsigned i = 0; i < NumElm; i++) {
8345    APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i);
8346    Constant *Const;
8347    if (VT.isFloatingPoint()) {
8348      if (ScalarSize == 32) {
8349        Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
8350      } else {
8351        assert(ScalarSize == 64 && "Unsupported floating point scalar size");
8352        Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
8353      }
8354    } else
8355      Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
8356    ConstantVec.push_back(Const);
8357  }
8358  return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
8359}
8360
8361static bool isFoldableUseOfShuffle(SDNode *N) {
8362  for (auto *U : N->uses()) {
8363    unsigned Opc = U->getOpcode();
8364    // VPERMV/VPERMV3 shuffles can never fold their index operands.
8365    if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
8366      return false;
8367    if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
8368      return false;
8369    if (isTargetShuffle(Opc))
8370      return true;
8371    if (Opc == ISD::BITCAST) // Ignore bitcasts
8372      return isFoldableUseOfShuffle(U);
8373    if (N->hasOneUse())
8374      return true;
8375  }
8376  return false;
8377}
8378
8379// Check if the current node of the build vector is a zero-extended vector.
8380// If so, return the value extended.
8381// For example: (0,0,0,a,0,0,0,a,0,0,0,a,0,0,0,a) returns a.
8382// NumElt - return the number of zero-extended identical values.
8383// EltType - return the type of the value including the zero extend.
8384static SDValue isSplatZeroExtended(const BuildVectorSDNode *Op,
8385                                   unsigned &NumElt, MVT &EltType) {
8386  SDValue ExtValue = Op->getOperand(0);
8387  unsigned NumElts = Op->getNumOperands();
8388  unsigned Delta = NumElts;
8389
8390  for (unsigned i = 1; i < NumElts; i++) {
8391    if (Op->getOperand(i) == ExtValue) {
8392      Delta = i;
8393      break;
8394    }
8395    if (!(Op->getOperand(i).isUndef() || isNullConstant(Op->getOperand(i))))
8396      return SDValue();
8397  }
8398  if (!isPowerOf2_32(Delta) || Delta == 1)
8399    return SDValue();
8400
8401  for (unsigned i = Delta; i < NumElts; i++) {
8402    if (i % Delta == 0) {
8403      if (Op->getOperand(i) != ExtValue)
8404        return SDValue();
8405    } else if (!(isNullConstant(Op->getOperand(i)) ||
8406                 Op->getOperand(i).isUndef()))
8407      return SDValue();
8408  }
8409  unsigned EltSize = Op->getSimpleValueType(0).getScalarSizeInBits();
8410  unsigned ExtVTSize = EltSize * Delta;
8411  EltType = MVT::getIntegerVT(ExtVTSize);
8412  NumElt = NumElts / Delta;
8413  return ExtValue;
8414}
8415
8416/// Attempt to use the vbroadcast instruction to generate a splat value
8417/// from a splat BUILD_VECTOR which uses:
8418///  a. A single scalar load, or a constant.
8419///  b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
8420///
8421/// The VBROADCAST node is returned when a pattern is found,
8422/// or SDValue() otherwise.
8423static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
8424                                           const X86Subtarget &Subtarget,
8425                                           SelectionDAG &DAG) {
8426  // VBROADCAST requires AVX.
8427  // TODO: Splats could be generated for non-AVX CPUs using SSE
8428  // instructions, but there's less potential gain for only 128-bit vectors.
8429  if (!Subtarget.hasAVX())
8430    return SDValue();
8431
8432  MVT VT = BVOp->getSimpleValueType(0);
8433  SDLoc dl(BVOp);
8434
8435  assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
8436         "Unsupported vector type for broadcast.");
8437
8438  BitVector UndefElements;
8439  SDValue Ld = BVOp->getSplatValue(&UndefElements);
8440
8441  // Attempt to use VBROADCASTM
8442  // From this pattern:
8443  // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
8444  // b. t1 = (build_vector t0 t0)
8445  //
8446  // Create (VBROADCASTM v2i1 X)
8447  if (Subtarget.hasCDI() && (VT.is512BitVector() || Subtarget.hasVLX())) {
8448    MVT EltType = VT.getScalarType();
8449    unsigned NumElts = VT.getVectorNumElements();
8450    SDValue BOperand;
8451    SDValue ZeroExtended = isSplatZeroExtended(BVOp, NumElts, EltType);
8452    if ((ZeroExtended && ZeroExtended.getOpcode() == ISD::BITCAST) ||
8453        (Ld && Ld.getOpcode() == ISD::ZERO_EXTEND &&
8454         Ld.getOperand(0).getOpcode() == ISD::BITCAST)) {
8455      if (ZeroExtended)
8456        BOperand = ZeroExtended.getOperand(0);
8457      else
8458        BOperand = Ld.getOperand(0).getOperand(0);
8459      MVT MaskVT = BOperand.getSimpleValueType();
8460      if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) || // for broadcastmb2q
8461          (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
8462        SDValue Brdcst =
8463            DAG.getNode(X86ISD::VBROADCASTM, dl,
8464                        MVT::getVectorVT(EltType, NumElts), BOperand);
8465        return DAG.getBitcast(VT, Brdcst);
8466      }
8467    }
8468  }
8469
8470  unsigned NumElts = VT.getVectorNumElements();
8471  unsigned NumUndefElts = UndefElements.count();
8472  if (!Ld || (NumElts - NumUndefElts) <= 1) {
8473    APInt SplatValue, Undef;
8474    unsigned SplatBitSize;
8475    bool HasUndef;
8476    // Check if this is a repeated constant pattern suitable for broadcasting.
8477    if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
8478        SplatBitSize > VT.getScalarSizeInBits() &&
8479        SplatBitSize < VT.getSizeInBits()) {
8480      // Avoid replacing with broadcast when it's a use of a shuffle
8481      // instruction to preserve the present custom lowering of shuffles.
8482      if (isFoldableUseOfShuffle(BVOp))
8483        return SDValue();
8484      // Replace BUILD_VECTOR with a broadcast of the repeated constants.
8485      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8486      LLVMContext *Ctx = DAG.getContext();
8487      MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
8488      if (Subtarget.hasAVX()) {
8489        if (SplatBitSize <= 64 && Subtarget.hasAVX2() &&
8490            !(SplatBitSize == 64 && Subtarget.is32Bit())) {
8491          // Splatted value can fit in one INTEGER constant in constant pool.
8492          // Load the constant and broadcast it.
8493          MVT CVT = MVT::getIntegerVT(SplatBitSize);
8494          Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize);
8495          Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue);
8496          SDValue CP = DAG.getConstantPool(C, PVT);
8497          unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
8498
8499          unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8500          Ld = DAG.getLoad(
8501              CVT, dl, DAG.getEntryNode(), CP,
8502              MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8503              Alignment);
8504          SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
8505                                       MVT::getVectorVT(CVT, Repeat), Ld);
8506          return DAG.getBitcast(VT, Brdcst);
8507        } else if (SplatBitSize == 32 || SplatBitSize == 64) {
8508          // Splatted value can fit in one FLOAT constant in constant pool.
8509          // Load the constant and broadcast it.
8510          // AVX has support for 32 and 64 bit broadcasts for floats only.
8511          // There is no 64-bit integer broadcast on a 32-bit subtarget.
8512          MVT CVT = MVT::getFloatingPointVT(SplatBitSize);
8513          // Lower the splat via APFloat directly, to avoid any conversion.
8514          Constant *C =
8515              SplatBitSize == 32
8516                  ? ConstantFP::get(*Ctx,
8517                                    APFloat(APFloat::IEEEsingle(), SplatValue))
8518                  : ConstantFP::get(*Ctx,
8519                                    APFloat(APFloat::IEEEdouble(), SplatValue));
8520          SDValue CP = DAG.getConstantPool(C, PVT);
8521          unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
8522
8523          unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8524          Ld = DAG.getLoad(
8525              CVT, dl, DAG.getEntryNode(), CP,
8526              MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8527              Alignment);
8528          SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
8529                                       MVT::getVectorVT(CVT, Repeat), Ld);
8530          return DAG.getBitcast(VT, Brdcst);
8531        } else if (SplatBitSize > 64) {
8532          // Load the vector of constants and broadcast it.
8533          MVT CVT = VT.getScalarType();
8534          Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
8535                                             *Ctx);
8536          SDValue VCP = DAG.getConstantPool(VecC, PVT);
8537          unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
8538          unsigned Alignment = cast<ConstantPoolSDNode>(VCP)->getAlignment();
8539          Ld = DAG.getLoad(
8540              MVT::getVectorVT(CVT, NumElm), dl, DAG.getEntryNode(), VCP,
8541              MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8542              Alignment);
8543          SDValue Brdcst = DAG.getNode(X86ISD::SUBV_BROADCAST, dl, VT, Ld);
8544          return DAG.getBitcast(VT, Brdcst);
8545        }
8546      }
8547    }
8548
8549    // If we are moving a scalar into a vector (Ld must be set and all elements
8550    // but 1 are undef) and that operation is not obviously supported by
8551    // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
8552    // That's better than general shuffling and may eliminate a load to a GPR
8553    // and a move from a scalar register to a vector register.
8554    if (!Ld || NumElts - NumUndefElts != 1)
8555      return SDValue();
8556    unsigned ScalarSize = Ld.getValueSizeInBits();
8557    if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
8558      return SDValue();
8559  }
8560
8561  bool ConstSplatVal =
8562      (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
8563
8564  // Make sure that all of the users of a non-constant load are from the
8565  // BUILD_VECTOR node.
8566  if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
8567    return SDValue();
8568
8569  unsigned ScalarSize = Ld.getValueSizeInBits();
8570  bool IsGE256 = (VT.getSizeInBits() >= 256);
8571
8572  // When optimizing for size, generate up to 5 extra bytes for a broadcast
8573  // instruction to save 8 or more bytes of constant pool data.
8574  // TODO: If multiple splats are generated to load the same constant,
8575  // it may be detrimental to overall size. There needs to be a way to detect
8576  // that condition to know if this is truly a size win.
8577  bool OptForSize = DAG.shouldOptForSize();
8578
8579  // Handle broadcasting a single constant scalar from the constant pool
8580  // into a vector.
8581  // On Sandybridge (no AVX2), it is still better to load a constant vector
8582  // from the constant pool and not to broadcast it from a scalar.
8583  // But override that restriction when optimizing for size.
8584  // TODO: Check if splatting is recommended for other AVX-capable CPUs.
8585  if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
8586    EVT CVT = Ld.getValueType();
8587    assert(!CVT.isVector() && "Must not broadcast a vector type");
8588
8589    // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
8590    // For size optimization, also splat v2f64 and v2i64, and for size opt
8591    // with AVX2, also splat i8 and i16.
8592    // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
8593    if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8594        (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
8595      const Constant *C = nullptr;
8596      if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
8597        C = CI->getConstantIntValue();
8598      else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
8599        C = CF->getConstantFPValue();
8600
8601      assert(C && "Invalid constant type");
8602
8603      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8604      SDValue CP =
8605          DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
8606      unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8607      Ld = DAG.getLoad(
8608          CVT, dl, DAG.getEntryNode(), CP,
8609          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8610          Alignment);
8611
8612      return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8613    }
8614  }
8615
8616  bool IsLoad = ISD::isNormalLoad(Ld.getNode());
8617
8618  // Handle AVX2 in-register broadcasts.
8619  if (!IsLoad && Subtarget.hasInt256() &&
8620      (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
8621    return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8622
8623  // The scalar source must be a normal load.
8624  if (!IsLoad)
8625    return SDValue();
8626
8627  if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8628      (Subtarget.hasVLX() && ScalarSize == 64))
8629    return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8630
8631  // The integer check is needed for the 64-bit into 128-bit case, so it doesn't
8632  // match double, since there is no vbroadcastsd xmm.
8633  if (Subtarget.hasInt256() && Ld.getValueType().isInteger()) {
8634    if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
8635      return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8636  }
8637
8638  // Unsupported broadcast.
8639  return SDValue();
8640}
8641
8642/// For an EXTRACT_VECTOR_ELT with a constant index return the real
8643/// underlying vector and index.
8644///
8645/// Modifies \p ExtractedFromVec to the real vector and returns the real
8646/// index.
8647static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
8648                                         SDValue ExtIdx) {
8649  int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
8650  if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
8651    return Idx;
8652
8653  // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
8654  // lowered this:
8655  //   (extract_vector_elt (v8f32 %1), Constant<6>)
8656  // to:
8657  //   (extract_vector_elt (vector_shuffle<2,u,u,u>
8658  //                           (extract_subvector (v8f32 %0), Constant<4>),
8659  //                           undef)
8660  //                       Constant<0>)
8661  // In this case the vector is the extract_subvector expression and the index
8662  // is 2, as specified by the shuffle.
8663  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
8664  SDValue ShuffleVec = SVOp->getOperand(0);
8665  MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
8666  assert(ShuffleVecVT.getVectorElementType() ==
8667         ExtractedFromVec.getSimpleValueType().getVectorElementType());
8668
8669  int ShuffleIdx = SVOp->getMaskElt(Idx);
8670  if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
8671    ExtractedFromVec = ShuffleVec;
8672    return ShuffleIdx;
8673  }
8674  return Idx;
8675}
8676
8677static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
8678  MVT VT = Op.getSimpleValueType();
8679
8680  // Skip if insert_vec_elt is not supported.
8681  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8682  if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
8683    return SDValue();
8684
8685  SDLoc DL(Op);
8686  unsigned NumElems = Op.getNumOperands();
8687
8688  SDValue VecIn1;
8689  SDValue VecIn2;
8690  SmallVector<unsigned, 4> InsertIndices;
8691  SmallVector<int, 8> Mask(NumElems, -1);
8692
8693  for (unsigned i = 0; i != NumElems; ++i) {
8694    unsigned Opc = Op.getOperand(i).getOpcode();
8695
8696    if (Opc == ISD::UNDEF)
8697      continue;
8698
8699    if (Opc != ISD::EXTRACT_VECTOR_ELT) {
8700      // Quit if more than 1 element needs inserting.
8701      if (InsertIndices.size() > 1)
8702        return SDValue();
8703
8704      InsertIndices.push_back(i);
8705      continue;
8706    }
8707
8708    SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
8709    SDValue ExtIdx = Op.getOperand(i).getOperand(1);
8710
8711    // Quit if non-constant index.
8712    if (!isa<ConstantSDNode>(ExtIdx))
8713      return SDValue();
8714    int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
8715
8716    // Quit if extracted from a vector of a different type.
8717    if (ExtractedFromVec.getValueType() != VT)
8718      return SDValue();
8719
8720    if (!VecIn1.getNode())
8721      VecIn1 = ExtractedFromVec;
8722    else if (VecIn1 != ExtractedFromVec) {
8723      if (!VecIn2.getNode())
8724        VecIn2 = ExtractedFromVec;
8725      else if (VecIn2 != ExtractedFromVec)
8726        // Quit if there are more than 2 vectors to shuffle.
8727        return SDValue();
8728    }
8729
8730    if (ExtractedFromVec == VecIn1)
8731      Mask[i] = Idx;
8732    else if (ExtractedFromVec == VecIn2)
8733      Mask[i] = Idx + NumElems;
8734  }
8735
8736  if (!VecIn1.getNode())
8737    return SDValue();
8738
8739  VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
8740  SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
8741
8742  for (unsigned Idx : InsertIndices)
8743    NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
8744                     DAG.getIntPtrConstant(Idx, DL));
8745
8746  return NV;
8747}
8748
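/// Convert a constant vXi1 build vector into the equivalent integer bitmask,
/// e.g. v8i1 <1,0,1,1,undef,0,0,1> becomes the i8 constant 0x8D (undef lanes
/// contribute 0).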
8749static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
8750  assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
8751         Op.getScalarValueSizeInBits() == 1 &&
8752         "Can not convert non-constant vector");
8753  uint64_t Immediate = 0;
8754  for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
8755    SDValue In = Op.getOperand(idx);
8756    if (!In.isUndef())
8757      Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
8758  }
8759  SDLoc dl(Op);
8760  MVT VT = MVT::getIntegerVT(std::max((int)Op.getValueSizeInBits(), 8));
8761  return DAG.getConstant(Immediate, dl, VT);
8762}
8763// Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
8764static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
8765                                     const X86Subtarget &Subtarget) {
8766
8767  MVT VT = Op.getSimpleValueType();
8768  assert((VT.getVectorElementType() == MVT::i1) &&
8769         "Unexpected type in LowerBUILD_VECTORvXi1!");
8770
8771  SDLoc dl(Op);
8772  if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
8773      ISD::isBuildVectorAllOnes(Op.getNode()))
8774    return Op;
8775
8776  uint64_t Immediate = 0;
8777  SmallVector<unsigned, 16> NonConstIdx;
8778  bool IsSplat = true;
8779  bool HasConstElts = false;
8780  int SplatIdx = -1;
8781  for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
8782    SDValue In = Op.getOperand(idx);
8783    if (In.isUndef())
8784      continue;
8785    if (!isa<ConstantSDNode>(In))
8786      NonConstIdx.push_back(idx);
8787    else {
8788      Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
8789      HasConstElts = true;
8790    }
8791    if (SplatIdx < 0)
8792      SplatIdx = idx;
8793    else if (In != Op.getOperand(SplatIdx))
8794      IsSplat = false;
8795  }
8796
8797  // For a splat, use "(select i1 splat_elt, all-ones, all-zeroes)".
8798  if (IsSplat) {
8799    // The build_vector allows the scalar element to be larger than the vector
8800    // element type. We need to mask it to use as a condition unless we know
8801    // the upper bits are zero.
8802    // FIXME: Use computeKnownBits instead of checking specific opcode?
8803    SDValue Cond = Op.getOperand(SplatIdx);
8804    assert(Cond.getValueType() == MVT::i8 && "Unexpected VT!");
8805    if (Cond.getOpcode() != ISD::SETCC)
8806      Cond = DAG.getNode(ISD::AND, dl, MVT::i8, Cond,
8807                         DAG.getConstant(1, dl, MVT::i8));
8808    return DAG.getSelect(dl, VT, Cond,
8809                         DAG.getConstant(1, dl, VT),
8810                         DAG.getConstant(0, dl, VT));
8811  }
8812
8813  // Materialize constant elements, then insert non-constant elements one by one.
8814  SDValue DstVec;
8815  if (HasConstElts) {
8816    if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
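      // On 32-bit targets i64 is not legal, so split the 64-bit mask into two
      // i32 halves, bitcast each half to v32i1 and concatenate the results.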
8817      SDValue ImmL = DAG.getConstant(Lo_32(Immediate), dl, MVT::i32);
8818      SDValue ImmH = DAG.getConstant(Hi_32(Immediate), dl, MVT::i32);
8819      ImmL = DAG.getBitcast(MVT::v32i1, ImmL);
8820      ImmH = DAG.getBitcast(MVT::v32i1, ImmH);
8821      DstVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, ImmL, ImmH);
8822    } else {
8823      MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
8824      SDValue Imm = DAG.getConstant(Immediate, dl, ImmVT);
8825      MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
8826      DstVec = DAG.getBitcast(VecVT, Imm);
8827      DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, DstVec,
8828                           DAG.getIntPtrConstant(0, dl));
8829    }
8830  } else
8831    DstVec = DAG.getUNDEF(VT);
8832
8833  for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
8834    unsigned InsertIdx = NonConstIdx[i];
8835    DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
8836                         Op.getOperand(InsertIdx),
8837                         DAG.getIntPtrConstant(InsertIdx, dl));
8838  }
8839  return DstVec;
8840}
8841
8842/// This is a helper function of LowerToHorizontalOp().
8843/// This function checks that the build_vector \p N in input implements a
8844/// 128-bit partial horizontal operation on a 256-bit vector, but that operation
8845/// may not match the layout of an x86 256-bit horizontal instruction.
8846/// In other words, if this returns true, then some extraction/insertion will
8847/// be required to produce a valid horizontal instruction.
8848///
8849/// Parameter \p Opcode defines the kind of horizontal operation to match.
8850/// For example, if \p Opcode is equal to ISD::ADD, then this function
8851/// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
8852/// is equal to ISD::SUB, then this function checks if this is a horizontal
8853/// arithmetic sub.
8854///
8855/// This function only analyzes elements of \p N whose indices are
8856/// in range [BaseIdx, LastIdx).
8857///
8858/// TODO: This function was originally used to match both real and fake partial
8859/// horizontal operations, but the index-matching logic is incorrect for that.
8860/// See the corrected implementation in isHopBuildVector(). Can we reduce this
8861/// code because it is only used for partial h-op matching now?
8862static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
8863                                  SelectionDAG &DAG,
8864                                  unsigned BaseIdx, unsigned LastIdx,
8865                                  SDValue &V0, SDValue &V1) {
8866  EVT VT = N->getValueType(0);
8867  assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
8868  assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
8869  assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
8870         "Invalid Vector in input!");
8871
8872  bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
8873  bool CanFold = true;
8874  unsigned ExpectedVExtractIdx = BaseIdx;
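  // ExpectedVExtractIdx tracks the extract index that the next operand pair
  // must use; it advances by 2 per element and restarts at BaseIdx when we
  // cross from the first source vector (V0) to the second (V1).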
8875  unsigned NumElts = LastIdx - BaseIdx;
8876  V0 = DAG.getUNDEF(VT);
8877  V1 = DAG.getUNDEF(VT);
8878
8879  // Check if N implements a horizontal binop.
8880  for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
8881    SDValue Op = N->getOperand(i + BaseIdx);
8882
8883    // Skip UNDEFs.
8884    if (Op->isUndef()) {
8885      // Update the expected vector extract index.
8886      if (i * 2 == NumElts)
8887        ExpectedVExtractIdx = BaseIdx;
8888      ExpectedVExtractIdx += 2;
8889      continue;
8890    }
8891
8892    CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
8893
8894    if (!CanFold)
8895      break;
8896
8897    SDValue Op0 = Op.getOperand(0);
8898    SDValue Op1 = Op.getOperand(1);
8899
8900    // Try to match the following pattern:
8901    // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
8902    CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
8903        Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
8904        Op0.getOperand(0) == Op1.getOperand(0) &&
8905        isa<ConstantSDNode>(Op0.getOperand(1)) &&
8906        isa<ConstantSDNode>(Op1.getOperand(1)));
8907    if (!CanFold)
8908      break;
8909
8910    unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
8911    unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
8912
8913    if (i * 2 < NumElts) {
8914      if (V0.isUndef()) {
8915        V0 = Op0.getOperand(0);
8916        if (V0.getValueType() != VT)
8917          return false;
8918      }
8919    } else {
8920      if (V1.isUndef()) {
8921        V1 = Op0.getOperand(0);
8922        if (V1.getValueType() != VT)
8923          return false;
8924      }
8925      if (i * 2 == NumElts)
8926        ExpectedVExtractIdx = BaseIdx;
8927    }
8928
8929    SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
8930    if (I0 == ExpectedVExtractIdx)
8931      CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
8932    else if (IsCommutable && I1 == ExpectedVExtractIdx) {
8933      // Try to match the following dag sequence:
8934      // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
8935      CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
8936    } else
8937      CanFold = false;
8938
8939    ExpectedVExtractIdx += 2;
8940  }
8941
8942  return CanFold;
8943}
8944
8945/// Emit a sequence of two 128-bit horizontal add/sub followed by
8946/// a concat_vector.
8947///
8948/// This is a helper function of LowerToHorizontalOp().
8949/// This function expects two 256-bit vectors called V0 and V1.
8950/// At first, each vector is split into two separate 128-bit vectors.
8951/// Then, the resulting 128-bit vectors are used to implement two
8952/// horizontal binary operations.
8953///
8954/// The kind of horizontal binary operation is defined by \p X86Opcode.
8955///
8956/// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
8957/// the two new horizontal binops.
8958/// When Mode is set, the first horizontal binop dag node takes as input
8959/// the lower 128 bits of V0 and the upper 128 bits of V0. The second
8960/// horizontal binop dag node takes as input the lower 128 bits of V1
8961/// and the upper 128 bits of V1.
8962///   Example:
8963///     HADD V0_LO, V0_HI
8964///     HADD V1_LO, V1_HI
8965///
8966/// Otherwise, the first horizontal binop dag node takes as input the lower
8967/// 128 bits of V0 and the lower 128 bits of V1, and the second horizontal binop
8968/// dag node takes the upper 128 bits of V0 and the upper 128 bits of V1.
8969///   Example:
8970///     HADD V0_LO, V1_LO
8971///     HADD V0_HI, V1_HI
8972///
8973/// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
8974/// 128 bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
8975/// the upper 128 bits of the result.
8976static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
8977                                     const SDLoc &DL, SelectionDAG &DAG,
8978                                     unsigned X86Opcode, bool Mode,
8979                                     bool isUndefLO, bool isUndefHI) {
8980  MVT VT = V0.getSimpleValueType();
8981  assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
8982         "Invalid nodes in input!");
8983
8984  unsigned NumElts = VT.getVectorNumElements();
8985  SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
8986  SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
8987  SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
8988  SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
8989  MVT NewVT = V0_LO.getSimpleValueType();
8990
8991  SDValue LO = DAG.getUNDEF(NewVT);
8992  SDValue HI = DAG.getUNDEF(NewVT);
8993
8994  if (Mode) {
8995    // Don't emit a horizontal binop if the result is expected to be UNDEF.
8996    if (!isUndefLO && !V0->isUndef())
8997      LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
8998    if (!isUndefHI && !V1->isUndef())
8999      HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
9000  } else {
9001    // Don't emit a horizontal binop if the result is expected to be UNDEF.
9002    if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
9003      LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
9004
9005    if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
9006      HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
9007  }
9008
9009  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
9010}
9011
9012/// Returns true iff \p BV builds a vector with the result equivalent to
9013/// the result of ADDSUB/SUBADD operation.
9014/// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
9015/// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
9016/// \p Opnd0 and \p Opnd1.
9017static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
9018                             const X86Subtarget &Subtarget, SelectionDAG &DAG,
9019                             SDValue &Opnd0, SDValue &Opnd1,
9020                             unsigned &NumExtracts,
9021                             bool &IsSubAdd) {
9022
9023  MVT VT = BV->getSimpleValueType(0);
9024  if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
9025    return false;
9026
9027  unsigned NumElts = VT.getVectorNumElements();
9028  SDValue InVec0 = DAG.getUNDEF(VT);
9029  SDValue InVec1 = DAG.getUNDEF(VT);
9030
9031  NumExtracts = 0;
9032
9033  // Odd-numbered elements in the input build vector are obtained from
9034  // adding/subtracting two float elements.
9035  // Even-numbered elements in the input build vector are obtained from
9036  // subtracting/adding two float elements.
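  // Opc[0] and Opc[1] record the opcode seen at even and odd element
  // positions respectively; a valid ADDSUB/SUBADD needs both to be set and
  // to differ from each other.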
9037  unsigned Opc[2] = {0, 0};
9038  for (unsigned i = 0, e = NumElts; i != e; ++i) {
9039    SDValue Op = BV->getOperand(i);
9040
9041    // Skip 'undef' values.
9042    unsigned Opcode = Op.getOpcode();
9043    if (Opcode == ISD::UNDEF)
9044      continue;
9045
9046    // Early exit if we found an unexpected opcode.
9047    if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
9048      return false;
9049
9050    SDValue Op0 = Op.getOperand(0);
9051    SDValue Op1 = Op.getOperand(1);
9052
9053    // Try to match the following pattern:
9054    // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
9055    // Early exit if we cannot match that sequence.
9056    if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9057        Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9058        !isa<ConstantSDNode>(Op0.getOperand(1)) ||
9059        !isa<ConstantSDNode>(Op1.getOperand(1)) ||
9060        Op0.getOperand(1) != Op1.getOperand(1))
9061      return false;
9062
9063    unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
9064    if (I0 != i)
9065      return false;
9066
9067    // We found a valid add/sub node; make sure it's the same opcode as previous
9068    // elements for this parity.
9069    if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
9070      return false;
9071    Opc[i % 2] = Opcode;
9072
9073    // Update InVec0 and InVec1.
9074    if (InVec0.isUndef()) {
9075      InVec0 = Op0.getOperand(0);
9076      if (InVec0.getSimpleValueType() != VT)
9077        return false;
9078    }
9079    if (InVec1.isUndef()) {
9080      InVec1 = Op1.getOperand(0);
9081      if (InVec1.getSimpleValueType() != VT)
9082        return false;
9083    }
9084
9085    // Make sure that the operands of each add/sub node always
9086    // come from the same pair of vectors.
9087    if (InVec0 != Op0.getOperand(0)) {
9088      if (Opcode == ISD::FSUB)
9089        return false;
9090
9091      // FADD is commutable. Try to commute the operands
9092      // and then test again.
9093      std::swap(Op0, Op1);
9094      if (InVec0 != Op0.getOperand(0))
9095        return false;
9096    }
9097
9098    if (InVec1 != Op1.getOperand(0))
9099      return false;
9100
9101    // Increment the number of extractions done.
9102    ++NumExtracts;
9103  }
9104
9105  // Ensure we have found an opcode for both parities and that they are
9106  // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
9107  // inputs are undef.
9108  if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
9109      InVec0.isUndef() || InVec1.isUndef())
9110    return false;
9111
9112  IsSubAdd = Opc[0] == ISD::FADD;
9113
9114  Opnd0 = InVec0;
9115  Opnd1 = InVec1;
9116  return true;
9117}
9118
9119/// Returns true if it is possible to fold MUL and an idiom that has already
9120/// been recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
9121/// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
9122/// operands of FMADDSUB/FMSUBADD are written to \p Opnd0, \p Opnd1, \p Opnd2.
9123///
9124/// Prior to calling this function it should be known that there is some
9125/// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
9126/// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
9127/// before replacement of such SDNode with ADDSUB operation. Thus the number
9128/// of \p Opnd0 uses is expected to be equal to 2.
9129/// For example, this function may be called for the following IR:
9130///    %AB = fmul fast <2 x double> %A, %B
9131///    %Sub = fsub fast <2 x double> %AB, %C
9132///    %Add = fadd fast <2 x double> %AB, %C
9133///    %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
9134///                            <2 x i32> <i32 0, i32 3>
9135/// There is a def for %Addsub here, which potentially can be replaced by
9136/// X86ISD::ADDSUB operation:
9137///    %Addsub = X86ISD::ADDSUB %AB, %C
9138/// and such ADDSUB can further be replaced with FMADDSUB:
9139///    %Addsub = FMADDSUB %A, %B, %C.
9140///
9141/// The main reason why this method is called before the replacement of the
9142/// recognized ADDSUB idiom with ADDSUB operation is that such replacement
9143/// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
9144/// FMADDSUB is.
9145static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
9146                                 SelectionDAG &DAG,
9147                                 SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
9148                                 unsigned ExpectedUses) {
9149  if (Opnd0.getOpcode() != ISD::FMUL ||
9150      !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
9151    return false;
9152
9153  // FIXME: These checks must match the similar ones in
9154  // DAGCombiner::visitFADDForFMACombine. It would be good to have one
9155  // function that would answer if it is Ok to fuse MUL + ADD to FMADD
9156  // or MUL + ADDSUB to FMADDSUB.
9157  const TargetOptions &Options = DAG.getTarget().Options;
9158  bool AllowFusion =
9159      (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
9160  if (!AllowFusion)
9161    return false;
9162
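  // The recognized ADDSUB/SUBADD was (A * B) +/- C with Opnd0 = (A * B) and
  // Opnd1 = C; return A, B and C in Opnd0, Opnd1 and Opnd2 for the
  // FMADDSUB/FMSUBADD node.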
9163  Opnd2 = Opnd1;
9164  Opnd1 = Opnd0.getOperand(1);
9165  Opnd0 = Opnd0.getOperand(0);
9166
9167  return true;
9168}
9169
9170/// Try to fold a build_vector that performs an 'addsub', 'fmaddsub' or
9171/// 'fmsubadd' operation into an X86ISD::ADDSUB, X86ISD::FMADDSUB or
9172/// X86ISD::FMSUBADD node.
9173static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
9174                                       const X86Subtarget &Subtarget,
9175                                       SelectionDAG &DAG) {
9176  SDValue Opnd0, Opnd1;
9177  unsigned NumExtracts;
9178  bool IsSubAdd;
9179  if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
9180                        IsSubAdd))
9181    return SDValue();
9182
9183  MVT VT = BV->getSimpleValueType(0);
9184  SDLoc DL(BV);
9185
9186  // Try to generate X86ISD::FMADDSUB node here.
9187  SDValue Opnd2;
9188  if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
9189    unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
9190    return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
9191  }
9192
9193  // We only support ADDSUB.
9194  if (IsSubAdd)
9195    return SDValue();
9196
9197  // Do not generate X86ISD::ADDSUB node for 512-bit types even though
9198  // the ADDSUB idiom has been successfully recognized. There are no known
9199  // X86 targets with 512-bit ADDSUB instructions!
9200  // 512-bit ADDSUB idiom recognition was needed only as part of FMADDSUB idiom
9201  // recognition.
9202  if (VT.is512BitVector())
9203    return SDValue();
9204
9205  return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
9206}
9207
9208static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
9209                             unsigned &HOpcode, SDValue &V0, SDValue &V1) {
9210  // Initialize outputs to known values.
9211  MVT VT = BV->getSimpleValueType(0);
9212  HOpcode = ISD::DELETED_NODE;
9213  V0 = DAG.getUNDEF(VT);
9214  V1 = DAG.getUNDEF(VT);
9215
9216  // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
9217  // half of the result is calculated from just the corresponding 128-bit halves
9218  // of the inputs, which complicates the index-checking logic below.
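  // For example, a v8i32 HADD of A and B computes:
  //   { A[0]+A[1], A[2]+A[3], B[0]+B[1], B[2]+B[3],
  //     A[4]+A[5], A[6]+A[7], B[4]+B[5], B[6]+B[7] }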
9219  unsigned NumElts = VT.getVectorNumElements();
9220  unsigned GenericOpcode = ISD::DELETED_NODE;
9221  unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
9222  unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
9223  unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
9224  for (unsigned i = 0; i != Num128BitChunks; ++i) {
9225    for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
9226      // Ignore undef elements.
9227      SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
9228      if (Op.isUndef())
9229        continue;
9230
9231      // If there's an opcode mismatch, we're done.
9232      if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
9233        return false;
9234
9235      // Initialize horizontal opcode.
9236      if (HOpcode == ISD::DELETED_NODE) {
9237        GenericOpcode = Op.getOpcode();
9238        switch (GenericOpcode) {
9239        case ISD::ADD: HOpcode = X86ISD::HADD; break;
9240        case ISD::SUB: HOpcode = X86ISD::HSUB; break;
9241        case ISD::FADD: HOpcode = X86ISD::FHADD; break;
9242        case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
9243        default: return false;
9244        }
9245      }
9246
9247      SDValue Op0 = Op.getOperand(0);
9248      SDValue Op1 = Op.getOperand(1);
9249      if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9250          Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9251          Op0.getOperand(0) != Op1.getOperand(0) ||
9252          !isa<ConstantSDNode>(Op0.getOperand(1)) ||
9253          !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
9254        return false;
9255
9256      // The source vector is chosen based on which 64-bit half of the
9257      // destination vector is being calculated.
9258      if (j < NumEltsIn64Bits) {
9259        if (V0.isUndef())
9260          V0 = Op0.getOperand(0);
9261      } else {
9262        if (V1.isUndef())
9263          V1 = Op0.getOperand(0);
9264      }
9265
9266      SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
9267      if (SourceVec != Op0.getOperand(0))
9268        return false;
9269
9270      // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
9271      unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
9272      unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
9273      unsigned ExpectedIndex = i * NumEltsIn128Bits +
9274                               (j % NumEltsIn64Bits) * 2;
9275      if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
9276        continue;
9277
9278      // If this is not a commutative op, this does not match.
9279      if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
9280        return false;
9281
9282      // Addition is commutative, so try swapping the extract indexes.
9283      // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
9284      if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
9285        continue;
9286
9287      // Extract indexes do not match horizontal requirement.
9288      return false;
9289    }
9290  }
9291  // We matched. Opcode and operands are returned by reference as arguments.
9292  return true;
9293}
9294
9295static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
9296                                    SelectionDAG &DAG, unsigned HOpcode,
9297                                    SDValue V0, SDValue V1) {
9298  // If either input vector is not the same size as the build vector,
9299  // extract/insert the low bits to the correct size.
9300  // This is free (examples: zmm --> xmm, xmm --> ymm).
9301  MVT VT = BV->getSimpleValueType(0);
9302  unsigned Width = VT.getSizeInBits();
9303  if (V0.getValueSizeInBits() > Width)
9304    V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
9305  else if (V0.getValueSizeInBits() < Width)
9306    V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
9307
9308  if (V1.getValueSizeInBits() > Width)
9309    V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
9310  else if (V1.getValueSizeInBits() < Width)
9311    V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
9312
9313  unsigned NumElts = VT.getVectorNumElements();
9314  APInt DemandedElts = APInt::getAllOnesValue(NumElts);
9315  for (unsigned i = 0; i != NumElts; ++i)
9316    if (BV->getOperand(i).isUndef())
9317      DemandedElts.clearBit(i);
9318
9319  // If we don't need the upper xmm, then perform as an xmm hop.
9320  unsigned HalfNumElts = NumElts / 2;
9321  if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
9322    MVT HalfVT = VT.getHalfNumVectorElementsVT();
9323    V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
9324    V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
9325    SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
9326    return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
9327  }
9328
9329  return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
9330}
9331
9332/// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
9333static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
9334                                   const X86Subtarget &Subtarget,
9335                                   SelectionDAG &DAG) {
9336  // We need at least 2 non-undef elements to make this worthwhile by default.
9337  unsigned NumNonUndefs =
9338      count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
9339  if (NumNonUndefs < 2)
9340    return SDValue();
9341
9342  // There are 4 sets of horizontal math operations distinguished by type:
9343  // int/FP at 128-bit/256-bit. Each type was introduced with a different
9344  // subtarget feature. Try to match those "native" patterns first.
9345  MVT VT = BV->getSimpleValueType(0);
9346  if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
9347      ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
9348      ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
9349      ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
9350    unsigned HOpcode;
9351    SDValue V0, V1;
9352    if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
9353      return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
9354  }
9355
9356  // Try harder to match 256-bit ops by using extract/concat.
9357  if (!Subtarget.hasAVX() || !VT.is256BitVector())
9358    return SDValue();
9359
9360  // Count the number of UNDEF operands in the input build_vector.
9361  unsigned NumElts = VT.getVectorNumElements();
9362  unsigned Half = NumElts / 2;
9363  unsigned NumUndefsLO = 0;
9364  unsigned NumUndefsHI = 0;
9365  for (unsigned i = 0, e = Half; i != e; ++i)
9366    if (BV->getOperand(i)->isUndef())
9367      NumUndefsLO++;
9368
9369  for (unsigned i = Half, e = NumElts; i != e; ++i)
9370    if (BV->getOperand(i)->isUndef())
9371      NumUndefsHI++;
9372
9373  SDLoc DL(BV);
9374  SDValue InVec0, InVec1;
9375  if (VT == MVT::v8i32 || VT == MVT::v16i16) {
9376    SDValue InVec2, InVec3;
9377    unsigned X86Opcode;
9378    bool CanFold = true;
9379
9380    if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
9381        isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
9382                              InVec3) &&
9383        ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
9384        ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
9385      X86Opcode = X86ISD::HADD;
9386    else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
9387                                   InVec1) &&
9388             isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
9389                                   InVec3) &&
9390             ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
9391             ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
9392      X86Opcode = X86ISD::HSUB;
9393    else
9394      CanFold = false;
9395
9396    if (CanFold) {
9397      // Do not try to expand this build_vector into a pair of horizontal
9398      // add/sub if we can emit a pair of scalar add/sub.
9399      if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
9400        return SDValue();
9401
9402      // Convert this build_vector into a pair of horizontal binops followed by
9403      // a concat vector. We must adjust the outputs from the partial horizontal
9404      // matching calls above to account for undefined vector halves.
9405      SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
9406      SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
9407      assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
9408      bool isUndefLO = NumUndefsLO == Half;
9409      bool isUndefHI = NumUndefsHI == Half;
9410      return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
9411                                   isUndefHI);
9412    }
9413  }
9414
9415  if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
9416      VT == MVT::v16i16) {
9417    unsigned X86Opcode;
9418    if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
9419      X86Opcode = X86ISD::HADD;
9420    else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
9421                                   InVec1))
9422      X86Opcode = X86ISD::HSUB;
9423    else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
9424                                   InVec1))
9425      X86Opcode = X86ISD::FHADD;
9426    else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
9427                                   InVec1))
9428      X86Opcode = X86ISD::FHSUB;
9429    else
9430      return SDValue();
9431
9432    // Don't try to expand this build_vector into a pair of horizontal add/sub
9433    // if we can simply emit a pair of scalar add/sub.
9434    if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
9435      return SDValue();
9436
9437    // Convert this build_vector into two horizontal add/sub followed by
9438    // a concat vector.
9439    bool isUndefLO = NumUndefsLO == Half;
9440    bool isUndefHI = NumUndefsHI == Half;
9441    return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
9442                                 isUndefLO, isUndefHI);
9443  }
9444
9445  return SDValue();
9446}
9447
9448/// If a BUILD_VECTOR's source elements all apply the same bit operation and
9449/// one of their operands is constant, lower to a pair of BUILD_VECTORs and
9450/// just apply the bit operation to the two vectors.
9451/// NOTE: It's not in our interest to start making a general purpose vectorizer
9452/// from this, but enough scalar bit operations are created by the later
9453/// legalization + scalarization stages to need basic support.
9454static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
9455                                       SelectionDAG &DAG) {
9456  SDLoc DL(Op);
9457  MVT VT = Op->getSimpleValueType(0);
9458  unsigned NumElems = VT.getVectorNumElements();
9459  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9460
9461  // Check that all elements have the same opcode.
9462  // TODO: Should we allow UNDEFS and if so how many?
9463  unsigned Opcode = Op->getOperand(0).getOpcode();
9464  for (unsigned i = 1; i < NumElems; ++i)
9465    if (Opcode != Op->getOperand(i).getOpcode())
9466      return SDValue();
9467
9468  // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
9469  bool IsShift = false;
9470  switch (Opcode) {
9471  default:
9472    return SDValue();
9473  case ISD::SHL:
9474  case ISD::SRL:
9475  case ISD::SRA:
9476    IsShift = true;
9477    break;
9478  case ISD::AND:
9479  case ISD::XOR:
9480  case ISD::OR:
9481    // Don't do this if the buildvector is a splat - we'd replace one
9482    // constant with an entire vector.
9483    if (Op->getSplatValue())
9484      return SDValue();
9485    if (!TLI.isOperationLegalOrPromote(Opcode, VT))
9486      return SDValue();
9487    break;
9488  }
9489
9490  SmallVector<SDValue, 4> LHSElts, RHSElts;
9491  for (SDValue Elt : Op->ops()) {
9492    SDValue LHS = Elt.getOperand(0);
9493    SDValue RHS = Elt.getOperand(1);
9494
9495    // We expect the canonicalized RHS operand to be the constant.
9496    if (!isa<ConstantSDNode>(RHS))
9497      return SDValue();
9498
9499    // Extend shift amounts.
9500    if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
9501      if (!IsShift)
9502        return SDValue();
9503      RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
9504    }
9505
9506    LHSElts.push_back(LHS);
9507    RHSElts.push_back(RHS);
9508  }
9509
9510  // Limit to shifts by uniform immediates.
9511  // TODO: Only accept vXi8/vXi64 special cases?
9512  // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
9513  if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
9514    return SDValue();
9515
9516  SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
9517  SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
9518  return DAG.getNode(Opcode, DL, VT, LHS, RHS);
9519}
9520
9521/// Create a vector constant without a load. SSE/AVX provide the bare minimum
9522/// functionality to do this, so it's all zeros, all ones, or some derivation
9523/// that is cheap to calculate.
9524static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
9525                                         const X86Subtarget &Subtarget) {
9526  SDLoc DL(Op);
9527  MVT VT = Op.getSimpleValueType();
9528
9529  // Vectors containing all zeros can be matched by pxor and xorps.
9530  if (ISD::isBuildVectorAllZeros(Op.getNode()))
9531    return Op;
9532
9533  // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
9534  // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
9535  // vpcmpeqd on 256-bit vectors.
9536  if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
9537    if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
9538      return Op;
9539
9540    return getOnesVector(VT, DAG, DL);
9541  }
9542
9543  return SDValue();
9544}
9545
9546/// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
9547/// from a vector of source values and a vector of extraction indices.
9548/// The vectors might be manipulated to match the type of the permute op.
9549static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
9550                                     SDLoc &DL, SelectionDAG &DAG,
9551                                     const X86Subtarget &Subtarget) {
9552  MVT ShuffleVT = VT;
9553  EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9554  unsigned NumElts = VT.getVectorNumElements();
9555  unsigned SizeInBits = VT.getSizeInBits();
9556
9557  // Adjust IndicesVec to match VT size.
9558  assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
9559         "Illegal variable permute mask size");
9560  if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
9561    IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
9562                                  NumElts * VT.getScalarSizeInBits());
9563  IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
9564
9565  // Handle a SrcVec whose size doesn't match VT.
9566  if (SrcVec.getValueSizeInBits() != SizeInBits) {
9567    if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
9568      // Handle larger SrcVec by treating it as a larger permute.
9569      unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
9570      VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
9571      IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9572      IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
9573                                  Subtarget, DAG, SDLoc(IndicesVec));
9574      return extractSubVector(
9575          createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget), 0,
9576          DAG, DL, SizeInBits);
9577    } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
9578      // Widen smaller SrcVec to match VT.
9579      SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
9580    } else
9581      return SDValue();
9582  }
9583
9584  auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
9585    assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
9586    EVT SrcVT = Idx.getValueType();
9587    unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
9588    uint64_t IndexScale = 0;
9589    uint64_t IndexOffset = 0;
9590
9591    // If we're scaling a smaller permute op, then we need to repeat the
9592    // indices, scaling and offsetting them as well.
9593    // e.g. v4i32 -> v16i8 (Scale = 4)
9594    // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
9595    // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
9596    for (uint64_t i = 0; i != Scale; ++i) {
9597      IndexScale |= Scale << (i * NumDstBits);
9598      IndexOffset |= i << (i * NumDstBits);
9599    }
9600
9601    Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
9602                      DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
9603    Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
9604                      DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
9605    return Idx;
9606  };
9607
9608  unsigned Opcode = 0;
9609  switch (VT.SimpleTy) {
9610  default:
9611    break;
9612  case MVT::v16i8:
9613    if (Subtarget.hasSSSE3())
9614      Opcode = X86ISD::PSHUFB;
9615    break;
9616  case MVT::v8i16:
9617    if (Subtarget.hasVLX() && Subtarget.hasBWI())
9618      Opcode = X86ISD::VPERMV;
9619    else if (Subtarget.hasSSSE3()) {
9620      Opcode = X86ISD::PSHUFB;
9621      ShuffleVT = MVT::v16i8;
9622    }
9623    break;
9624  case MVT::v4f32:
9625  case MVT::v4i32:
9626    if (Subtarget.hasAVX()) {
9627      Opcode = X86ISD::VPERMILPV;
9628      ShuffleVT = MVT::v4f32;
9629    } else if (Subtarget.hasSSSE3()) {
9630      Opcode = X86ISD::PSHUFB;
9631      ShuffleVT = MVT::v16i8;
9632    }
9633    break;
9634  case MVT::v2f64:
9635  case MVT::v2i64:
9636    if (Subtarget.hasAVX()) {
9637      // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
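      // (Adding IndicesVec to itself doubles each index, moving bit 0 of the
      // original index into bit 1.)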
9638      IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9639      Opcode = X86ISD::VPERMILPV;
9640      ShuffleVT = MVT::v2f64;
9641    } else if (Subtarget.hasSSE41()) {
9642      // SSE41 can compare v2i64 - select between indices 0 and 1.
9643      return DAG.getSelectCC(
9644          DL, IndicesVec,
9645          getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
9646          DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
9647          DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
9648          ISD::CondCode::SETEQ);
9649    }
9650    break;
9651  case MVT::v32i8:
9652    if (Subtarget.hasVLX() && Subtarget.hasVBMI())
9653      Opcode = X86ISD::VPERMV;
9654    else if (Subtarget.hasXOP()) {
9655      SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
9656      SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
9657      SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
9658      SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
9659      return DAG.getNode(
9660          ISD::CONCAT_VECTORS, DL, VT,
9661          DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
9662          DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
9663    } else if (Subtarget.hasAVX()) {
9664      SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
9665      SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
9666      SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
9667      SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
9668      auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
9669                              ArrayRef<SDValue> Ops) {
9670        // Permute Lo and Hi and then select based on index range.
9671        // This works as PSHUFB uses bits[3:0] to permute elements and we don't
9672        // care about bit[7] as it's just an index vector.
9673        SDValue Idx = Ops[2];
9674        EVT VT = Idx.getValueType();
9675        return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
9676                               DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
9677                               DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
9678                               ISD::CondCode::SETGT);
9679      };
9680      SDValue Ops[] = {LoLo, HiHi, IndicesVec};
9681      return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
9682                              PSHUFBBuilder);
9683    }
9684    break;
9685  case MVT::v16i16:
9686    if (Subtarget.hasVLX() && Subtarget.hasBWI())
9687      Opcode = X86ISD::VPERMV;
9688    else if (Subtarget.hasAVX()) {
9689      // Scale to v32i8 and perform as v32i8.
9690      IndicesVec = ScaleIndices(IndicesVec, 2);
9691      return DAG.getBitcast(
9692          VT, createVariablePermute(
9693                  MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
9694                  DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
9695    }
9696    break;
9697  case MVT::v8f32:
9698  case MVT::v8i32:
9699    if (Subtarget.hasAVX2())
9700      Opcode = X86ISD::VPERMV;
9701    else if (Subtarget.hasAVX()) {
9702      SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
9703      SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9704                                          {0, 1, 2, 3, 0, 1, 2, 3});
9705      SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9706                                          {4, 5, 6, 7, 4, 5, 6, 7});
9707      if (Subtarget.hasXOP())
9708        return DAG.getBitcast(
9709            VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32, LoLo, HiHi,
9710                            IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
9711      // Permute Lo and Hi and then select based on index range.
9712      // This works as VPERMILPS only uses index bits[0:1] to permute elements.
9713      SDValue Res = DAG.getSelectCC(
9714          DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
9715          DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
9716          DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
9717          ISD::CondCode::SETGT);
9718      return DAG.getBitcast(VT, Res);
9719    }
9720    break;
9721  case MVT::v4i64:
9722  case MVT::v4f64:
9723    if (Subtarget.hasAVX512()) {
9724      if (!Subtarget.hasVLX()) {
9725        MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
9726        SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
9727                                SDLoc(SrcVec));
9728        IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
9729                                    DAG, SDLoc(IndicesVec));
9730        SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
9731                                            DAG, Subtarget);
9732        return extract256BitVector(Res, 0, DAG, DL);
9733      }
9734      Opcode = X86ISD::VPERMV;
9735    } else if (Subtarget.hasAVX()) {
9736      SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
9737      SDValue LoLo =
9738          DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
9739      SDValue HiHi =
9740          DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
9741      // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
9742      IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9743      if (Subtarget.hasXOP())
9744        return DAG.getBitcast(
9745            VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64, LoLo, HiHi,
9746                            IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
9747      // Permute Lo and Hi and then select based on index range.
9748      // This works as VPERMILPD only uses index bit[1] to permute elements.
9749      SDValue Res = DAG.getSelectCC(
9750          DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
9751          DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
9752          DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
9753          ISD::CondCode::SETGT);
9754      return DAG.getBitcast(VT, Res);
9755    }
9756    break;
9757  case MVT::v64i8:
9758    if (Subtarget.hasVBMI())
9759      Opcode = X86ISD::VPERMV;
9760    break;
9761  case MVT::v32i16:
9762    if (Subtarget.hasBWI())
9763      Opcode = X86ISD::VPERMV;
9764    break;
9765  case MVT::v16f32:
9766  case MVT::v16i32:
9767  case MVT::v8f64:
9768  case MVT::v8i64:
9769    if (Subtarget.hasAVX512())
9770      Opcode = X86ISD::VPERMV;
9771    break;
9772  }
9773  if (!Opcode)
9774    return SDValue();
9775
9776  assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
9777         (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
9778         "Illegal variable permute shuffle type");
9779
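  // If the permute is performed at a narrower element type (e.g. v8i16
  // lowered via a v16i8 PSHUFB), repeat and rescale the indices so they
  // address the individual narrow elements.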
9780  uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
9781  if (Scale > 1)
9782    IndicesVec = ScaleIndices(IndicesVec, Scale);
9783
9784  EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
9785  IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
9786
9787  SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
9788  SDValue Res = Opcode == X86ISD::VPERMV
9789                    ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
9790                    : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
9791  return DAG.getBitcast(VT, Res);
9792}
9793
9794// Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
9795// reasoned to be a permutation of a vector by indices in a non-constant vector.
9796// (build_vector (extract_elt V, (extract_elt I, 0)),
9797//               (extract_elt V, (extract_elt I, 1)),
9798//                    ...
9799// ->
9800// (vpermv I, V)
9801//
9802// TODO: Handle undefs
9803// TODO: Utilize pshufb and zero mask blending to support more efficient
9804// construction of vectors with constant-0 elements.
9805static SDValue
9806LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
9807                                   const X86Subtarget &Subtarget) {
9808  SDValue SrcVec, IndicesVec;
9809  // Check for a match of the permute source vector and permute index elements.
9810  // This is done by checking that the i-th build_vector operand is of the form:
9811  // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
9812  for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
9813    SDValue Op = V.getOperand(Idx);
9814    if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9815      return SDValue();
9816
9817    // If this is the first extract encountered in V, set the source vector,
9818    // otherwise verify the extract is from the previously defined source
9819    // vector.
9820    if (!SrcVec)
9821      SrcVec = Op.getOperand(0);
9822    else if (SrcVec != Op.getOperand(0))
9823      return SDValue();
9824    SDValue ExtractedIndex = Op->getOperand(1);
9825    // Peek through extends.
9826    if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
9827        ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
9828      ExtractedIndex = ExtractedIndex.getOperand(0);
9829    if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9830      return SDValue();
9831
9832    // If this is the first extract from the index vector candidate, set the
9833    // indices vector, otherwise verify the extract is from the previously
9834    // defined indices vector.
9835    if (!IndicesVec)
9836      IndicesVec = ExtractedIndex.getOperand(0);
9837    else if (IndicesVec != ExtractedIndex.getOperand(0))
9838      return SDValue();
9839
9840    auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
9841    if (!PermIdx || PermIdx->getAPIntValue() != Idx)
9842      return SDValue();
9843  }
9844
9845  SDLoc DL(V);
9846  MVT VT = V.getSimpleValueType();
9847  return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
9848}
9849
9850SDValue
9851X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
9852  SDLoc dl(Op);
9853
9854  MVT VT = Op.getSimpleValueType();
9855  MVT EltVT = VT.getVectorElementType();
9856  unsigned NumElems = Op.getNumOperands();
9857
9858  // Generate vectors for predicate vectors.
9859  if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
9860    return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
9861
9862  if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
9863    return VectorConstant;
9864
9865  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
9866  if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
9867    return AddSub;
9868  if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
9869    return HorizontalOp;
9870  if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
9871    return Broadcast;
9872  if (SDValue BitOp = lowerBuildVectorToBitOp(BV, DAG))
9873    return BitOp;
9874
9875  unsigned EVTBits = EltVT.getSizeInBits();
9876
9877  unsigned NumZero  = 0;
9878  unsigned NumNonZero = 0;
9879  uint64_t NonZeros = 0;
9880  bool IsAllConstants = true;
9881  SmallSet<SDValue, 8> Values;
9882  unsigned NumConstants = NumElems;
9883  for (unsigned i = 0; i < NumElems; ++i) {
9884    SDValue Elt = Op.getOperand(i);
9885    if (Elt.isUndef())
9886      continue;
9887    Values.insert(Elt);
9888    if (!isa<ConstantSDNode>(Elt) && !isa<ConstantFPSDNode>(Elt)) {
9889      IsAllConstants = false;
9890      NumConstants--;
9891    }
9892    if (X86::isZeroNode(Elt))
9893      NumZero++;
9894    else {
9895      assert(i < sizeof(NonZeros) * 8); // Make sure the shift is within range.
9896      NonZeros |= ((uint64_t)1 << i);
9897      NumNonZero++;
9898    }
9899  }
9900
9901  // All-undef vector. Return an UNDEF. All-zero vectors were handled above.
9902  if (NumNonZero == 0)
9903    return DAG.getUNDEF(VT);
9904
9905  // If we are inserting one variable into a vector of non-zero constants, try
9906  // to avoid loading each constant element as a scalar. Load the constants as a
9907  // vector and then insert the variable scalar element. If insertion is not
9908  // supported, fall back to a shuffle to get the scalar blended with the
9909  // constants. Insertion into a zero vector is handled as a special-case
9910  // somewhere below here.
9911  if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
9912      (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
9913       isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
9914    // Create an all-constant vector. The variable element in the old
9915    // build vector is replaced by undef in the constant vector. Save the
9916    // variable scalar element and its index for use in the insertelement.
9917    LLVMContext &Context = *DAG.getContext();
9918    Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
9919    SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
9920    SDValue VarElt;
9921    SDValue InsIndex;
9922    for (unsigned i = 0; i != NumElems; ++i) {
9923      SDValue Elt = Op.getOperand(i);
9924      if (auto *C = dyn_cast<ConstantSDNode>(Elt))
9925        ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
9926      else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
9927        ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
9928      else if (!Elt.isUndef()) {
9929        assert(!VarElt.getNode() && !InsIndex.getNode() &&
9930               "Expected one variable element in this vector");
9931        VarElt = Elt;
9932        InsIndex = DAG.getConstant(i, dl, getVectorIdxTy(DAG.getDataLayout()));
9933      }
9934    }
9935    Constant *CV = ConstantVector::get(ConstVecOps);
9936    SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
9937
9938    // The constants we just created may not be legal (e.g., floating point). We
9939    // must lower the vector right here because we cannot guarantee that we'll
9940    // legalize it before loading it. This is also why we could not just create
9941    // a new build vector here. If the build vector contains illegal constants,
9942    // it could get split back up into a series of insert elements.
9943    // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
9944    SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
9945    MachineFunction &MF = DAG.getMachineFunction();
9946    MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
9947    SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
9948    unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
9949    unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
9950    if (InsertC < NumEltsInLow128Bits)
9951      return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
9952
9953    // There's no good way to insert into the high elements of a >128-bit
9954    // vector, so use shuffles to avoid an extract/insert sequence.
9955    assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
9956    assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
9957    SmallVector<int, 8> ShuffleMask;
9958    unsigned NumElts = VT.getVectorNumElements();
9959    for (unsigned i = 0; i != NumElts; ++i)
9960      ShuffleMask.push_back(i == InsertC ? NumElts : i);
9961    SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
9962    return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
9963  }
9964
9965  // Special case for a single non-zero, non-undef element.
9966  if (NumNonZero == 1) {
9967    unsigned Idx = countTrailingZeros(NonZeros);
9968    SDValue Item = Op.getOperand(Idx);
9969
9970    // If we have a constant or non-constant insertion into the low element of
9971    // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
9972    // the rest of the elements.  This will be matched as movd/movq/movss/movsd
9973    // depending on what the source datatype is.
9974    if (Idx == 0) {
9975      if (NumZero == 0)
9976        return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9977
9978      if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
9979          (EltVT == MVT::i64 && Subtarget.is64Bit())) {
9980        assert((VT.is128BitVector() || VT.is256BitVector() ||
9981                VT.is512BitVector()) &&
9982               "Expected an SSE value type!");
9983        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9984        // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
9985        return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
9986      }
9987
9988      // We can't directly insert an i8 or i16 into a vector, so zero extend
9989      // it to i32 first.
9990      if (EltVT == MVT::i16 || EltVT == MVT::i8) {
9991        Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
9992        MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
9993        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
9994        Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
9995        return DAG.getBitcast(VT, Item);
9996      }
9997    }
9998
9999    // Is it a vector logical left shift?
10000    if (NumElems == 2 && Idx == 1 &&
10001        X86::isZeroNode(Op.getOperand(0)) &&
10002        !X86::isZeroNode(Op.getOperand(1))) {
10003      unsigned NumBits = VT.getSizeInBits();
10004      return getVShift(true, VT,
10005                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
10006                                   VT, Op.getOperand(1)),
10007                       NumBits/2, DAG, *this, dl);
10008    }
10009
10010    if (IsAllConstants) // Otherwise, it's better to do a constpool load.
10011      return SDValue();
10012
10013    // Otherwise, if this is a vector with i32 or f32 elements, and the element
10014    // is a non-constant being inserted into an element other than the low one,
10015    // we can't use a constant pool load.  Instead, use SCALAR_TO_VECTOR (aka
10016    // movd/movss) to move this into the low element, then shuffle it into
10017    // place.
10018    if (EVTBits == 32) {
10019      Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
10020      return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
10021    }
10022  }
10023
10024  // Splat is obviously ok. Let legalizer expand it to a shuffle.
10025  if (Values.size() == 1) {
10026    if (EVTBits == 32) {
10027      // Instead of a shuffle like this:
10028      // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
10029      // Check if it's possible to issue this instead:
10030      // shuffle (vload ptr), undef, <1, 1, 1, 1>
10031      unsigned Idx = countTrailingZeros(NonZeros);
10032      SDValue Item = Op.getOperand(Idx);
10033      if (Op.getNode()->isOnlyUserOf(Item.getNode()))
10034        return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
10035    }
10036    return SDValue();
10037  }
10038
10039  // A vector full of immediates; various special cases are already
10040  // handled, so this is best done with a single constant-pool load.
10041  if (IsAllConstants)
10042    return SDValue();
10043
10044  if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
10045      return V;
10046
10047  // See if we can use a vector load to get all of the elements.
10048  {
10049    SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
10050    if (SDValue LD =
10051            EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
10052      return LD;
10053  }
10054
10055  // If this is a splat of pairs of 32-bit elements, we can use a narrower
10056  // build_vector and broadcast it.
10057  // TODO: We could probably generalize this more.
10058  if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
10059    SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
10060                       DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
10061    auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
10062      // Make sure all the even/odd operands match.
10063      for (unsigned i = 2; i != NumElems; ++i)
10064        if (Ops[i % 2] != Op.getOperand(i))
10065          return false;
10066      return true;
10067    };
10068    if (CanSplat(Op, NumElems, Ops)) {
10069      MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
10070      MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
10071      // Create a new build vector and cast to v2i64/v2f64.
10072      SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
10073                                     DAG.getBuildVector(NarrowVT, dl, Ops));
10074      // Broadcast from v2i64/v2f64 and cast to final VT.
10075      MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems/2);
10076      return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
10077                                            NewBV));
10078    }
10079  }
10080
10081  // For AVX-length vectors, build the individual 128-bit pieces and use
10082  // shuffles to put them in place.
10083  if (VT.getSizeInBits() > 128) {
10084    MVT HVT = MVT::getVectorVT(EltVT, NumElems/2);
10085
10086    // Build both the lower and upper subvector.
10087    SDValue Lower =
10088        DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
10089    SDValue Upper = DAG.getBuildVector(
10090        HVT, dl, Op->ops().slice(NumElems / 2, NumElems / 2));
10091
10092    // Recreate the wider vector with the lower and upper part.
10093    return concatSubVectors(Lower, Upper, DAG, dl);
10094  }
10095
10096  // Let legalizer expand 2-wide build_vectors.
10097  if (EVTBits == 64) {
10098    if (NumNonZero == 1) {
10099      // One half is zero or undef.
10100      unsigned Idx = countTrailingZeros(NonZeros);
10101      SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
10102                               Op.getOperand(Idx));
10103      return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
10104    }
10105    return SDValue();
10106  }
10107
10108  // If element VT is < 32 bits, convert it to inserts into a zero vector.
10109  if (EVTBits == 8 && NumElems == 16)
10110    if (SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
10111                                          DAG, Subtarget))
10112      return V;
10113
10114  if (EVTBits == 16 && NumElems == 8)
10115    if (SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
10116                                          DAG, Subtarget))
10117      return V;
10118
10119  // If the element VT is 32 bits and the vector has 4 elements, try INSERTPS.
10120  if (EVTBits == 32 && NumElems == 4)
10121    if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
10122      return V;
10123
10124  // If the element VT is 32 bits, turn it into a number of shuffles.
10125  if (NumElems == 4 && NumZero > 0) {
10126    SmallVector<SDValue, 8> Ops(NumElems);
10127    for (unsigned i = 0; i < 4; ++i) {
10128      bool isZero = !(NonZeros & (1ULL << i));
10129      if (isZero)
10130        Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
10131      else
10132        Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
10133    }
10134
10135    for (unsigned i = 0; i < 2; ++i) {
10136      switch ((NonZeros >> (i*2)) & 0x3) {
10137        default: llvm_unreachable("Unexpected NonZero count");
10138        case 0:
10139          Ops[i] = Ops[i*2];  // Must be a zero vector.
10140          break;
10141        case 1:
10142          Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
10143          break;
10144        case 2:
10145          Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
10146          break;
10147        case 3:
10148          Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
10149          break;
10150      }
10151    }
10152
10153    bool Reverse1 = (NonZeros & 0x3) == 2;
10154    bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
10155    int MaskVec[] = {
10156      Reverse1 ? 1 : 0,
10157      Reverse1 ? 0 : 1,
10158      static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
10159      static_cast<int>(Reverse2 ? NumElems   : NumElems+1)
10160    };
10161    return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
10162  }
10163
10164  assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
10165
10166  // Check for a build vector from mostly shuffle plus few inserting.
10167  if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
10168    return Sh;
10169
10170  // For SSE 4.1, use insertps to put the high elements into the low element.
10171  if (Subtarget.hasSSE41()) {
10172    SDValue Result;
10173    if (!Op.getOperand(0).isUndef())
10174      Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
10175    else
10176      Result = DAG.getUNDEF(VT);
10177
10178    for (unsigned i = 1; i < NumElems; ++i) {
10179      if (Op.getOperand(i).isUndef()) continue;
10180      Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
10181                           Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
10182    }
10183    return Result;
10184  }
10185
10186  // Otherwise, expand into a number of unpckl*. Start by extending each of
10187  // our (non-undef) elements to the full vector width with the element in the
10188  // bottom slot of the vector (which generates no code for SSE).
10189  SmallVector<SDValue, 8> Ops(NumElems);
10190  for (unsigned i = 0; i < NumElems; ++i) {
10191    if (!Op.getOperand(i).isUndef())
10192      Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
10193    else
10194      Ops[i] = DAG.getUNDEF(VT);
10195  }
10196
10197  // Next, we iteratively mix elements, e.g. for v4f32:
10198  //   Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
10199  //         : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
10200  //   Step 2: unpcklpd X, Y ==>    <3, 2, 1, 0>
10201  for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
10202    // Generate scaled UNPCKL shuffle mask.
10203    SmallVector<int, 16> Mask;
10204    for (unsigned i = 0; i != Scale; ++i)
10205      Mask.push_back(i);
10206    for (unsigned i = 0; i != Scale; ++i)
10207      Mask.push_back(NumElems+i);
10208    Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
10209
10210    for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
10211      Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
10212  }
10213  return Ops[0];
10214}
10215
10216// 256-bit AVX can use the vinsertf128 instruction
10217// to create 256-bit vectors from two other 128-bit ones.
10218// TODO: Detect subvector broadcast here instead of DAG combine?
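// For example, (v8i32 concat_vectors (v4i32 A), (v4i32 B)) with both halves
// non-zero is built as
//   insert_subvector (insert_subvector undef:v8i32, A, 0), B, 4
// which can then be selected to a vinsertf128/vinserti128 of B into the
// widened A.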
10219static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
10220                                      const X86Subtarget &Subtarget) {
10221  SDLoc dl(Op);
10222  MVT ResVT = Op.getSimpleValueType();
10223
10224  assert((ResVT.is256BitVector() ||
10225          ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
10226
10227  unsigned NumOperands = Op.getNumOperands();
10228  unsigned NumZero = 0;
10229  unsigned NumNonZero = 0;
10230  unsigned NonZeros = 0;
10231  for (unsigned i = 0; i != NumOperands; ++i) {
10232    SDValue SubVec = Op.getOperand(i);
10233    if (SubVec.isUndef())
10234      continue;
10235    if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
10236      ++NumZero;
10237    else {
10238      assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
10239      NonZeros |= 1 << i;
10240      ++NumNonZero;
10241    }
10242  }
10243
10244  // If we have more than 2 non-zeros, build each half separately.
10245  if (NumNonZero > 2) {
10246    MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
10247    ArrayRef<SDUse> Ops = Op->ops();
10248    SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10249                             Ops.slice(0, NumOperands/2));
10250    SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10251                             Ops.slice(NumOperands/2));
10252    return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
10253  }
10254
10255  // Otherwise, build it up through insert_subvectors.
10256  SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
10257                        : DAG.getUNDEF(ResVT);
10258
10259  MVT SubVT = Op.getOperand(0).getSimpleValueType();
10260  unsigned NumSubElems = SubVT.getVectorNumElements();
10261  for (unsigned i = 0; i != NumOperands; ++i) {
10262    if ((NonZeros & (1 << i)) == 0)
10263      continue;
10264
10265    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
10266                      Op.getOperand(i),
10267                      DAG.getIntPtrConstant(i * NumSubElems, dl));
10268  }
10269
10270  return Vec;
10271}
10272
10273// Lower a CONCAT_VECTORS of vXi1 operands (i.e. concatenation in a mask
10274// register), taking advantage of operands that are known to be all zeros
10275// or undef.
10276// TODO: Merge this with LowerAVXCONCAT_VECTORS?
10277static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
10278                                       const X86Subtarget &Subtarget,
10279                                       SelectionDAG & DAG) {
10280  SDLoc dl(Op);
10281  MVT ResVT = Op.getSimpleValueType();
10282  unsigned NumOperands = Op.getNumOperands();
10283
10284  assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
10285         "Unexpected number of operands in CONCAT_VECTORS");
10286
10287  uint64_t Zeros = 0;
10288  uint64_t NonZeros = 0;
10289  for (unsigned i = 0; i != NumOperands; ++i) {
10290    SDValue SubVec = Op.getOperand(i);
10291    if (SubVec.isUndef())
10292      continue;
10293    assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
10294    if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
10295      Zeros |= (uint64_t)1 << i;
10296    else
10297      NonZeros |= (uint64_t)1 << i;
10298  }
10299
10300  unsigned NumElems = ResVT.getVectorNumElements();
10301
10302  // If we are inserting a non-zero vector and there are zeros in the LSBs and
10303  // undef in the MSBs, we need to emit a KSHIFTL. The generic lowering to
10304  // insert_subvector would give us two kshifts.
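  // For example, (v8i1 concat_vectors zero, X:v2i1, undef, undef) is lowered
  // with a single KSHIFTL of X by 2 elements, which pulls zeros into the low
  // bits, followed by extracting the v8i1 result.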
10305  if (isPowerOf2_64(NonZeros) && Zeros != 0 && NonZeros > Zeros &&
10306      Log2_64(NonZeros) != NumOperands - 1) {
10307    MVT ShiftVT = ResVT;
10308    if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
10309      ShiftVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
10310    unsigned Idx = Log2_64(NonZeros);
10311    SDValue SubVec = Op.getOperand(Idx);
10312    unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
10313    SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ShiftVT,
10314                         DAG.getUNDEF(ShiftVT), SubVec,
10315                         DAG.getIntPtrConstant(0, dl));
10316    Op = DAG.getNode(X86ISD::KSHIFTL, dl, ShiftVT, SubVec,
10317                     DAG.getTargetConstant(Idx * SubVecNumElts, dl, MVT::i8));
10318    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, Op,
10319                       DAG.getIntPtrConstant(0, dl));
10320  }
10321
10322  // If there are zero or one non-zeros we can handle this very simply.
10323  if (NonZeros == 0 || isPowerOf2_64(NonZeros)) {
10324    SDValue Vec = Zeros ? DAG.getConstant(0, dl, ResVT) : DAG.getUNDEF(ResVT);
10325    if (!NonZeros)
10326      return Vec;
10327    unsigned Idx = Log2_64(NonZeros);
10328    SDValue SubVec = Op.getOperand(Idx);
10329    unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
10330    return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
10331                       DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
10332  }
10333
10334  if (NumOperands > 2) {
10335    MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
10336    ArrayRef<SDUse> Ops = Op->ops();
10337    SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10338                             Ops.slice(0, NumOperands/2));
10339    SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10340                             Ops.slice(NumOperands/2));
10341    return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
10342  }
10343
10344  assert(countPopulation(NonZeros) == 2 && "Simple cases not handled?");
10345
10346  if (ResVT.getVectorNumElements() >= 16)
10347    return Op; // The operation is legal with KUNPCK
10348
10349  SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
10350                            DAG.getUNDEF(ResVT), Op.getOperand(0),
10351                            DAG.getIntPtrConstant(0, dl));
10352  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
10353                     DAG.getIntPtrConstant(NumElems/2, dl));
10354}
10355
10356static SDValue LowerCONCAT_VECTORS(SDValue Op,
10357                                   const X86Subtarget &Subtarget,
10358                                   SelectionDAG &DAG) {
10359  MVT VT = Op.getSimpleValueType();
10360  if (VT.getVectorElementType() == MVT::i1)
10361    return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
10362
10363  assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
10364         (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
10365          Op.getNumOperands() == 4)));
10366
10367  // AVX can use the vinsertf128 instruction to create 256-bit vectors
10368  // from two other 128-bit ones.
10369
10370  // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
10371  return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
10372}
10373
10374//===----------------------------------------------------------------------===//
10375// Vector shuffle lowering
10376//
10377// This is an experimental code path for lowering vector shuffles on x86. It is
10378// designed to handle arbitrary vector shuffles and blends, gracefully
10379// degrading performance as necessary. It works hard to recognize idiomatic
10380// shuffles and lower them to optimal instruction patterns while staying within
10381// a framework that allows reasonably efficient handling of all vector shuffle
10382// patterns.
10383//===----------------------------------------------------------------------===//
10384
10385/// Tiny helper function to identify a no-op mask.
10386///
10387/// This is a somewhat boring predicate function. It checks whether the mask
10388/// array input, which is assumed to be a single-input shuffle mask of the kind
10389/// used by the X86 shuffle instructions (not a fully general
10390/// ShuffleVectorSDNode mask), requires any shuffles to occur. Both undef and an
10391/// in-place shuffle are no-ops.
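/// For example, <0, -1, 2, 3> is a no-op mask, while <1, 0, 2, 3> is not.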
10392static bool isNoopShuffleMask(ArrayRef<int> Mask) {
10393  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10394    assert(Mask[i] >= -1 && "Out of bound mask element!");
10395    if (Mask[i] >= 0 && Mask[i] != i)
10396      return false;
10397  }
10398  return true;
10399}
10400
10401/// Test whether there are elements crossing LaneSizeInBits lanes in this
10402/// shuffle mask.
10403///
10404/// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
10405/// and we routinely test for these.
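/// For example, with 128-bit lanes and 32-bit scalars (LaneSize == 4), the
/// v8f32 mask <1, 0, 3, 2, 5, 4, 7, 6> stays within lanes, while
/// <4, 5, 6, 7, 0, 1, 2, 3> crosses them.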
10406static bool isLaneCrossingShuffleMask(unsigned LaneSizeInBits,
10407                                      unsigned ScalarSizeInBits,
10408                                      ArrayRef<int> Mask) {
10409  assert(LaneSizeInBits && ScalarSizeInBits &&
10410         (LaneSizeInBits % ScalarSizeInBits) == 0 &&
10411         "Illegal shuffle lane size");
10412  int LaneSize = LaneSizeInBits / ScalarSizeInBits;
10413  int Size = Mask.size();
10414  for (int i = 0; i < Size; ++i)
10415    if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10416      return true;
10417  return false;
10418}
10419
10420/// Test whether there are elements crossing 128-bit lanes in this
10421/// shuffle mask.
10422static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
10423  return isLaneCrossingShuffleMask(128, VT.getScalarSizeInBits(), Mask);
10424}
10425
10426/// Test whether a shuffle mask is equivalent within each sub-lane.
10427///
10428/// This checks a shuffle mask to see if it is performing the same
10429/// lane-relative shuffle in each sub-lane. This trivially implies
10430/// that it is also not lane-crossing. It may however involve a blend from the
10431/// same lane of a second vector.
10432///
10433/// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
10434/// non-trivial to compute in the face of undef lanes. The representation is
10435/// suitable for use with existing 128-bit shuffles as entries from the second
10436/// vector have been remapped to [LaneSize, 2*LaneSize).
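/// For example, the v8f32 mask <0, 8, 1, 9, 4, 12, 5, 13> performs the same
/// in-lane shuffle in both 128-bit lanes, so \p RepeatedMask becomes
/// <0, 4, 1, 5> (second-vector entries remapped into [4, 8)).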
10437static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
10438                                  ArrayRef<int> Mask,
10439                                  SmallVectorImpl<int> &RepeatedMask) {
10440  auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
10441  RepeatedMask.assign(LaneSize, -1);
10442  int Size = Mask.size();
10443  for (int i = 0; i < Size; ++i) {
10444    assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
10445    if (Mask[i] < 0)
10446      continue;
10447    if ((Mask[i] % Size) / LaneSize != i / LaneSize)
10448      // This entry crosses lanes, so there is no way to model this shuffle.
10449      return false;
10450
10451    // Ok, handle the in-lane shuffles by detecting if and when they repeat.
10452    // Adjust second vector indices to start at LaneSize instead of Size.
10453    int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
10454                                : Mask[i] % LaneSize + LaneSize;
10455    if (RepeatedMask[i % LaneSize] < 0)
10456      // This is the first non-undef entry in this slot of a 128-bit lane.
10457      RepeatedMask[i % LaneSize] = LocalM;
10458    else if (RepeatedMask[i % LaneSize] != LocalM)
10459      // Found a mismatch with the repeated mask.
10460      return false;
10461  }
10462  return true;
10463}
10464
10465/// Test whether a shuffle mask is equivalent within each 128-bit lane.
10466static bool
10467is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
10468                                SmallVectorImpl<int> &RepeatedMask) {
10469  return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
10470}
10471
10472static bool
10473is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
10474  SmallVector<int, 32> RepeatedMask;
10475  return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
10476}
10477
10478/// Test whether a shuffle mask is equivalent within each 256-bit lane.
10479static bool
10480is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
10481                                SmallVectorImpl<int> &RepeatedMask) {
10482  return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
10483}
10484
10485/// Test whether a target shuffle mask is equivalent within each sub-lane.
10486/// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
10487static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
10488                                        ArrayRef<int> Mask,
10489                                        SmallVectorImpl<int> &RepeatedMask) {
10490  int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
10491  RepeatedMask.assign(LaneSize, SM_SentinelUndef);
10492  int Size = Mask.size();
10493  for (int i = 0; i < Size; ++i) {
10494    assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
10495    if (Mask[i] == SM_SentinelUndef)
10496      continue;
10497    if (Mask[i] == SM_SentinelZero) {
10498      if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
10499        return false;
10500      RepeatedMask[i % LaneSize] = SM_SentinelZero;
10501      continue;
10502    }
10503    if ((Mask[i] % Size) / LaneSize != i / LaneSize)
10504      // This entry crosses lanes, so there is no way to model this shuffle.
10505      return false;
10506
10507    // Ok, handle the in-lane shuffles by detecting if and when they repeat.
10508    // Adjust second vector indices to start at LaneSize instead of Size.
10509    int LocalM =
10510        Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
10511    if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
10512      // This is the first non-undef entry in this slot of a 128-bit lane.
10513      RepeatedMask[i % LaneSize] = LocalM;
10514    else if (RepeatedMask[i % LaneSize] != LocalM)
10515      // Found a mismatch with the repeated mask.
10516      return false;
10517  }
10518  return true;
10519}
10520
10521/// Checks whether a shuffle mask is equivalent to an explicit list of
10522/// arguments.
10523///
10524/// This is a fast way to test a shuffle mask against a fixed pattern:
10525///
10526///   if (isShuffleEquivalent(V1, V2, Mask, {3, 2, 1, 0})) { ... }
10527///
10528/// It returns true if the mask is exactly as wide as the argument list, and
10529/// each element of the mask is either -1 (signifying undef) or the value given
10530/// in the argument.
10531static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
10532                                ArrayRef<int> ExpectedMask) {
10533  if (Mask.size() != ExpectedMask.size())
10534    return false;
10535
10536  int Size = Mask.size();
10537
10538  // If the values are build vectors, we can look through them to find
10539  // equivalent inputs that make the shuffles equivalent.
10540  auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
10541  auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
10542
10543  for (int i = 0; i < Size; ++i) {
10544    assert(Mask[i] >= -1 && "Out of bound mask element!");
10545    if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
10546      auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
10547      auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
10548      if (!MaskBV || !ExpectedBV ||
10549          MaskBV->getOperand(Mask[i] % Size) !=
10550              ExpectedBV->getOperand(ExpectedMask[i] % Size))
10551        return false;
10552    }
10553  }
10554
10555  return true;
10556}
10557
10558/// Checks whether a target shuffle mask is equivalent to an explicit pattern.
10559///
10560/// The masks must be exactly the same width.
10561///
10562/// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
10563/// value in ExpectedMask is always accepted. Otherwise the indices must match.
10564///
10565/// SM_SentinelZero is accepted as a valid negative index but must match in
10566/// both.
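/// For example, Mask <-1, 1, SM_SentinelZero, 3> is equivalent to
/// ExpectedMask <0, 1, SM_SentinelZero, 3>: the undef entry accepts anything
/// and the zero sentinels match.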
10567static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
10568                                      ArrayRef<int> ExpectedMask,
10569                                      SDValue V1 = SDValue(),
10570                                      SDValue V2 = SDValue()) {
10571  int Size = Mask.size();
10572  if (Size != (int)ExpectedMask.size())
10573    return false;
10574  assert(isUndefOrZeroOrInRange(ExpectedMask, 0, 2 * Size) &&
10575         "Illegal target shuffle mask");
10576
10577  // Check for out-of-range target shuffle mask indices.
10578  if (!isUndefOrZeroOrInRange(Mask, 0, 2 * Size))
10579    return false;
10580
10581  // If the values are build vectors, we can look through them to find
10582  // equivalent inputs that make the shuffles equivalent.
10583  auto *BV1 = dyn_cast_or_null<BuildVectorSDNode>(V1);
10584  auto *BV2 = dyn_cast_or_null<BuildVectorSDNode>(V2);
10585  BV1 = ((BV1 && Size != (int)BV1->getNumOperands()) ? nullptr : BV1);
10586  BV2 = ((BV2 && Size != (int)BV2->getNumOperands()) ? nullptr : BV2);
10587
10588  for (int i = 0; i < Size; ++i) {
10589    if (Mask[i] == SM_SentinelUndef || Mask[i] == ExpectedMask[i])
10590      continue;
10591    if (0 <= Mask[i] && 0 <= ExpectedMask[i]) {
10592      auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
10593      auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
10594      if (MaskBV && ExpectedBV &&
10595          MaskBV->getOperand(Mask[i] % Size) ==
10596              ExpectedBV->getOperand(ExpectedMask[i] % Size))
10597        continue;
10598    }
10599    // TODO - handle SM_Sentinel equivalences.
10600    return false;
10601  }
10602  return true;
10603}
10604
10605// Attempt to create a shuffle mask from a VSELECT condition mask.
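// For example, for a 4-element VSELECT with condition <1, 0, undef, 1> this
// produces the mask <0, 5, 6, 3>: a true lane selects from the first operand
// (index i), a false or undef lane from the second (index i + Size).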
10606static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
10607                                         SDValue Cond) {
10608  if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
10609    return false;
10610
10611  unsigned Size = Cond.getValueType().getVectorNumElements();
10612  Mask.resize(Size, SM_SentinelUndef);
10613
10614  for (int i = 0; i != (int)Size; ++i) {
10615    SDValue CondElt = Cond.getOperand(i);
10616    Mask[i] = i;
10617    // Arbitrarily choose from the 2nd operand if the select condition element
10618    // is undef.
10619    // TODO: Can we do better by matching patterns such as even/odd?
10620    if (CondElt.isUndef() || isNullConstant(CondElt))
10621      Mask[i] += Size;
10622  }
10623
10624  return true;
10625}
10626
10627// Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
10628// instructions.
10629static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) {
10630  if (VT != MVT::v8i32 && VT != MVT::v8f32)
10631    return false;
10632
10633  SmallVector<int, 8> Unpcklwd;
10634  createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
10635                          /* Unary = */ false);
10636  SmallVector<int, 8> Unpckhwd;
10637  createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
10638                          /* Unary = */ false);
10639  bool IsUnpackwdMask = (isTargetShuffleEquivalent(Mask, Unpcklwd) ||
10640                         isTargetShuffleEquivalent(Mask, Unpckhwd));
10641  return IsUnpackwdMask;
10642}
10643
10644static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask) {
10645  // Create 128-bit vector type based on mask size.
10646  MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
10647  MVT VT = MVT::getVectorVT(EltVT, Mask.size());
10648
10649  // We can't assume a canonical shuffle mask, so try the commuted version too.
10650  SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
10651  ShuffleVectorSDNode::commuteMask(CommutedMask);
10652
10653  // Match any of unary/binary or low/high.
10654  for (unsigned i = 0; i != 4; ++i) {
10655    SmallVector<int, 16> UnpackMask;
10656    createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
10657    if (isTargetShuffleEquivalent(Mask, UnpackMask) ||
10658        isTargetShuffleEquivalent(CommutedMask, UnpackMask))
10659      return true;
10660  }
10661  return false;
10662}
10663
10664/// Return true if a shuffle mask chooses elements identically in its top and
10665/// bottom halves. For example, any splat mask has the same top and bottom
10666/// halves. If an element is undefined in only one half of the mask, the halves
10667/// are not considered identical.
10668static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
10669  assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
10670  unsigned HalfSize = Mask.size() / 2;
10671  for (unsigned i = 0; i != HalfSize; ++i) {
10672    if (Mask[i] != Mask[i + HalfSize])
10673      return false;
10674  }
10675  return true;
10676}
10677
10678/// Get a 4-lane 8-bit shuffle immediate for a mask.
10679///
10680/// This helper function produces an 8-bit shuffle immediate corresponding to
10681/// the ubiquitous shuffle encoding scheme used in x86 instructions for
10682/// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
10683/// example.
10684///
10685/// NB: We rely heavily on "undef" masks preserving the input lane.
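///
/// For example, the mask <3, 2, 1, 0> encodes to 0x1B, and an all-undef mask
/// <-1, -1, -1, -1> encodes to 0xE4 (the identity shuffle), since each undef
/// slot keeps its own input lane.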
10686static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
10687  assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
10688  assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
10689  assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
10690  assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
10691  assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
10692
10693  unsigned Imm = 0;
10694  Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
10695  Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
10696  Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
10697  Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
10698  return Imm;
10699}
10700
10701static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
10702                                          SelectionDAG &DAG) {
10703  return DAG.getTargetConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
10704}
10705
10706// The shuffle result has the form:
10707//   0*, a[0], 0*, a[1], ..., 0*, a[n]   (n >= 0, a[] in ascending order),
10708// i.e. runs of zeros interleaved with the non-zero elements in increasing
10709// order. Each element of Zeroable corresponds to a particular element of Mask,
10710// as described in the computeZeroableShuffleElements function.
10711//
10712// The function looks for a sub-mask whose non-zero elements are in increasing
10713// order; if such a sub-mask exists, it returns true.
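//
// For example, for a v4i32 shuffle with Mask <0, 1, 4, 5> where elements 2
// and 3 are zeroable, the non-zero elements 0 and 1 are consecutive and
// increasing, so this returns true with IsZeroSideLeft == false.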
10713static bool isNonZeroElementsInOrder(const APInt &Zeroable,
10714                                     ArrayRef<int> Mask, const EVT &VectorType,
10715                                     bool &IsZeroSideLeft) {
10716  int NextElement = -1;
10717  // Check if the Mask's nonzero elements are in increasing order.
10718  for (int i = 0, e = Mask.size(); i < e; i++) {
10719    // Check that the mask's zero elements are built from only zeros.
10720    assert(Mask[i] >= -1 && "Out of bound mask element!");
10721    if (Mask[i] < 0)
10722      return false;
10723    if (Zeroable[i])
10724      continue;
10725    // Find the lowest non zero element
10726    if (NextElement < 0) {
10727      NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
10728      IsZeroSideLeft = NextElement != 0;
10729    }
10730    // Exit if the mask's non zero elements are not in increasing order.
10731    if (NextElement != Mask[i])
10732      return false;
10733    NextElement++;
10734  }
10735  return true;
10736}
10737
10738/// Try to lower a shuffle with a single PSHUFB of V1 or V2.
10739static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
10740                                      ArrayRef<int> Mask, SDValue V1,
10741                                      SDValue V2, const APInt &Zeroable,
10742                                      const X86Subtarget &Subtarget,
10743                                      SelectionDAG &DAG) {
10744  int Size = Mask.size();
10745  int LaneSize = 128 / VT.getScalarSizeInBits();
10746  const int NumBytes = VT.getSizeInBits() / 8;
10747  const int NumEltBytes = VT.getScalarSizeInBits() / 8;
10748
10749  assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
10750         (Subtarget.hasAVX2() && VT.is256BitVector()) ||
10751         (Subtarget.hasBWI() && VT.is512BitVector()));
10752
10753  SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
10754  // Sign bit set in i8 mask means zero element.
10755  SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
10756
10757  SDValue V;
10758  for (int i = 0; i < NumBytes; ++i) {
10759    int M = Mask[i / NumEltBytes];
10760    if (M < 0) {
10761      PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
10762      continue;
10763    }
10764    if (Zeroable[i / NumEltBytes]) {
10765      PSHUFBMask[i] = ZeroMask;
10766      continue;
10767    }
10768
10769    // We can only use a single input of V1 or V2.
10770    SDValue SrcV = (M >= Size ? V2 : V1);
10771    if (V && V != SrcV)
10772      return SDValue();
10773    V = SrcV;
10774    M %= Size;
10775
10776    // PSHUFB can't cross lanes, ensure this doesn't happen.
10777    if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
10778      return SDValue();
10779
10780    M = M % LaneSize;
10781    M = M * NumEltBytes + (i % NumEltBytes);
10782    PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
10783  }
10784  assert(V && "Failed to find a source input");
10785
10786  MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
10787  return DAG.getBitcast(
10788      VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
10789                      DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
10790}
10791
10792static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
10793                           const X86Subtarget &Subtarget, SelectionDAG &DAG,
10794                           const SDLoc &dl);
10795
10796// X86 has dedicated shuffle that can be lowered to VEXPAND
10797static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
10798                                    const APInt &Zeroable,
10799                                    ArrayRef<int> Mask, SDValue &V1,
10800                                    SDValue &V2, SelectionDAG &DAG,
10801                                    const X86Subtarget &Subtarget) {
10802  bool IsLeftZeroSide = true;
10803  if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
10804                                IsLeftZeroSide))
10805    return SDValue();
10806  unsigned VEXPANDMask = (~Zeroable).getZExtValue();
10807  MVT IntegerType =
10808      MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
10809  SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
10810  unsigned NumElts = VT.getVectorNumElements();
10811  assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
10812         "Unexpected number of vector elements");
10813  SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
10814                              Subtarget, DAG, DL);
10815  SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
10816  SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
10817  return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
10818}
10819
10820static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
10821                                  unsigned &UnpackOpcode, bool IsUnary,
10822                                  ArrayRef<int> TargetMask, const SDLoc &DL,
10823                                  SelectionDAG &DAG,
10824                                  const X86Subtarget &Subtarget) {
10825  int NumElts = VT.getVectorNumElements();
10826
10827  bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
10828  for (int i = 0; i != NumElts; i += 2) {
10829    int M1 = TargetMask[i + 0];
10830    int M2 = TargetMask[i + 1];
10831    Undef1 &= (SM_SentinelUndef == M1);
10832    Undef2 &= (SM_SentinelUndef == M2);
10833    Zero1 &= isUndefOrZero(M1);
10834    Zero2 &= isUndefOrZero(M2);
10835  }
10836  assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
10837         "Zeroable shuffle detected");
10838
10839  // Attempt to match the target mask against the unpack lo/hi mask patterns.
10840  SmallVector<int, 64> Unpckl, Unpckh;
10841  createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
10842  if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
10843    UnpackOpcode = X86ISD::UNPCKL;
10844    V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
10845    V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
10846    return true;
10847  }
10848
10849  createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
10850  if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
10851    UnpackOpcode = X86ISD::UNPCKH;
10852    V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
10853    V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
10854    return true;
10855  }
10856
10857  // If a unary shuffle, attempt to match as an unpack lo/hi with zero.
10858  if (IsUnary && (Zero1 || Zero2)) {
10859    // Don't bother if we can blend instead.
10860    if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
10861        isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
10862      return false;
10863
10864    bool MatchLo = true, MatchHi = true;
10865    for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
10866      int M = TargetMask[i];
10867
10868      // Ignore if the input is known to be zero or the index is undef.
10869      if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
10870          (M == SM_SentinelUndef))
10871        continue;
10872
10873      MatchLo &= (M == Unpckl[i]);
10874      MatchHi &= (M == Unpckh[i]);
10875    }
10876
10877    if (MatchLo || MatchHi) {
10878      UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
10879      V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
10880      V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
10881      return true;
10882    }
10883  }
10884
10885  // If a binary shuffle, commute and try again.
10886  if (!IsUnary) {
10887    ShuffleVectorSDNode::commuteMask(Unpckl);
10888    if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
10889      UnpackOpcode = X86ISD::UNPCKL;
10890      std::swap(V1, V2);
10891      return true;
10892    }
10893
10894    ShuffleVectorSDNode::commuteMask(Unpckh);
10895    if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
10896      UnpackOpcode = X86ISD::UNPCKH;
10897      std::swap(V1, V2);
10898      return true;
10899    }
10900  }
10901
10902  return false;
10903}
10904
10905// X86 has dedicated unpack instructions that can handle specific blend
10906// operations: UNPCKH and UNPCKL.
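// For example, for v4i32 the (binary) UNPCKL mask is <0, 4, 1, 5> and the
// UNPCKH mask is <2, 6, 3, 7>.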
10907static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
10908                                     ArrayRef<int> Mask, SDValue V1, SDValue V2,
10909                                     SelectionDAG &DAG) {
10910  SmallVector<int, 8> Unpckl;
10911  createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
10912  if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
10913    return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
10914
10915  SmallVector<int, 8> Unpckh;
10916  createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
10917  if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
10918    return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
10919
10920  // Commute and try again.
10921  ShuffleVectorSDNode::commuteMask(Unpckl);
10922  if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
10923    return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
10924
10925  ShuffleVectorSDNode::commuteMask(Unpckh);
10926  if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
10927    return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
10928
10929  return SDValue();
10930}
10931
10932static bool matchShuffleAsVPMOV(ArrayRef<int> Mask, bool SwappedOps,
10933                                int Delta) {
10934  int Size = (int)Mask.size();
10935  int Split = Size / Delta;
10936  int TruncatedVectorStart = SwappedOps ? Size : 0;
10937
10938  // Match for mask starting with e.g.: <8, 10, 12, 14,... or <0, 2, 4, 6,...
10939  if (!isSequentialOrUndefInRange(Mask, 0, Split, TruncatedVectorStart, Delta))
10940    return false;
10941
10942  // The rest of the mask should not refer to the truncated vector's elements.
10943  if (isAnyInRange(Mask.slice(Split, Size - Split), TruncatedVectorStart,
10944                   TruncatedVectorStart + Size))
10945    return false;
10946
10947  return true;
10948}
10949
10950// Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
10951//
10952// An example is the following:
10953//
10954// t0: ch = EntryToken
10955//           t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
10956//         t25: v4i32 = truncate t2
10957//       t41: v8i16 = bitcast t25
10958//       t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
10959//       Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
10960//     t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
10961//   t18: v2i64 = bitcast t51
10962//
10963// Without avx512vl, this is lowered to:
10964//
10965// vpmovqd %zmm0, %ymm0
10966// vpshufb {{.*#+}} xmm0 =
10967// xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
10968//
10969// But when avx512vl is available, one can just use a single vpmovdw
10970// instruction.
10971static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, ArrayRef<int> Mask,
10972                                     MVT VT, SDValue V1, SDValue V2,
10973                                     SelectionDAG &DAG,
10974                                     const X86Subtarget &Subtarget) {
10975  if (VT != MVT::v16i8 && VT != MVT::v8i16)
10976    return SDValue();
10977
10978  if (Mask.size() != VT.getVectorNumElements())
10979    return SDValue();
10980
10981  bool SwappedOps = false;
10982
10983  if (!ISD::isBuildVectorAllZeros(V2.getNode())) {
10984    if (!ISD::isBuildVectorAllZeros(V1.getNode()))
10985      return SDValue();
10986
10987    std::swap(V1, V2);
10988    SwappedOps = true;
10989  }
10990
10991  // Look for:
10992  //
10993  // bitcast (truncate <8 x i32> %vec to <8 x i16>) to <16 x i8>
10994  // bitcast (truncate <4 x i64> %vec to <4 x i32>) to <8 x i16>
10995  //
10996  // and similar ones.
10997  if (V1.getOpcode() != ISD::BITCAST)
10998    return SDValue();
10999  if (V1.getOperand(0).getOpcode() != ISD::TRUNCATE)
11000    return SDValue();
11001
11002  SDValue Src = V1.getOperand(0).getOperand(0);
11003  MVT SrcVT = Src.getSimpleValueType();
11004
11005  // The vptrunc** instructions truncating 128 bit and 256 bit vectors
11006  // are only available with avx512vl.
11007  if (!SrcVT.is512BitVector() && !Subtarget.hasVLX())
11008    return SDValue();
11009
11010  // Down Convert Word to Byte is only available with avx512bw. The case with
11011  // 256-bit output doesn't contain a shuffle and is therefore not handled here.
11012  if (SrcVT.getVectorElementType() == MVT::i16 && VT == MVT::v16i8 &&
11013      !Subtarget.hasBWI())
11014    return SDValue();
11015
11016  // The first half/quarter of the mask should refer to every second/fourth
11017  // element of the vector truncated and bitcasted.
11018  if (!matchShuffleAsVPMOV(Mask, SwappedOps, 2) &&
11019      !matchShuffleAsVPMOV(Mask, SwappedOps, 4))
11020    return SDValue();
11021
11022  return DAG.getNode(X86ISD::VTRUNC, DL, VT, Src);
11023}
11024
11025// X86 has dedicated pack instructions that can handle specific truncation
11026// operations: PACKSS and PACKUS.
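// For example, a v8i16 shuffle with mask <0, 2, 4, 6, 8, 10, 12, 14> over two
// bitcast v4i32 sources matches PACKUSDW when the upper 16 bits of every
// 32-bit element are known zero, or PACKSSDW when every element has more than
// 16 sign bits.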
11027static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
11028                                 unsigned &PackOpcode, ArrayRef<int> TargetMask,
11029                                 SelectionDAG &DAG,
11030                                 const X86Subtarget &Subtarget) {
11031  unsigned NumElts = VT.getVectorNumElements();
11032  unsigned BitSize = VT.getScalarSizeInBits();
11033  MVT PackSVT = MVT::getIntegerVT(BitSize * 2);
11034  MVT PackVT = MVT::getVectorVT(PackSVT, NumElts / 2);
11035
11036  auto MatchPACK = [&](SDValue N1, SDValue N2) {
11037    SDValue VV1 = DAG.getBitcast(PackVT, N1);
11038    SDValue VV2 = DAG.getBitcast(PackVT, N2);
11039    if (Subtarget.hasSSE41() || PackSVT == MVT::i16) {
11040      APInt ZeroMask = APInt::getHighBitsSet(BitSize * 2, BitSize);
11041      if ((N1.isUndef() || DAG.MaskedValueIsZero(VV1, ZeroMask)) &&
11042          (N2.isUndef() || DAG.MaskedValueIsZero(VV2, ZeroMask))) {
11043        V1 = VV1;
11044        V2 = VV2;
11045        SrcVT = PackVT;
11046        PackOpcode = X86ISD::PACKUS;
11047        return true;
11048      }
11049    }
11050    if ((N1.isUndef() || DAG.ComputeNumSignBits(VV1) > BitSize) &&
11051        (N2.isUndef() || DAG.ComputeNumSignBits(VV2) > BitSize)) {
11052      V1 = VV1;
11053      V2 = VV2;
11054      SrcVT = PackVT;
11055      PackOpcode = X86ISD::PACKSS;
11056      return true;
11057    }
11058    return false;
11059  };
11060
11061  // Try binary shuffle.
11062  SmallVector<int, 32> BinaryMask;
11063  createPackShuffleMask(VT, BinaryMask, false);
11064  if (isTargetShuffleEquivalent(TargetMask, BinaryMask, V1, V2))
11065    if (MatchPACK(V1, V2))
11066      return true;
11067
11068  // Try unary shuffle.
11069  SmallVector<int, 32> UnaryMask;
11070  createPackShuffleMask(VT, UnaryMask, true);
11071  if (isTargetShuffleEquivalent(TargetMask, UnaryMask, V1))
11072    if (MatchPACK(V1, V1))
11073      return true;
11074
11075  return false;
11076}
11077
11078static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
11079                                    SDValue V1, SDValue V2, SelectionDAG &DAG,
11080                                    const X86Subtarget &Subtarget) {
11081  MVT PackVT;
11082  unsigned PackOpcode;
11083  if (matchShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
11084                           Subtarget))
11085    return DAG.getNode(PackOpcode, DL, VT, DAG.getBitcast(PackVT, V1),
11086                       DAG.getBitcast(PackVT, V2));
11087
11088  return SDValue();
11089}
11090
11091/// Try to emit a bitmask instruction for a shuffle.
11092///
11093/// This handles cases where we can model a blend exactly as a bitmask due to
11094/// one of the inputs being zeroable.
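///
/// For example, a v4i32 shuffle whose mask selects elements 0 and 2 of V1 and
/// whose other two elements are zeroable can be lowered as V1 & <-1, 0, -1, 0>.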
11095static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
11096                                     SDValue V2, ArrayRef<int> Mask,
11097                                     const APInt &Zeroable,
11098                                     const X86Subtarget &Subtarget,
11099                                     SelectionDAG &DAG) {
11100  MVT MaskVT = VT;
11101  MVT EltVT = VT.getVectorElementType();
11102  SDValue Zero, AllOnes;
11103  // Use f64 if i64 isn't legal.
11104  if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
11105    EltVT = MVT::f64;
11106    MaskVT = MVT::getVectorVT(EltVT, Mask.size());
11107  }
11108
11109  MVT LogicVT = VT;
11110  if (EltVT == MVT::f32 || EltVT == MVT::f64) {
11111    Zero = DAG.getConstantFP(0.0, DL, EltVT);
11112    AllOnes = DAG.getConstantFP(
11113        APFloat::getAllOnesValue(EltVT.getSizeInBits(), true), DL, EltVT);
11114    LogicVT =
11115        MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
11116  } else {
11117    Zero = DAG.getConstant(0, DL, EltVT);
11118    AllOnes = DAG.getAllOnesConstant(DL, EltVT);
11119  }
11120
11121  SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
11122  SDValue V;
11123  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11124    if (Zeroable[i])
11125      continue;
11126    if (Mask[i] % Size != i)
11127      return SDValue(); // Not a blend.
11128    if (!V)
11129      V = Mask[i] < Size ? V1 : V2;
11130    else if (V != (Mask[i] < Size ? V1 : V2))
11131      return SDValue(); // Can only let one input through the mask.
11132
11133    VMaskOps[i] = AllOnes;
11134  }
11135  if (!V)
11136    return SDValue(); // No non-zeroable elements!
11137
11138  SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
11139  VMask = DAG.getBitcast(LogicVT, VMask);
11140  V = DAG.getBitcast(LogicVT, V);
11141  SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
11142  return DAG.getBitcast(VT, And);
11143}
11144
11145/// Try to emit a blend instruction for a shuffle using bit math.
11146///
11147/// This is used as a fallback approach when first class blend instructions are
11148/// unavailable. Currently it is only suitable for integer vectors, but could
11149/// be generalized for floating point vectors if desirable.
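///
/// For example, the v4i32 mask <0, 5, 2, 7> becomes
///   (V1 & <-1, 0, -1, 0>) | ANDNP(<-1, 0, -1, 0>, V2)
/// i.e. an AND/ANDNP/OR sequence selecting each element from V1 or V2.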
11150static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
11151                                      SDValue V2, ArrayRef<int> Mask,
11152                                      SelectionDAG &DAG) {
11153  assert(VT.isInteger() && "Only supports integer vector types!");
11154  MVT EltVT = VT.getVectorElementType();
11155  SDValue Zero = DAG.getConstant(0, DL, EltVT);
11156  SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
11157  SmallVector<SDValue, 16> MaskOps;
11158  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11159    if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
11160      return SDValue(); // Shuffled input!
11161    MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
11162  }
11163
11164  SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
11165  V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
11166  V2 = DAG.getNode(X86ISD::ANDNP, DL, VT, V1Mask, V2);
11167  return DAG.getNode(ISD::OR, DL, VT, V1, V2);
11168}
11169
11170static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
11171                                    SDValue PreservedSrc,
11172                                    const X86Subtarget &Subtarget,
11173                                    SelectionDAG &DAG);
11174
11175static bool matchShuffleAsBlend(SDValue V1, SDValue V2,
11176                                MutableArrayRef<int> Mask,
11177                                const APInt &Zeroable, bool &ForceV1Zero,
11178                                bool &ForceV2Zero, uint64_t &BlendMask) {
11179  bool V1IsZeroOrUndef =
11180      V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
11181  bool V2IsZeroOrUndef =
11182      V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
11183
11184  BlendMask = 0;
11185  ForceV1Zero = false, ForceV2Zero = false;
11186  assert(Mask.size() <= 64 && "Shuffle mask too big for blend mask");
11187
11188  // Attempt to generate the binary blend mask. If an input is zero then
11189  // we can use any lane.
11190  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11191    int M = Mask[i];
11192    if (M == SM_SentinelUndef)
11193      continue;
11194    if (M == i)
11195      continue;
11196    if (M == i + Size) {
11197      BlendMask |= 1ull << i;
11198      continue;
11199    }
11200    if (Zeroable[i]) {
11201      if (V1IsZeroOrUndef) {
11202        ForceV1Zero = true;
11203        Mask[i] = i;
11204        continue;
11205      }
11206      if (V2IsZeroOrUndef) {
11207        ForceV2Zero = true;
11208        BlendMask |= 1ull << i;
11209        Mask[i] = i + Size;
11210        continue;
11211      }
11212    }
11213    return false;
11214  }
11215  return true;
11216}
11217
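// Widen a blend mask so that each input bit covers Scale adjacent output bits.
// For example, with Size == 4 and Scale == 2, 0b0101 becomes 0b00110011.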
11218static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
11219                                            int Scale) {
11220  uint64_t ScaledMask = 0;
11221  for (int i = 0; i != Size; ++i)
11222    if (BlendMask & (1ull << i))
11223      ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
11224  return ScaledMask;
11225}
11226
11227/// Try to emit a blend instruction for a shuffle.
11228///
11229/// This doesn't do any checks for the availability of instructions for blending
11230/// these values. It relies on the availability of the X86ISD::BLENDI pattern to
11231/// be matched in the backend with the type given. What it does check for is
11232/// that the shuffle mask is a blend, or convertible into a blend with zero.
11233static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
11234                                   SDValue V2, ArrayRef<int> Original,
11235                                   const APInt &Zeroable,
11236                                   const X86Subtarget &Subtarget,
11237                                   SelectionDAG &DAG) {
11238  uint64_t BlendMask = 0;
11239  bool ForceV1Zero = false, ForceV2Zero = false;
11240  SmallVector<int, 64> Mask(Original.begin(), Original.end());
11241  if (!matchShuffleAsBlend(V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
11242                           BlendMask))
11243    return SDValue();
11244
11245  // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
11246  if (ForceV1Zero)
11247    V1 = getZeroVector(VT, Subtarget, DAG, DL);
11248  if (ForceV2Zero)
11249    V2 = getZeroVector(VT, Subtarget, DAG, DL);
11250
11251  switch (VT.SimpleTy) {
11252  case MVT::v4i64:
11253  case MVT::v8i32:
11254    assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
11255    LLVM_FALLTHROUGH;
11256  case MVT::v4f64:
11257  case MVT::v8f32:
11258    assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
11259    LLVM_FALLTHROUGH;
11260  case MVT::v2f64:
11261  case MVT::v2i64:
11262  case MVT::v4f32:
11263  case MVT::v4i32:
11264  case MVT::v8i16:
11265    assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
11266    return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
11267                       DAG.getTargetConstant(BlendMask, DL, MVT::i8));
11268  case MVT::v16i16: {
11269    assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
11270    SmallVector<int, 8> RepeatedMask;
11271    if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
11272      // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
11273      assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
11274      BlendMask = 0;
11275      for (int i = 0; i < 8; ++i)
11276        if (RepeatedMask[i] >= 8)
11277          BlendMask |= 1ull << i;
11278      return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11279                         DAG.getTargetConstant(BlendMask, DL, MVT::i8));
11280    }
11281    // Use PBLENDW for lower/upper lanes and then blend lanes.
11282    // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
11283    // merge to VSELECT where useful.
11284    uint64_t LoMask = BlendMask & 0xFF;
11285    uint64_t HiMask = (BlendMask >> 8) & 0xFF;
11286    if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
11287      SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11288                               DAG.getTargetConstant(LoMask, DL, MVT::i8));
11289      SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11290                               DAG.getTargetConstant(HiMask, DL, MVT::i8));
11291      return DAG.getVectorShuffle(
11292          MVT::v16i16, DL, Lo, Hi,
11293          {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
11294    }
11295    LLVM_FALLTHROUGH;
11296  }
11297  case MVT::v32i8:
11298    assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
11299    LLVM_FALLTHROUGH;
11300  case MVT::v16i8: {
11301    assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");
11302
11303    // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
11304    if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
11305                                               Subtarget, DAG))
11306      return Masked;
11307
11308    if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
11309      MVT IntegerType =
11310          MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
11311      SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
11312      return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
11313    }
11314
11315    // Scale the blend by the number of bytes per element.
11316    int Scale = VT.getScalarSizeInBits() / 8;
11317
11318    // This form of blend is always done on bytes. Compute the byte vector
11319    // type.
11320    MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11321
11322    // x86 allows load folding with blendvb from the 2nd source operand. But
11323    // we are still using LLVM select here (see comment below), so that's V1.
11324    // If V2 can be load-folded and V1 cannot be load-folded, then commute to
11325    // allow that load-folding possibility.
11326    if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
11327      ShuffleVectorSDNode::commuteMask(Mask);
11328      std::swap(V1, V2);
11329    }
11330
11331    // Compute the VSELECT mask. Note that VSELECT is really confusing in the
11332    // mix of LLVM's code generator and the x86 backend. We tell the code
11333    // generator that boolean values in the elements of an x86 vector register
11334    // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
11335    // mapping a select to operand #1, and 'false' mapping to operand #2. The
11336    // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
11337    // of the element (the remaining are ignored) and 0 in that high bit would
11338    // mean operand #1 while 1 in the high bit would mean operand #2. So while
11339    // the LLVM model for boolean values in vector elements gets the relevant
11340    // bit set, it is set backwards and over constrained relative to x86's
11341    // actual model.
11342    SmallVector<SDValue, 32> VSELECTMask;
11343    for (int i = 0, Size = Mask.size(); i < Size; ++i)
11344      for (int j = 0; j < Scale; ++j)
11345        VSELECTMask.push_back(
11346            Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
11347                        : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
11348                                          MVT::i8));
11349
11350    V1 = DAG.getBitcast(BlendVT, V1);
11351    V2 = DAG.getBitcast(BlendVT, V2);
11352    return DAG.getBitcast(
11353        VT,
11354        DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
11355                      V1, V2));
11356  }
11357  case MVT::v16f32:
11358  case MVT::v8f64:
11359  case MVT::v8i64:
11360  case MVT::v16i32:
11361  case MVT::v32i16:
11362  case MVT::v64i8: {
11363    // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
11364    bool OptForSize = DAG.shouldOptForSize();
11365    if (!OptForSize) {
11366      if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
11367                                                 Subtarget, DAG))
11368        return Masked;
11369    }
11370
11371    // Otherwise load an immediate into a GPR, cast to k-register, and use a
11372    // masked move.
11373    MVT IntegerType =
11374        MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
11375    SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
11376    return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
11377  }
11378  default:
11379    llvm_unreachable("Not a supported integer vector type!");
11380  }
11381}
11382
11383/// Try to lower as a blend of elements from two inputs followed by
11384/// a single-input permutation.
11385///
11386/// This matches the pattern where we can blend elements from two inputs and
11387/// then reduce the shuffle to a single-input permutation.
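/// For example, a v4i32 mask [6, 0, 5, 3] can be handled as a two-input blend
/// with mask [0, 5, 6, 3] followed by the single-input permute [2, 0, 1, 3].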
11388static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
11389                                             SDValue V1, SDValue V2,
11390                                             ArrayRef<int> Mask,
11391                                             SelectionDAG &DAG,
11392                                             bool ImmBlends = false) {
11393  // We build up the blend mask while checking whether a blend is a viable way
11394  // to reduce the shuffle.
11395  SmallVector<int, 32> BlendMask(Mask.size(), -1);
11396  SmallVector<int, 32> PermuteMask(Mask.size(), -1);
11397
11398  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11399    if (Mask[i] < 0)
11400      continue;
11401
11402    assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
11403
11404    if (BlendMask[Mask[i] % Size] < 0)
11405      BlendMask[Mask[i] % Size] = Mask[i];
11406    else if (BlendMask[Mask[i] % Size] != Mask[i])
11407      return SDValue(); // Can't blend in the needed input!
11408
11409    PermuteMask[i] = Mask[i] % Size;
11410  }
11411
11412  // If only immediate blends, then bail if the blend mask can't be widened to
11413  // i16.
11414  unsigned EltSize = VT.getScalarSizeInBits();
11415  if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
11416    return SDValue();
11417
11418  SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
11419  return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
11420}
11421
11422/// Try to lower as an unpack of elements from two inputs followed by
11423/// a single-input permutation.
11424///
11425/// This matches the pattern where we can unpack elements from two inputs and
11426/// then reduce the shuffle to a single-input (wider) permutation.
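/// For example, a v8i16 mask [1, 9, 0, 8, -1, -1, -1, -1] can use
/// UNPCKL(V1, V2) (element order [0, 8, 1, 9, ...]) followed by the permute
/// [2, 3, 0, 1, -1, -1, -1, -1].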
11427static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
11428                                             SDValue V1, SDValue V2,
11429                                             ArrayRef<int> Mask,
11430                                             SelectionDAG &DAG) {
11431  int NumElts = Mask.size();
11432  int NumLanes = VT.getSizeInBits() / 128;
11433  int NumLaneElts = NumElts / NumLanes;
11434  int NumHalfLaneElts = NumLaneElts / 2;
11435
11436  bool MatchLo = true, MatchHi = true;
11437  SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
11438
11439  // Determine UNPCKL/UNPCKH type and operand order.
11440  for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
11441    for (int Elt = 0; Elt != NumLaneElts; ++Elt) {
11442      int M = Mask[Lane + Elt];
11443      if (M < 0)
11444        continue;
11445
11446      SDValue &Op = Ops[Elt & 1];
11447      if (M < NumElts && (Op.isUndef() || Op == V1))
11448        Op = V1;
11449      else if (NumElts <= M && (Op.isUndef() || Op == V2))
11450        Op = V2;
11451      else
11452        return SDValue();
11453
11454      int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
11455      MatchLo &= isUndefOrInRange(M, Lo, Mid) ||
11456                 isUndefOrInRange(M, NumElts + Lo, NumElts + Mid);
11457      MatchHi &= isUndefOrInRange(M, Mid, Hi) ||
11458                 isUndefOrInRange(M, NumElts + Mid, NumElts + Hi);
11459      if (!MatchLo && !MatchHi)
11460        return SDValue();
11461    }
11462  }
11463  assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");
11464
11465  // Now check that each pair of elts comes from the same unpack pair
11466  // and set the permute mask based on each pair.
11467  // TODO - Investigate cases where we permute individual elements.
11468  SmallVector<int, 32> PermuteMask(NumElts, -1);
11469  for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
11470    for (int Elt = 0; Elt != NumLaneElts; Elt += 2) {
11471      int M0 = Mask[Lane + Elt + 0];
11472      int M1 = Mask[Lane + Elt + 1];
11473      if (0 <= M0 && 0 <= M1 &&
11474          (M0 % NumHalfLaneElts) != (M1 % NumHalfLaneElts))
11475        return SDValue();
11476      if (0 <= M0)
11477        PermuteMask[Lane + Elt + 0] = Lane + (2 * (M0 % NumHalfLaneElts));
11478      if (0 <= M1)
11479        PermuteMask[Lane + Elt + 1] = Lane + (2 * (M1 % NumHalfLaneElts)) + 1;
11480    }
11481  }
11482
11483  unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
11484  SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
11485  return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
11486}
11487
11488/// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
11489/// permuting the elements of the result in place.
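/// This only succeeds when the in-lane index ranges used from V1 and V2 don't
/// overlap, so that a single PALIGNR of the pair can gather every required
/// element into one register before the final in-place permute.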
11490static SDValue lowerShuffleAsByteRotateAndPermute(
11491    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11492    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11493  if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
11494      (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
11495      (VT.is512BitVector() && !Subtarget.hasBWI()))
11496    return SDValue();
11497
11498  // We don't currently support lane crossing permutes.
11499  if (is128BitLaneCrossingShuffleMask(VT, Mask))
11500    return SDValue();
11501
11502  int Scale = VT.getScalarSizeInBits() / 8;
11503  int NumLanes = VT.getSizeInBits() / 128;
11504  int NumElts = VT.getVectorNumElements();
11505  int NumEltsPerLane = NumElts / NumLanes;
11506
11507  // Determine range of mask elts.
11508  bool Blend1 = true;
11509  bool Blend2 = true;
11510  std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
11511  std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
11512  for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11513    for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11514      int M = Mask[Lane + Elt];
11515      if (M < 0)
11516        continue;
11517      if (M < NumElts) {
11518        Blend1 &= (M == (Lane + Elt));
11519        assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11520        M = M % NumEltsPerLane;
11521        Range1.first = std::min(Range1.first, M);
11522        Range1.second = std::max(Range1.second, M);
11523      } else {
11524        M -= NumElts;
11525        Blend2 &= (M == (Lane + Elt));
11526        assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11527        M = M % NumEltsPerLane;
11528        Range2.first = std::min(Range2.first, M);
11529        Range2.second = std::max(Range2.second, M);
11530      }
11531    }
11532  }
11533
11534  // Bail if we don't need both elements.
11535  // TODO - it might be worth doing this for unary shuffles if the permute
11536  // can be widened.
11537  if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
11538      !(0 <= Range2.first && Range2.second < NumEltsPerLane))
11539    return SDValue();
11540
11541  if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
11542    return SDValue();
11543
11544  // Rotate the 2 ops so we can access both ranges, then permute the result.
11545  auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
11546    MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11547    SDValue Rotate = DAG.getBitcast(
11548        VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
11549                        DAG.getBitcast(ByteVT, Lo),
11550                        DAG.getTargetConstant(Scale * RotAmt, DL, MVT::i8)));
11551    SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
11552    for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11553      for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11554        int M = Mask[Lane + Elt];
11555        if (M < 0)
11556          continue;
11557        if (M < NumElts)
11558          PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
11559        else
11560          PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
11561      }
11562    }
11563    return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
11564  };
11565
11566  // Check if the ranges are small enough to rotate from either direction.
11567  if (Range2.second < Range1.first)
11568    return RotateAndPermute(V1, V2, Range1.first, 0);
11569  if (Range1.second < Range2.first)
11570    return RotateAndPermute(V2, V1, Range2.first, NumElts);
11571  return SDValue();
11572}
11573
11574/// Generic routine to decompose a shuffle and blend into independent
11575/// blends and permutes.
11576///
11577/// This matches the extremely common pattern for handling combined
11578/// shuffle+blend operations on newer X86 ISAs where we have very fast blend
11579/// operations. It will try to pick the best arrangement of shuffles and
11580/// blends.
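/// For example, a v4i32 mask [5, 0, 7, 2] decomposes into the single-input
/// shuffles V1 -> [-1, 0, -1, 2] and V2 -> [1, -1, 3, -1] followed by the
/// blend [4, 1, 6, 3], unless one of the cheaper combined strategies below
/// applies first.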
11581static SDValue lowerShuffleAsDecomposedShuffleBlend(
11582    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11583    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11584  // Shuffle the input elements into the desired positions in V1 and V2 and
11585  // blend them together.
11586  SmallVector<int, 32> V1Mask(Mask.size(), -1);
11587  SmallVector<int, 32> V2Mask(Mask.size(), -1);
11588  SmallVector<int, 32> BlendMask(Mask.size(), -1);
11589  for (int i = 0, Size = Mask.size(); i < Size; ++i)
11590    if (Mask[i] >= 0 && Mask[i] < Size) {
11591      V1Mask[i] = Mask[i];
11592      BlendMask[i] = i;
11593    } else if (Mask[i] >= Size) {
11594      V2Mask[i] = Mask[i] - Size;
11595      BlendMask[i] = i + Size;
11596    }
11597
11598  // Try to lower with the simpler initial blend/unpack/rotate strategies unless
11599  // one of the input shuffles would be a no-op. We prefer to shuffle inputs as
11600  // the shuffle may be able to fold with a load or other benefit. However, when
11601  // we'll have to do 2x as many shuffles in order to achieve this, a 2-input
11602  // pre-shuffle first is a better strategy.
11603  if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
11604    // Only prefer immediate blends to unpack/rotate.
11605    if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11606                                                          DAG, true))
11607      return BlendPerm;
11608    if (SDValue UnpackPerm = lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask,
11609                                                           DAG))
11610      return UnpackPerm;
11611    if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
11612            DL, VT, V1, V2, Mask, Subtarget, DAG))
11613      return RotatePerm;
11614    // Unpack/rotate failed - try again with variable blends.
11615    if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11616                                                          DAG))
11617      return BlendPerm;
11618  }
11619
11620  V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
11621  V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
11622  return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
11623}
11624
11625/// Try to lower a vector shuffle as a rotation.
11626///
11627/// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
11628static int matchShuffleAsRotate(SDValue &V1, SDValue &V2, ArrayRef<int> Mask) {
11629  int NumElts = Mask.size();
11630
11631  // We need to detect various ways of spelling a rotation:
11632  //   [11, 12, 13, 14, 15,  0,  1,  2]
11633  //   [-1, 12, 13, 14, -1, -1,  1, -1]
11634  //   [-1, -1, -1, -1, -1, -1,  1,  2]
11635  //   [ 3,  4,  5,  6,  7,  8,  9, 10]
11636  //   [-1,  4,  5,  6, -1, -1,  9, -1]
11637  //   [-1,  4,  5,  6, -1, -1, -1, -1]
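  // For the first mask above (8 elements), every defined element i satisfies
  // i - (Mask[i] % 8) == -3 or 5, both of which correspond to a rotation of 3.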
11638  int Rotation = 0;
11639  SDValue Lo, Hi;
11640  for (int i = 0; i < NumElts; ++i) {
11641    int M = Mask[i];
11642    assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
11643           "Unexpected mask index.");
11644    if (M < 0)
11645      continue;
11646
11647    // Determine where a rotated vector would have started.
11648    int StartIdx = i - (M % NumElts);
11649    if (StartIdx == 0)
11650      // The identity rotation isn't interesting, stop.
11651      return -1;
11652
11653    // If we found the tail of a vector the rotation must be the missing
11654    // front. If we found the head of a vector, it must be how much of the
11655    // head.
11656    int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
11657
11658    if (Rotation == 0)
11659      Rotation = CandidateRotation;
11660    else if (Rotation != CandidateRotation)
11661      // The rotations don't match, so we can't match this mask.
11662      return -1;
11663
11664    // Compute which value this mask is pointing at.
11665    SDValue MaskV = M < NumElts ? V1 : V2;
11666
11667    // Compute which of the two target values this index should be assigned
11668    // to. This reflects whether the high elements are remaining or the low
11669    // elements are remaining.
11670    SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
11671
11672    // Either set up this value if we've not encountered it before, or check
11673    // that it remains consistent.
11674    if (!TargetV)
11675      TargetV = MaskV;
11676    else if (TargetV != MaskV)
11677      // This may be a rotation, but it pulls from the inputs in some
11678      // unsupported interleaving.
11679      return -1;
11680  }
11681
11682  // Check that we successfully analyzed the mask, and normalize the results.
11683  assert(Rotation != 0 && "Failed to locate a viable rotation!");
11684  assert((Lo || Hi) && "Failed to find a rotated input vector!");
11685  if (!Lo)
11686    Lo = Hi;
11687  else if (!Hi)
11688    Hi = Lo;
11689
11690  V1 = Lo;
11691  V2 = Hi;
11692
11693  return Rotation;
11694}
11695
11696/// Try to lower a vector shuffle as a byte rotation.
11697///
11698/// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
11699/// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
11700/// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
11701/// try to generically lower a vector shuffle through such a pattern. It
11702/// does not check for the profitability of lowering either as PALIGNR or
11703/// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
11704/// This matches shuffle vectors that look like:
11705///
11706///   v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
11707///
11708/// Essentially it concatenates V1 and V2, shifts right by some number of
11709/// elements, and takes the low elements as the result. Note that while this is
11710/// specified as a *right shift* because x86 is little-endian, it is a *left
11711/// rotate* of the vector lanes.
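/// The returned amount is in bytes: for the v8i16 example above the element
/// rotation of 3 is scaled by 16/8 == 2 bytes per element, giving a PALIGNR
/// immediate of 6.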
11712static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
11713                                    ArrayRef<int> Mask) {
11714  // Don't accept any shuffles with zero elements.
11715  if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
11716    return -1;
11717
11718  // PALIGNR works on 128-bit lanes.
11719  SmallVector<int, 16> RepeatedMask;
11720  if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
11721    return -1;
11722
11723  int Rotation = matchShuffleAsRotate(V1, V2, RepeatedMask);
11724  if (Rotation <= 0)
11725    return -1;
11726
11727  // PALIGNR rotates bytes, so we need to scale the
11728  // rotation based on how many bytes are in the vector lane.
11729  int NumElts = RepeatedMask.size();
11730  int Scale = 16 / NumElts;
11731  return Rotation * Scale;
11732}
11733
11734static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
11735                                        SDValue V2, ArrayRef<int> Mask,
11736                                        const X86Subtarget &Subtarget,
11737                                        SelectionDAG &DAG) {
11738  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11739
11740  SDValue Lo = V1, Hi = V2;
11741  int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
11742  if (ByteRotation <= 0)
11743    return SDValue();
11744
11745  // Cast the inputs to i8 vector of correct length to match PALIGNR or
11746  // PSLLDQ/PSRLDQ.
11747  MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11748  Lo = DAG.getBitcast(ByteVT, Lo);
11749  Hi = DAG.getBitcast(ByteVT, Hi);
11750
11751  // SSSE3 targets can use the palignr instruction.
11752  if (Subtarget.hasSSSE3()) {
11753    assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
11754           "512-bit PALIGNR requires BWI instructions");
11755    return DAG.getBitcast(
11756        VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
11757                        DAG.getTargetConstant(ByteRotation, DL, MVT::i8)));
11758  }
11759
11760  assert(VT.is128BitVector() &&
11761         "Rotate-based lowering only supports 128-bit lowering!");
11762  assert(Mask.size() <= 16 &&
11763         "Can shuffle at most 16 bytes in a 128-bit vector!");
11764  assert(ByteVT == MVT::v16i8 &&
11765         "SSE2 rotate lowering only needed for v16i8!");
11766
11767  // Default SSE2 implementation
11768  int LoByteShift = 16 - ByteRotation;
11769  int HiByteShift = ByteRotation;
11770
11771  SDValue LoShift =
11772      DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
11773                  DAG.getTargetConstant(LoByteShift, DL, MVT::i8));
11774  SDValue HiShift =
11775      DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
11776                  DAG.getTargetConstant(HiByteShift, DL, MVT::i8));
11777  return DAG.getBitcast(VT,
11778                        DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
11779}
11780
11781/// Try to lower a vector shuffle as a dword/qword rotation.
11782///
11783/// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
11784/// rotation of the concatenation of two vectors; this routine will
11785/// try to generically lower a vector shuffle through such a pattern.
11786///
11787/// Essentially it concatenates V1 and V2, shifts right by some number of
11788/// elements, and takes the low elements as the result. Note that while this is
11789/// specified as a *right shift* because x86 is little-endian, it is a *left
11790/// rotate* of the vector lanes.
11791static SDValue lowerShuffleAsRotate(const SDLoc &DL, MVT VT, SDValue V1,
11792                                    SDValue V2, ArrayRef<int> Mask,
11793                                    const X86Subtarget &Subtarget,
11794                                    SelectionDAG &DAG) {
11795  assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
11796         "Only 32-bit and 64-bit elements are supported!");
11797
11798  // 128/256-bit vectors are only supported with VLX.
11799  assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
11800         && "VLX required for 128/256-bit vectors");
11801
11802  SDValue Lo = V1, Hi = V2;
11803  int Rotation = matchShuffleAsRotate(Lo, Hi, Mask);
11804  if (Rotation <= 0)
11805    return SDValue();
11806
11807  return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
11808                     DAG.getTargetConstant(Rotation, DL, MVT::i8));
11809}
11810
11811/// Try to lower a vector shuffle as a byte shift sequence.
11812static SDValue lowerShuffleAsByteShiftMask(const SDLoc &DL, MVT VT, SDValue V1,
11813                                           SDValue V2, ArrayRef<int> Mask,
11814                                           const APInt &Zeroable,
11815                                           const X86Subtarget &Subtarget,
11816                                           SelectionDAG &DAG) {
11817  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11818  assert(VT.is128BitVector() && "Only 128-bit vectors supported");
11819
11820  // We need a shuffle that has zeros at one/both ends and a sequential
11821  // shuffle from one source within.
11822  unsigned ZeroLo = Zeroable.countTrailingOnes();
11823  unsigned ZeroHi = Zeroable.countLeadingOnes();
11824  if (!ZeroLo && !ZeroHi)
11825    return SDValue();
11826
11827  unsigned NumElts = Mask.size();
11828  unsigned Len = NumElts - (ZeroLo + ZeroHi);
11829  if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
11830    return SDValue();
11831
11832  unsigned Scale = VT.getScalarSizeInBits() / 8;
11833  ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
11834  if (!isUndefOrInRange(StubMask, 0, NumElts) &&
11835      !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
11836    return SDValue();
11837
11838  SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
11839  Res = DAG.getBitcast(MVT::v16i8, Res);
11840
11841  // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
11842  // inner sequential set of elements, possibly offset:
11843  // 01234567 --> zzzzzz01 --> 1zzzzzzz
11844  // 01234567 --> 4567zzzz --> zzzzz456
11845  // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
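  // The three traces above correspond, in order, to the ZeroLo == 0 case, the
  // ZeroHi == 0 case, and the pre-SSSE3 three-shift case handled below.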
11846  if (ZeroLo == 0) {
11847    unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11848    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11849                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11850    Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11851                      DAG.getTargetConstant(Scale * ZeroHi, DL, MVT::i8));
11852  } else if (ZeroHi == 0) {
11853    unsigned Shift = Mask[ZeroLo] % NumElts;
11854    Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11855                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11856    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11857                      DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
11858  } else if (!Subtarget.hasSSSE3()) {
11859    // If we don't have PSHUFB then it's worth avoiding an AND constant mask
11860    // by performing 3 byte shifts. Shuffle combining can kick in above that.
11861    // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
11862    unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11863    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11864                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11865    Shift += Mask[ZeroLo] % NumElts;
11866    Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11867                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11868    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11869                      DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
11870  } else
11871    return SDValue();
11872
11873  return DAG.getBitcast(VT, Res);
11874}
11875
11876/// Try to lower a vector shuffle as a bit shift (shifts in zeros).
11877///
11878/// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
11879/// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
11880/// matches elements from one of the input vectors shuffled to the left or
11881/// right with zeroable elements 'shifted in'. It handles both the strictly
11882/// bit-wise element shifts and the byte shift across an entire 128-bit double
11883/// quad word lane.
11884///
11885/// PSHL : (little-endian) left bit shift.
11886/// [ zz, 0, zz,  2 ]
11887/// [ -1, 4, zz, -1 ]
11888/// PSRL : (little-endian) right bit shift.
11889/// [  1, zz,  3, zz]
11890/// [ -1, -1,  7, zz]
11891/// PSLLDQ : (little-endian) left byte shift
11892/// [ zz,  0,  1,  2,  3,  4,  5,  6]
11893/// [ zz, zz, -1, -1,  2,  3,  4, -1]
11894/// [ zz, zz, zz, zz, zz, zz, -1,  1]
11895/// PSRLDQ : (little-endian) right byte shift
11896/// [  5, 6,  7, zz, zz, zz, zz, zz]
11897/// [ -1, 5,  6,  7, zz, zz, zz, zz]
11898/// [  1, 2, -1, -1, -1, -1, zz, zz]
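/// On success this returns the shift amount (in bits for VSHLI/VSRLI, in bytes
/// for VSHLDQ/VSRLDQ - e.g. 10 bytes for the first PSRLDQ mask above on v8i16)
/// and sets ShiftVT/Opcode; it returns -1 if no shift pattern matches.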
11899static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
11900                               unsigned ScalarSizeInBits, ArrayRef<int> Mask,
11901                               int MaskOffset, const APInt &Zeroable,
11902                               const X86Subtarget &Subtarget) {
11903  int Size = Mask.size();
11904  unsigned SizeInBits = Size * ScalarSizeInBits;
11905
11906  auto CheckZeros = [&](int Shift, int Scale, bool Left) {
11907    for (int i = 0; i < Size; i += Scale)
11908      for (int j = 0; j < Shift; ++j)
11909        if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
11910          return false;
11911
11912    return true;
11913  };
11914
11915  auto MatchShift = [&](int Shift, int Scale, bool Left) {
11916    for (int i = 0; i != Size; i += Scale) {
11917      unsigned Pos = Left ? i + Shift : i;
11918      unsigned Low = Left ? i : i + Shift;
11919      unsigned Len = Scale - Shift;
11920      if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
11921        return -1;
11922    }
11923
11924    int ShiftEltBits = ScalarSizeInBits * Scale;
11925    bool ByteShift = ShiftEltBits > 64;
11926    Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
11927                  : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
11928    int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
11929
11930    // Normalize the scale for byte shifts to still produce an i64 element
11931    // type.
11932    Scale = ByteShift ? Scale / 2 : Scale;
11933
11934    // We need to round trip through the appropriate type for the shift.
11935    MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
11936    ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
11937                        : MVT::getVectorVT(ShiftSVT, Size / Scale);
11938    return (int)ShiftAmt;
11939  };
11940
11941  // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
11942  // keep doubling the size of the integer elements up to that. We can
11943  // then shift the elements of the integer vector by whole multiples of
11944  // their width within the elements of the larger integer vector. Test each
11945  // multiple to see if we can find a match with the moved element indices
11946  // and that the shifted in elements are all zeroable.
11947  unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
11948  for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
11949    for (int Shift = 1; Shift != Scale; ++Shift)
11950      for (bool Left : {true, false})
11951        if (CheckZeros(Shift, Scale, Left)) {
11952          int ShiftAmt = MatchShift(Shift, Scale, Left);
11953          if (0 < ShiftAmt)
11954            return ShiftAmt;
11955        }
11956
11957  // no match
11958  return -1;
11959}
11960
11961static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
11962                                   SDValue V2, ArrayRef<int> Mask,
11963                                   const APInt &Zeroable,
11964                                   const X86Subtarget &Subtarget,
11965                                   SelectionDAG &DAG) {
11966  int Size = Mask.size();
11967  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11968
11969  MVT ShiftVT;
11970  SDValue V = V1;
11971  unsigned Opcode;
11972
11973  // Try to match shuffle against V1 shift.
11974  int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11975                                     Mask, 0, Zeroable, Subtarget);
11976
11977  // If V1 failed, try to match shuffle against V2 shift.
11978  if (ShiftAmt < 0) {
11979    ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11980                                   Mask, Size, Zeroable, Subtarget);
11981    V = V2;
11982  }
11983
11984  if (ShiftAmt < 0)
11985    return SDValue();
11986
11987  assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
11988         "Illegal integer vector type");
11989  V = DAG.getBitcast(ShiftVT, V);
11990  V = DAG.getNode(Opcode, DL, ShiftVT, V,
11991                  DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
11992  return DAG.getBitcast(VT, V);
11993}
11994
11995// EXTRQ: Extract Len elements from lower half of source, starting at Idx.
11996// Remainder of lower half result is zero and upper half is all undef.
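// For example, a v8i16 mask [1, 2, Z, Z, U, U, U, U] (Z = zeroable, U = undef)
// matches with BitIdx == 16 and BitLen == 32.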
11997static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
11998                                ArrayRef<int> Mask, uint64_t &BitLen,
11999                                uint64_t &BitIdx, const APInt &Zeroable) {
12000  int Size = Mask.size();
12001  int HalfSize = Size / 2;
12002  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
12003  assert(!Zeroable.isAllOnesValue() && "Fully zeroable shuffle mask");
12004
12005  // Upper half must be undefined.
12006  if (!isUndefUpperHalf(Mask))
12007    return false;
12008
12009  // Determine the extraction length from the part of the
12010  // lower half that isn't zeroable.
12011  int Len = HalfSize;
12012  for (; Len > 0; --Len)
12013    if (!Zeroable[Len - 1])
12014      break;
12015  assert(Len > 0 && "Zeroable shuffle mask");
12016
12017  // Attempt to match first Len sequential elements from the lower half.
12018  SDValue Src;
12019  int Idx = -1;
12020  for (int i = 0; i != Len; ++i) {
12021    int M = Mask[i];
12022    if (M == SM_SentinelUndef)
12023      continue;
12024    SDValue &V = (M < Size ? V1 : V2);
12025    M = M % Size;
12026
12027    // The extracted elements must start at a valid index and all mask
12028    // elements must be in the lower half.
12029    if (i > M || M >= HalfSize)
12030      return false;
12031
12032    if (Idx < 0 || (Src == V && Idx == (M - i))) {
12033      Src = V;
12034      Idx = M - i;
12035      continue;
12036    }
12037    return false;
12038  }
12039
12040  if (!Src || Idx < 0)
12041    return false;
12042
12043  assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
12044  BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
12045  BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
12046  V1 = Src;
12047  return true;
12048}
12049
12050// INSERTQ: Extract lowest Len elements from lower half of second source and
12051// insert over first source, starting at Idx.
12052// { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
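// For example, a v8i16 mask [0, 8, 9, 3, U, U, U, U] matches with Idx == 1 and
// Len == 2 (BitIdx == 16, BitLen == 32), V1 as the base and V2 as the insert.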
12053static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
12054                                  ArrayRef<int> Mask, uint64_t &BitLen,
12055                                  uint64_t &BitIdx) {
12056  int Size = Mask.size();
12057  int HalfSize = Size / 2;
12058  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
12059
12060  // Upper half must be undefined.
12061  if (!isUndefUpperHalf(Mask))
12062    return false;
12063
12064  for (int Idx = 0; Idx != HalfSize; ++Idx) {
12065    SDValue Base;
12066
12067    // Attempt to match first source from mask before insertion point.
12068    if (isUndefInRange(Mask, 0, Idx)) {
12069      /* EMPTY */
12070    } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
12071      Base = V1;
12072    } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
12073      Base = V2;
12074    } else {
12075      continue;
12076    }
12077
12078    // Extend the extraction length looking to match both the insertion of
12079    // the second source and the remaining elements of the first.
12080    for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
12081      SDValue Insert;
12082      int Len = Hi - Idx;
12083
12084      // Match insertion.
12085      if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
12086        Insert = V1;
12087      } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
12088        Insert = V2;
12089      } else {
12090        continue;
12091      }
12092
12093      // Match the remaining elements of the lower half.
12094      if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
12095        /* EMPTY */
12096      } else if ((!Base || (Base == V1)) &&
12097                 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
12098        Base = V1;
12099      } else if ((!Base || (Base == V2)) &&
12100                 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
12101                                            Size + Hi)) {
12102        Base = V2;
12103      } else {
12104        continue;
12105      }
12106
12107      BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
12108      BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
12109      V1 = Base;
12110      V2 = Insert;
12111      return true;
12112    }
12113  }
12114
12115  return false;
12116}
12117
12118/// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
12119static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
12120                                     SDValue V2, ArrayRef<int> Mask,
12121                                     const APInt &Zeroable, SelectionDAG &DAG) {
12122  uint64_t BitLen, BitIdx;
12123  if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
12124    return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
12125                       DAG.getTargetConstant(BitLen, DL, MVT::i8),
12126                       DAG.getTargetConstant(BitIdx, DL, MVT::i8));
12127
12128  if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
12129    return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
12130                       V2 ? V2 : DAG.getUNDEF(VT),
12131                       DAG.getTargetConstant(BitLen, DL, MVT::i8),
12132                       DAG.getTargetConstant(BitIdx, DL, MVT::i8));
12133
12134  return SDValue();
12135}
12136
12137/// Lower a vector shuffle as a zero or any extension.
12138///
12139/// Given a specific number of elements, element bit width, and extension
12140/// stride, produce either a zero or any extension based on the available
12141/// features of the subtarget. The extended elements are consecutive and
12142/// can begin at an offset element index in the input; to
12143/// avoid excess shuffling the offset must either be in the bottom lane
12144/// or at the start of a higher lane. All extended elements must be from
12145/// the same lane.
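/// For example, with Offset == 0 a v16i8 zero extension with Scale == 2
/// becomes a zext to v8i16 (PMOVZXBW) on SSE4.1 targets, or a single
/// PUNPCKLBW against a zero vector on plain SSE2.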
12146static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
12147    const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
12148    ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
12149  assert(Scale > 1 && "Need a scale to extend.");
12150  int EltBits = VT.getScalarSizeInBits();
12151  int NumElements = VT.getVectorNumElements();
12152  int NumEltsPerLane = 128 / EltBits;
12153  int OffsetLane = Offset / NumEltsPerLane;
12154  assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
12155         "Only 8, 16, and 32 bit elements can be extended.");
12156  assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
12157  assert(0 <= Offset && "Extension offset must be positive.");
12158  assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
12159         "Extension offset must be in the first lane or start an upper lane.");
12160
12161  // Check that an index is in same lane as the base offset.
12162  auto SafeOffset = [&](int Idx) {
12163    return OffsetLane == (Idx / NumEltsPerLane);
12164  };
12165
12166  // Shift along an input so that the offset base moves to the first element.
12167  auto ShuffleOffset = [&](SDValue V) {
12168    if (!Offset)
12169      return V;
12170
12171    SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
12172    for (int i = 0; i * Scale < NumElements; ++i) {
12173      int SrcIdx = i + Offset;
12174      ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
12175    }
12176    return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
12177  };
12178
12179  // Found a valid a/zext mask! Try various lowering strategies based on the
12180  // input type and available ISA extensions.
12181  if (Subtarget.hasSSE41()) {
12182    // Not worth offsetting 128-bit vectors if scale == 2; a pattern using
12183    // PUNPCK will catch this in a later shuffle match.
12184    if (Offset && Scale == 2 && VT.is128BitVector())
12185      return SDValue();
12186    MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
12187                                 NumElements / Scale);
12188    InputV = ShuffleOffset(InputV);
12189    InputV = getExtendInVec(AnyExt ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND, DL,
12190                            ExtVT, InputV, DAG);
12191    return DAG.getBitcast(VT, InputV);
12192  }
12193
12194  assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
12195
12196  // For any extends we can cheat for larger element sizes and use shuffle
12197  // instructions that can fold with a load and/or copy.
12198  if (AnyExt && EltBits == 32) {
12199    int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
12200                         -1};
12201    return DAG.getBitcast(
12202        VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
12203                        DAG.getBitcast(MVT::v4i32, InputV),
12204                        getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
12205  }
12206  if (AnyExt && EltBits == 16 && Scale > 2) {
12207    int PSHUFDMask[4] = {Offset / 2, -1,
12208                         SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
12209    InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
12210                         DAG.getBitcast(MVT::v4i32, InputV),
12211                         getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
12212    int PSHUFWMask[4] = {1, -1, -1, -1};
12213    unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
12214    return DAG.getBitcast(
12215        VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
12216                        DAG.getBitcast(MVT::v8i16, InputV),
12217                        getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
12218  }
12219
12220  // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
12221  // to 64-bits.
12222  if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
12223    assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
12224    assert(VT.is128BitVector() && "Unexpected vector width!");
12225
12226    int LoIdx = Offset * EltBits;
12227    SDValue Lo = DAG.getBitcast(
12228        MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
12229                                DAG.getTargetConstant(EltBits, DL, MVT::i8),
12230                                DAG.getTargetConstant(LoIdx, DL, MVT::i8)));
12231
12232    if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
12233      return DAG.getBitcast(VT, Lo);
12234
12235    int HiIdx = (Offset + 1) * EltBits;
12236    SDValue Hi = DAG.getBitcast(
12237        MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
12238                                DAG.getTargetConstant(EltBits, DL, MVT::i8),
12239                                DAG.getTargetConstant(HiIdx, DL, MVT::i8)));
12240    return DAG.getBitcast(VT,
12241                          DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
12242  }
12243
12244  // If this would require more than 2 unpack instructions to expand, use
12245  // pshufb when available. We can only use more than 2 unpack instructions
12246  // when zero extending i8 elements which also makes it easier to use pshufb.
12247  if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
12248    assert(NumElements == 16 && "Unexpected byte vector width!");
12249    SDValue PSHUFBMask[16];
12250    for (int i = 0; i < 16; ++i) {
12251      int Idx = Offset + (i / Scale);
12252      if ((i % Scale == 0 && SafeOffset(Idx))) {
12253        PSHUFBMask[i] = DAG.getConstant(Idx, DL, MVT::i8);
12254        continue;
12255      }
12256      PSHUFBMask[i] =
12257          AnyExt ? DAG.getUNDEF(MVT::i8) : DAG.getConstant(0x80, DL, MVT::i8);
12258    }
12259    InputV = DAG.getBitcast(MVT::v16i8, InputV);
12260    return DAG.getBitcast(
12261        VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
12262                        DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
12263  }
12264
12265  // If we are extending from an offset, ensure we start on a boundary that
12266  // we can unpack from.
12267  int AlignToUnpack = Offset % (NumElements / Scale);
12268  if (AlignToUnpack) {
12269    SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
12270    for (int i = AlignToUnpack; i < NumElements; ++i)
12271      ShMask[i - AlignToUnpack] = i;
12272    InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
12273    Offset -= AlignToUnpack;
12274  }
12275
12276  // Otherwise emit a sequence of unpacks.
12277  do {
12278    unsigned UnpackLoHi = X86ISD::UNPCKL;
12279    if (Offset >= (NumElements / 2)) {
12280      UnpackLoHi = X86ISD::UNPCKH;
12281      Offset -= (NumElements / 2);
12282    }
12283
12284    MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
12285    SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
12286                         : getZeroVector(InputVT, Subtarget, DAG, DL);
12287    InputV = DAG.getBitcast(InputVT, InputV);
12288    InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
12289    Scale /= 2;
12290    EltBits *= 2;
12291    NumElements /= 2;
12292  } while (Scale > 1);
12293  return DAG.getBitcast(VT, InputV);
12294}
12295
12296/// Try to lower a vector shuffle as a zero extension on any microarch.
12297///
12298/// This routine will try to do everything in its power to cleverly lower
12299/// a shuffle which happens to match the pattern of a zero extend. It doesn't
12300/// check for the profitability of this lowering; it tries to aggressively
12301/// match this pattern. It will use all of the micro-architectural details it
12302/// can to emit an efficient lowering. It handles both blends with all-zero
12303/// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
12304/// masking out later).
12305///
12306/// The reason we have dedicated lowering for zext-style shuffles is that they
12307/// are both incredibly common and often quite performance sensitive.
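/// The search below starts from the widest extension (to 64-bit elements) and
/// halves the extension factor until a match is found, e.g. for a v8i16 input
/// it tries Scale == 4 and then Scale == 2, with a final MOVQ special case for
/// zero-extending the low 64 bits of a 128-bit vector.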
12308static SDValue lowerShuffleAsZeroOrAnyExtend(
12309    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12310    const APInt &Zeroable, const X86Subtarget &Subtarget,
12311    SelectionDAG &DAG) {
12312  int Bits = VT.getSizeInBits();
12313  int NumLanes = Bits / 128;
12314  int NumElements = VT.getVectorNumElements();
12315  int NumEltsPerLane = NumElements / NumLanes;
12316  assert(VT.getScalarSizeInBits() <= 32 &&
12317         "Exceeds 32-bit integer zero extension limit");
12318  assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
12319
12320  // Define a helper function to check a particular ext-scale and lower to it if
12321  // valid.
12322  auto Lower = [&](int Scale) -> SDValue {
12323    SDValue InputV;
12324    bool AnyExt = true;
12325    int Offset = 0;
12326    int Matches = 0;
12327    for (int i = 0; i < NumElements; ++i) {
12328      int M = Mask[i];
12329      if (M < 0)
12330        continue; // Valid anywhere but doesn't tell us anything.
12331      if (i % Scale != 0) {
12332        // Each of the extended elements need to be zeroable.
12333        if (!Zeroable[i])
12334          return SDValue();
12335
12336        // We no longer are in the anyext case.
12337        AnyExt = false;
12338        continue;
12339      }
12340
12341      // Each of the base elements needs to be consecutive indices into the
12342      // same input vector.
12343      SDValue V = M < NumElements ? V1 : V2;
12344      M = M % NumElements;
12345      if (!InputV) {
12346        InputV = V;
12347        Offset = M - (i / Scale);
12348      } else if (InputV != V)
12349        return SDValue(); // Flip-flopping inputs.
12350
12351      // Offset must start in the lowest 128-bit lane or at the start of an
12352      // upper lane.
12353      // FIXME: Is it ever worth allowing a negative base offset?
12354      if (!((0 <= Offset && Offset < NumEltsPerLane) ||
12355            (Offset % NumEltsPerLane) == 0))
12356        return SDValue();
12357
12358      // If we are offsetting, all referenced entries must come from the same
12359      // lane.
12360      if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
12361        return SDValue();
12362
12363      if ((M % NumElements) != (Offset + (i / Scale)))
12364        return SDValue(); // Non-consecutive strided elements.
12365      Matches++;
12366    }
12367
12368    // If we fail to find an input, we have a zero-shuffle which should always
12369    // have already been handled.
12370    // FIXME: Maybe handle this here in case during blending we end up with one?
12371    if (!InputV)
12372      return SDValue();
12373
12374    // If we are offsetting, don't extend if we only match a single input; we
12375    // can always do better by using a basic PSHUF or PUNPCK.
12376    if (Offset != 0 && Matches < 2)
12377      return SDValue();
12378
12379    return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
12380                                                 InputV, Mask, Subtarget, DAG);
12381  };
12382
12383  // The widest scale possible for extending is to a 64-bit integer.
12384  assert(Bits % 64 == 0 &&
12385         "The number of bits in a vector must be divisible by 64 on x86!");
12386  int NumExtElements = Bits / 64;
12387
12388  // Each iteration, try extending the elements half as much, but into twice as
12389  // many elements.
12390  for (; NumExtElements < NumElements; NumExtElements *= 2) {
12391    assert(NumElements % NumExtElements == 0 &&
12392           "The input vector size must be divisible by the extended size.");
12393    if (SDValue V = Lower(NumElements / NumExtElements))
12394      return V;
12395  }
12396
12397  // General extends failed, but 128-bit vectors may be able to use MOVQ.
12398  if (Bits != 128)
12399    return SDValue();
12400
12401  // Returns one of the source operands if the shuffle can be reduced to a
12402  // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
12403  auto CanZExtLowHalf = [&]() {
12404    for (int i = NumElements / 2; i != NumElements; ++i)
12405      if (!Zeroable[i])
12406        return SDValue();
12407    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
12408      return V1;
12409    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
12410      return V2;
12411    return SDValue();
12412  };
12413
12414  if (SDValue V = CanZExtLowHalf()) {
12415    V = DAG.getBitcast(MVT::v2i64, V);
12416    V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
12417    return DAG.getBitcast(VT, V);
12418  }
12419
12420  // No viable ext lowering found.
12421  return SDValue();
12422}
12423
12424/// Try to get a scalar value for a specific element of a vector.
12425///
12426/// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
12427static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
12428                                              SelectionDAG &DAG) {
12429  MVT VT = V.getSimpleValueType();
12430  MVT EltVT = VT.getVectorElementType();
12431  V = peekThroughBitcasts(V);
12432
12433  // If the bitcasts shift the element size, we can't extract an equivalent
12434  // element from it.
12435  MVT NewVT = V.getSimpleValueType();
12436  if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
12437    return SDValue();
12438
12439  if (V.getOpcode() == ISD::BUILD_VECTOR ||
12440      (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
12441    // Ensure the scalar operand is the same size as the destination.
12442    // FIXME: Add support for scalar truncation where possible.
12443    SDValue S = V.getOperand(Idx);
12444    if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
12445      return DAG.getBitcast(EltVT, S);
12446  }
12447
12448  return SDValue();
12449}
12450
12451/// Helper to test for a load that can be folded with x86 shuffles.
12452///
12453/// This is particularly important because the set of instructions varies
12454/// significantly based on whether the operand is a load or not.
12455static bool isShuffleFoldableLoad(SDValue V) {
12456  V = peekThroughBitcasts(V);
12457  return ISD::isNON_EXTLoad(V.getNode());
12458}
12459
12460/// Try to lower insertion of a single element into a zero vector.
12461///
12462/// This is a common pattern for which we have especially efficient lowerings
12463/// across all subtarget feature sets.
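/// For example, when V1 isn't zeroable a v4f32 mask [4, 1, 2, 3] is matched to
/// MOVSS, inserting V2's low element into V1; with a zeroable V1 the insertion
/// instead uses VZEXT_MOVL plus, if needed, a shift/shuffle into position.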
12464static SDValue lowerShuffleAsElementInsertion(
12465    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12466    const APInt &Zeroable, const X86Subtarget &Subtarget,
12467    SelectionDAG &DAG) {
12468  MVT ExtVT = VT;
12469  MVT EltVT = VT.getVectorElementType();
12470
12471  int V2Index =
12472      find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
12473      Mask.begin();
12474  bool IsV1Zeroable = true;
12475  for (int i = 0, Size = Mask.size(); i < Size; ++i)
12476    if (i != V2Index && !Zeroable[i]) {
12477      IsV1Zeroable = false;
12478      break;
12479    }
12480
12481  // Check for a single input from a SCALAR_TO_VECTOR node.
12482  // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
12483  // all the smarts here sunk into that routine. However, the current
12484  // lowering of BUILD_VECTOR makes that nearly impossible until the old
12485  // vector shuffle lowering is dead.
12486  SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
12487                                               DAG);
12488  if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
12489    // We need to zext the scalar if it is smaller than an i32.
12490    V2S = DAG.getBitcast(EltVT, V2S);
12491    if (EltVT == MVT::i8 || EltVT == MVT::i16) {
12492      // Using zext to expand a narrow element won't work for non-zero
12493      // insertions.
12494      if (!IsV1Zeroable)
12495        return SDValue();
12496
12497      // Zero-extend directly to i32.
12498      ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
12499      V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
12500    }
12501    V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
12502  } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
12503             EltVT == MVT::i16) {
12504    // Either not inserting from the low element of the input or the input
12505    // element size is too small to use VZEXT_MOVL to clear the high bits.
12506    return SDValue();
12507  }
12508
12509  if (!IsV1Zeroable) {
12510    // If V1 can't be treated as a zero vector we have fewer options to lower
12511    // this. We can't support integer vectors or non-zero targets cheaply, and
12512    // the V1 elements can't be permuted in any way.
12513    assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
12514    if (!VT.isFloatingPoint() || V2Index != 0)
12515      return SDValue();
12516    SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
12517    V1Mask[V2Index] = -1;
12518    if (!isNoopShuffleMask(V1Mask))
12519      return SDValue();
12520    if (!VT.is128BitVector())
12521      return SDValue();
12522
12523    // Otherwise, use MOVSD or MOVSS.
12524    assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
12525           "Only two types of floating point element types to handle!");
12526    return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
12527                       ExtVT, V1, V2);
12528  }
12529
12530  // This lowering only works for the low element with floating point vectors.
12531  if (VT.isFloatingPoint() && V2Index != 0)
12532    return SDValue();
12533
12534  V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
12535  if (ExtVT != VT)
12536    V2 = DAG.getBitcast(VT, V2);
12537
12538  if (V2Index != 0) {
12539    // If we have 4 or fewer lanes we can cheaply shuffle the element into
12540    // the desired position. Otherwise it is more efficient to do a vector
12541    // shift left. We know that we can do a vector shift left because all
12542    // the inputs are zero.
12543    if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
12544      SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
12545      V2Shuffle[V2Index] = 0;
12546      V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
12547    } else {
12548      V2 = DAG.getBitcast(MVT::v16i8, V2);
12549      V2 = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
12550                       DAG.getTargetConstant(
12551                           V2Index * EltVT.getSizeInBits() / 8, DL, MVT::i8));
12552      V2 = DAG.getBitcast(VT, V2);
12553    }
12554  }
12555  return V2;
12556}
12557
12558/// Try to lower broadcast of a single - truncated - integer element,
12559/// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
12560///
12561/// This assumes we have AVX2.
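/// For example, broadcasting i32 element 1 of a v2i64 build_vector becomes a
/// 32-bit SRL of the first i64 operand, a truncate to i32 and a VBROADCAST.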
12562static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
12563                                            int BroadcastIdx,
12564                                            const X86Subtarget &Subtarget,
12565                                            SelectionDAG &DAG) {
12566  assert(Subtarget.hasAVX2() &&
12567         "We can only lower integer broadcasts with AVX2!");
12568
12569  EVT EltVT = VT.getVectorElementType();
12570  EVT V0VT = V0.getValueType();
12571
12572  assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
12573  assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
12574
12575  EVT V0EltVT = V0VT.getVectorElementType();
12576  if (!V0EltVT.isInteger())
12577    return SDValue();
12578
12579  const unsigned EltSize = EltVT.getSizeInBits();
12580  const unsigned V0EltSize = V0EltVT.getSizeInBits();
12581
12582  // This is only a truncation if the original element type is larger.
12583  if (V0EltSize <= EltSize)
12584    return SDValue();
12585
12586  assert(((V0EltSize % EltSize) == 0) &&
12587         "Scalar type sizes must all be powers of 2 on x86!");
12588
12589  const unsigned V0Opc = V0.getOpcode();
12590  const unsigned Scale = V0EltSize / EltSize;
12591  const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
12592
12593  if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
12594      V0Opc != ISD::BUILD_VECTOR)
12595    return SDValue();
12596
12597  SDValue Scalar = V0.getOperand(V0BroadcastIdx);
12598
12599  // If we're extracting non-least-significant bits, shift so we can truncate.
12600  // Hopefully, we can fold away the trunc/srl/load into the broadcast.
12601  // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
12602  // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
12603  if (const int OffsetIdx = BroadcastIdx % Scale)
12604    Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
12605                         DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));
12606
12607  return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
12608                     DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
12609}
12610
12611/// Test whether this can be lowered with a single SHUFPS instruction.
12612///
12613/// This is used to disable more specialized lowerings when the shufps lowering
12614/// will happen to be efficient.
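/// That is, each half of the 4-element mask must use only one of the two
/// inputs, e.g. [0, 1, 6, 7] and [4, 5, 2, 3] qualify but [0, 5, 2, 6] doesn't.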
12615static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
12616  // This routine only handles 128-bit shufps.
12617  assert(Mask.size() == 4 && "Unsupported mask size!");
12618  assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
12619  assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
12620  assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
12621  assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
12622
12623  // To lower with a single SHUFPS we need to have the low half and high half
12624  // each requiring a single input.
12625  if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
12626    return false;
12627  if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
12628    return false;
12629
12630  return true;
12631}
12632
12633/// If we are extracting two 128-bit halves of a vector and shuffling the
12634/// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
12635/// multi-shuffle lowering.
12636static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
12637                                             SDValue N1, ArrayRef<int> Mask,
12638                                             SelectionDAG &DAG) {
12639  EVT VT = N0.getValueType();
12640  assert((VT.is128BitVector() &&
12641          (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
12642         "VPERM* family of shuffles requires 32-bit or 64-bit elements");
12643
12644  // Check that both sources are extracts of the same source vector.
12645  if (!N0.hasOneUse() || !N1.hasOneUse() ||
12646      N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12647      N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12648      N0.getOperand(0) != N1.getOperand(0))
12649    return SDValue();
12650
12651  SDValue WideVec = N0.getOperand(0);
12652  EVT WideVT = WideVec.getValueType();
12653  if (!WideVT.is256BitVector() || !isa<ConstantSDNode>(N0.getOperand(1)) ||
12654      !isa<ConstantSDNode>(N1.getOperand(1)))
12655    return SDValue();
12656
12657  // Match extracts of each half of the wide source vector. Commute the shuffle
12658  // if the extract of the low half is N1.
12659  unsigned NumElts = VT.getVectorNumElements();
12660  SmallVector<int, 4> NewMask(Mask.begin(), Mask.end());
12661  const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
12662  const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
12663  if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
12664    ShuffleVectorSDNode::commuteMask(NewMask);
12665  else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
12666    return SDValue();
12667
12668  // Final bailout: if the mask is simple, we are better off using an extract
12669  // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
12670  // because that avoids a constant load from memory.
12671  if (NumElts == 4 &&
12672      (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask)))
12673    return SDValue();
12674
12675  // Extend the shuffle mask with undef elements.
12676  NewMask.append(NumElts, -1);
12677
12678  // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
12679  SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
12680                                      NewMask);
12681  // This is free: ymm -> xmm.
12682  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
12683                     DAG.getIntPtrConstant(0, DL));
12684}
12685
12686/// Try to lower broadcast of a single element.
12687///
12688/// For convenience, this code also bundles all of the subtarget feature set
12689/// filtering. While a little annoying to re-dispatch on type here, there isn't
12690/// a convenient way to factor it out.
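///
/// A small illustrative case (hypothetical operands): for VT = v8i32 with
/// AVX2, a plain register input V1 and the mask <4, 4, 4, 4, 4, 4, 4, 4>, the
/// BitOffset of 128 means we first extract the high 128-bit subvector and
/// then emit roughly
///   (v8i32 X86ISD::VBROADCAST (v4i32 extract_subvector V1, 4))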
12691static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
12692                                       SDValue V2, ArrayRef<int> Mask,
12693                                       const X86Subtarget &Subtarget,
12694                                       SelectionDAG &DAG) {
12695  if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
12696        (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
12697        (Subtarget.hasAVX2() && VT.isInteger())))
12698    return SDValue();
12699
12700  // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
12701  // we can only broadcast from a register with AVX2.
12702  unsigned NumElts = Mask.size();
12703  unsigned NumEltBits = VT.getScalarSizeInBits();
12704  unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
12705                        ? X86ISD::MOVDDUP
12706                        : X86ISD::VBROADCAST;
12707  bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
12708
12709  // Check that the mask is a broadcast.
12710  int BroadcastIdx = -1;
12711  for (int i = 0; i != (int)NumElts; ++i) {
12712    SmallVector<int, 8> BroadcastMask(NumElts, i);
12713    if (isShuffleEquivalent(V1, V2, Mask, BroadcastMask)) {
12714      BroadcastIdx = i;
12715      break;
12716    }
12717  }
12718
12719  if (BroadcastIdx < 0)
12720    return SDValue();
12721  assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
12722                                            "a sorted mask where the broadcast "
12723                                            "comes from V1.");
12724
12725  // Go up the chain of (vector) values to find a scalar load that we can
12726  // combine with the broadcast.
12727  int BitOffset = BroadcastIdx * NumEltBits;
12728  SDValue V = V1;
12729  for (;;) {
12730    switch (V.getOpcode()) {
12731    case ISD::BITCAST: {
12732      V = V.getOperand(0);
12733      continue;
12734    }
12735    case ISD::CONCAT_VECTORS: {
12736      int OpBitWidth = V.getOperand(0).getValueSizeInBits();
12737      int OpIdx = BitOffset / OpBitWidth;
12738      V = V.getOperand(OpIdx);
12739      BitOffset %= OpBitWidth;
12740      continue;
12741    }
12742    case ISD::INSERT_SUBVECTOR: {
12743      SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
12744      auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
12745      if (!ConstantIdx)
12746        break;
12747
12748      int EltBitWidth = VOuter.getScalarValueSizeInBits();
12749      int Idx = (int)ConstantIdx->getZExtValue();
12750      int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
12751      int BeginOffset = Idx * EltBitWidth;
12752      int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
12753      if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
12754        BitOffset -= BeginOffset;
12755        V = VInner;
12756      } else {
12757        V = VOuter;
12758      }
12759      continue;
12760    }
12761    }
12762    break;
12763  }
12764  assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
12765  BroadcastIdx = BitOffset / NumEltBits;
12766
12767  // Do we need to bitcast the source to retrieve the original broadcast index?
12768  bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;
12769
12770  // Check if this is a broadcast of a scalar. We special case lowering
12771  // for scalars so that we can more effectively fold with loads.
12772  // If the original value has a larger element type than the shuffle, the
12773  // broadcast element is in essence truncated. Make that explicit to ease
12774  // folding.
12775  if (BitCastSrc && VT.isInteger())
12776    if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
12777            DL, VT, V, BroadcastIdx, Subtarget, DAG))
12778      return TruncBroadcast;
12779
12780  MVT BroadcastVT = VT;
12781
12782  // Also check the simpler case, where we can directly reuse the scalar.
12783  if (!BitCastSrc &&
12784      ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
12785       (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
12786    V = V.getOperand(BroadcastIdx);
12787
12788    // If we can't broadcast from a register, check that the input is a load.
12789    if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
12790      return SDValue();
12791  } else if (MayFoldLoad(V) && cast<LoadSDNode>(V)->isSimple()) {
12792    // 32-bit targets need to load i64 as a f64 and then bitcast the result.
12793    if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
12794      BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
12795      Opcode = (BroadcastVT.is128BitVector() && !Subtarget.hasAVX2())
12796                   ? X86ISD::MOVDDUP
12797                   : Opcode;
12798    }
12799
12800    // If we are broadcasting a load that is only used by the shuffle
12801    // then we can reduce the vector load to the broadcasted scalar load.
12802    LoadSDNode *Ld = cast<LoadSDNode>(V);
12803    SDValue BaseAddr = Ld->getOperand(1);
12804    EVT SVT = BroadcastVT.getScalarType();
12805    unsigned Offset = BroadcastIdx * SVT.getStoreSize();
12806    assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
12807    SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
12808    V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
12809                    DAG.getMachineFunction().getMachineMemOperand(
12810                        Ld->getMemOperand(), Offset, SVT.getStoreSize()));
12811    DAG.makeEquivalentMemoryOrdering(Ld, V);
12812  } else if (!BroadcastFromReg) {
12813    // We can't broadcast from a vector register.
12814    return SDValue();
12815  } else if (BitOffset != 0) {
12816    // We can only broadcast from the zero-element of a vector register,
12817    // but it can be advantageous to broadcast from the zero-element of a
12818    // subvector.
12819    if (!VT.is256BitVector() && !VT.is512BitVector())
12820      return SDValue();
12821
12822    // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
12823    if (VT == MVT::v4f64 || VT == MVT::v4i64)
12824      return SDValue();
12825
12826    // Only broadcast the zero-element of a 128-bit subvector.
12827    if ((BitOffset % 128) != 0)
12828      return SDValue();
12829
12830    assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
12831           "Unexpected bit-offset");
12832    assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
12833           "Unexpected vector size");
12834    unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
12835    V = extract128BitVector(V, ExtractIdx, DAG, DL);
12836  }
12837
12838  if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
12839    V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
12840                    DAG.getBitcast(MVT::f64, V));
12841
12842  // Bitcast back to the same scalar type as BroadcastVT.
12843  if (V.getValueType().getScalarType() != BroadcastVT.getScalarType()) {
12844    assert(NumEltBits == BroadcastVT.getScalarSizeInBits() &&
12845           "Unexpected vector element size");
12846    MVT ExtVT;
12847    if (V.getValueType().isVector()) {
12848      unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
12849      ExtVT = MVT::getVectorVT(BroadcastVT.getScalarType(), NumSrcElts);
12850    } else {
12851      ExtVT = BroadcastVT.getScalarType();
12852    }
12853    V = DAG.getBitcast(ExtVT, V);
12854  }
12855
12856  // 32-bit targets need to load i64 as a f64 and then bitcast the result.
12857  if (!Subtarget.is64Bit() && V.getValueType() == MVT::i64) {
12858    V = DAG.getBitcast(MVT::f64, V);
12859    unsigned NumBroadcastElts = BroadcastVT.getVectorNumElements();
12860    BroadcastVT = MVT::getVectorVT(MVT::f64, NumBroadcastElts);
12861  }
12862
12863  // We only support broadcasting from 128-bit vectors to minimize the
12864  // number of patterns we need to deal with in isel. So extract down to
12865  // 128-bits, removing as many bitcasts as possible.
12866  if (V.getValueSizeInBits() > 128) {
12867    MVT ExtVT = V.getSimpleValueType().getScalarType();
12868    ExtVT = MVT::getVectorVT(ExtVT, 128 / ExtVT.getScalarSizeInBits());
12869    V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
12870    V = DAG.getBitcast(ExtVT, V);
12871  }
12872
12873  return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
12874}
12875
12876// Check for whether we can use INSERTPS to perform the shuffle. We only use
12877// INSERTPS when the V1 elements are already in the correct locations
12878// because otherwise we can just always use two SHUFPS instructions which
12879// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
12880// perform INSERTPS if a single V1 element is out of place and all V2
12881// elements are zeroable.
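//
// As an illustration (the mask and zeroable bits here are hypothetical): for
// Mask = <4, 1, u, 3> with element 2 known to be zeroable, V2's element 0 is
// inserted into lane 0 of V1 and lane 2 is zeroed, giving
// InsertPSMask = (0 << 6) | (0 << 4) | 0b0100 = 0x04.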
12882static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
12883                                   unsigned &InsertPSMask,
12884                                   const APInt &Zeroable,
12885                                   ArrayRef<int> Mask, SelectionDAG &DAG) {
12886  assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
12887  assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
12888  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12889
12890  // Attempt to match INSERTPS with one element from VA or VB being
12891  // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
12892  // are updated.
12893  auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
12894                             ArrayRef<int> CandidateMask) {
12895    unsigned ZMask = 0;
12896    int VADstIndex = -1;
12897    int VBDstIndex = -1;
12898    bool VAUsedInPlace = false;
12899
12900    for (int i = 0; i < 4; ++i) {
12901      // Synthesize a zero mask from the zeroable elements (includes undefs).
12902      if (Zeroable[i]) {
12903        ZMask |= 1 << i;
12904        continue;
12905      }
12906
12907      // Flag if we use any VA inputs in place.
12908      if (i == CandidateMask[i]) {
12909        VAUsedInPlace = true;
12910        continue;
12911      }
12912
12913      // We can only insert a single non-zeroable element.
12914      if (VADstIndex >= 0 || VBDstIndex >= 0)
12915        return false;
12916
12917      if (CandidateMask[i] < 4) {
12918        // VA input out of place for insertion.
12919        VADstIndex = i;
12920      } else {
12921        // VB input for insertion.
12922        VBDstIndex = i;
12923      }
12924    }
12925
12926    // Don't bother if we have no (non-zeroable) element for insertion.
12927    if (VADstIndex < 0 && VBDstIndex < 0)
12928      return false;
12929
12930    // Determine element insertion src/dst indices. The src index is from the
12931    // start of the inserted vector, not the start of the concatenated vector.
12932    unsigned VBSrcIndex = 0;
12933    if (VADstIndex >= 0) {
12934      // If we have a VA input out of place, we use VA as the V2 element
12935      // insertion and don't use the original V2 at all.
12936      VBSrcIndex = CandidateMask[VADstIndex];
12937      VBDstIndex = VADstIndex;
12938      VB = VA;
12939    } else {
12940      VBSrcIndex = CandidateMask[VBDstIndex] - 4;
12941    }
12942
12943    // If no V1 inputs are used in place, then the result is created only from
12944    // the zero mask and the V2 insertion - so remove V1 dependency.
12945    if (!VAUsedInPlace)
12946      VA = DAG.getUNDEF(MVT::v4f32);
12947
12948    // Update V1, V2 and InsertPSMask accordingly.
12949    V1 = VA;
12950    V2 = VB;
12951
12952    // Insert the V2 element into the desired position.
12953    InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
12954    assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
12955    return true;
12956  };
12957
12958  if (matchAsInsertPS(V1, V2, Mask))
12959    return true;
12960
12961  // Commute and try again.
12962  SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
12963  ShuffleVectorSDNode::commuteMask(CommutedMask);
12964  if (matchAsInsertPS(V2, V1, CommutedMask))
12965    return true;
12966
12967  return false;
12968}
12969
12970static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
12971                                      ArrayRef<int> Mask, const APInt &Zeroable,
12972                                      SelectionDAG &DAG) {
12973  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12974  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12975
12976  // Attempt to match the insertps pattern.
12977  unsigned InsertPSMask;
12978  if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
12979    return SDValue();
12980
12981  // Insert the V2 element into the desired position.
12982  return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
12983                     DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
12984}
12985
12986/// Try to lower a shuffle as a permute of the inputs followed by an
12987/// UNPCK instruction.
12988///
12989/// This specifically targets cases where we end up with alternating between
12990/// the two inputs, and so can permute them into something that feeds a single
12991/// UNPCK instruction. Note that this routine only targets integer vectors
12992/// because for floating point vectors we have a generalized SHUFPS lowering
12993/// strategy that handles everything that doesn't *exactly* match an unpack,
12994/// making this clever lowering unnecessary.
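///
/// As a small worked example (hypothetical mask): for a v4i32 shuffle with
/// Mask = <0, 4, 2, 6>, both inputs are first permuted with <0, 2, u, u> and
/// then combined with a single UNPCKL, producing
/// <V1[0], V2[0], V1[2], V2[2]> as required.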
12995static SDValue lowerShuffleAsPermuteAndUnpack(
12996    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12997    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
12998  assert(!VT.isFloatingPoint() &&
12999         "This routine only supports integer vectors.");
13000  assert(VT.is128BitVector() &&
13001         "This routine only works on 128-bit vectors.");
13002  assert(!V2.isUndef() &&
13003         "This routine should only be used when blending two inputs.");
13004  assert(Mask.size() >= 2 && "Single element masks are invalid.");
13005
13006  int Size = Mask.size();
13007
13008  int NumLoInputs =
13009      count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
13010  int NumHiInputs =
13011      count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
13012
13013  bool UnpackLo = NumLoInputs >= NumHiInputs;
13014
13015  auto TryUnpack = [&](int ScalarSize, int Scale) {
13016    SmallVector<int, 16> V1Mask((unsigned)Size, -1);
13017    SmallVector<int, 16> V2Mask((unsigned)Size, -1);
13018
13019    for (int i = 0; i < Size; ++i) {
13020      if (Mask[i] < 0)
13021        continue;
13022
13023      // Each element of the unpack contains Scale elements from this mask.
13024      int UnpackIdx = i / Scale;
13025
13026      // We only handle the case where V1 feeds the first slots of the unpack.
13027      // We rely on canonicalization to ensure this is the case.
13028      if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
13029        return SDValue();
13030
13031      // Setup the mask for this input. The indexing is tricky as we have to
13032      // handle the unpack stride.
13033      SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
13034      VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
13035          Mask[i] % Size;
13036    }
13037
13038    // If we will have to shuffle both inputs to use the unpack, check whether
13039    // we can just unpack first and shuffle the result. If so, skip this unpack.
13040    if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
13041        !isNoopShuffleMask(V2Mask))
13042      return SDValue();
13043
13044    // Shuffle the inputs into place.
13045    V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
13046    V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
13047
13048    // Cast the inputs to the type we will use to unpack them.
13049    MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
13050    V1 = DAG.getBitcast(UnpackVT, V1);
13051    V2 = DAG.getBitcast(UnpackVT, V2);
13052
13053    // Unpack the inputs and cast the result back to the desired type.
13054    return DAG.getBitcast(
13055        VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
13056                        UnpackVT, V1, V2));
13057  };
13058
13059  // We try each unpack from the largest to the smallest to try and find one
13060  // that fits this mask.
13061  int OrigScalarSize = VT.getScalarSizeInBits();
13062  for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
13063    if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
13064      return Unpack;
13065
13066  // If we're shuffling with a zero vector then we're better off not doing
13067  // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
13068  if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
13069      ISD::isBuildVectorAllZeros(V2.getNode()))
13070    return SDValue();
13071
13072  // If none of the unpack-rooted lowerings worked (or were profitable) try an
13073  // initial unpack.
13074  if (NumLoInputs == 0 || NumHiInputs == 0) {
13075    assert((NumLoInputs > 0 || NumHiInputs > 0) &&
13076           "We have to have *some* inputs!");
13077    int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
13078
13079    // FIXME: We could consider the total complexity of the permute of each
13080    // possible unpacking. Or at the least we should consider how many
13081    // half-crossings are created.
13082    // FIXME: We could consider commuting the unpacks.
13083
13084    SmallVector<int, 32> PermMask((unsigned)Size, -1);
13085    for (int i = 0; i < Size; ++i) {
13086      if (Mask[i] < 0)
13087        continue;
13088
13089      assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
13090
13091      PermMask[i] =
13092          2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
13093    }
13094    return DAG.getVectorShuffle(
13095        VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
13096                            DL, VT, V1, V2),
13097        DAG.getUNDEF(VT), PermMask);
13098  }
13099
13100  return SDValue();
13101}
13102
13103/// Handle lowering of 2-lane 64-bit floating point shuffles.
13104///
13105/// This is the basis function for the 2-lane 64-bit shuffles as we have full
13106/// support for floating point shuffles but not integer shuffles. These
13107/// instructions will incur a domain crossing penalty on some chips though so
13108/// it is better to avoid lowering through this for integer vectors where
13109/// possible.
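///
/// For reference (hypothetical masks, ignoring the earlier special cases): the
/// unary mask <1, 1> encodes the immediate SHUFPDMask = 3, and the two-input
/// mask <1, 2> that survives the blend/unpack checks below encodes
/// SHUFPDMask = 1, selecting <V1[1], V2[0]>.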
13110static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13111                                 const APInt &Zeroable, SDValue V1, SDValue V2,
13112                                 const X86Subtarget &Subtarget,
13113                                 SelectionDAG &DAG) {
13114  assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
13115  assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
13116  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
13117
13118  if (V2.isUndef()) {
13119    // Check for being able to broadcast a single element.
13120    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
13121                                                    Mask, Subtarget, DAG))
13122      return Broadcast;
13123
13124    // Straight shuffle of a single input vector. Simulate this by using the
13125    // single input as both of the "inputs" to this instruction.
13126    unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
13127
13128    if (Subtarget.hasAVX()) {
13129      // If we have AVX, we can use VPERMILPD which will allow folding a load
13130      // into the shuffle.
13131      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
13132                         DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
13133    }
13134
13135    return DAG.getNode(
13136        X86ISD::SHUFP, DL, MVT::v2f64,
13137        Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
13138        Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
13139        DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
13140  }
13141  assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
13142  assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
13143  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
13144  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
13145
13146  if (Subtarget.hasAVX2())
13147    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13148      return Extract;
13149
13150  // When loading a scalar and then shuffling it into a vector we can often do
13151  // the insertion cheaply.
13152  if (SDValue Insertion = lowerShuffleAsElementInsertion(
13153          DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
13154    return Insertion;
13155  // Try inverting the insertion since for v2 masks it is easy to do and we
13156  // can't reliably sort the mask one way or the other.
13157  int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
13158                        Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
13159  if (SDValue Insertion = lowerShuffleAsElementInsertion(
13160          DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
13161    return Insertion;
13162
13163  // Try to use one of the special instruction patterns to handle two common
13164  // blend patterns if a zero-blend above didn't work.
13165  if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
13166      isShuffleEquivalent(V1, V2, Mask, {1, 3}))
13167    if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
13168      // We can either use a special instruction to load over the low double or
13169      // to move just the low double.
13170      return DAG.getNode(
13171          X86ISD::MOVSD, DL, MVT::v2f64, V2,
13172          DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
13173
13174  if (Subtarget.hasSSE41())
13175    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
13176                                            Zeroable, Subtarget, DAG))
13177      return Blend;
13178
13179  // Use dedicated unpack instructions for masks that match their pattern.
13180  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
13181    return V;
13182
13183  unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
13184  return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
13185                     DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
13186}
13187
13188/// Handle lowering of 2-lane 64-bit integer shuffles.
13189///
13190/// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
13191/// the integer unit to minimize domain crossing penalties. However, for blends
13192/// it falls back to the floating point shuffle operation with appropriate bit
13193/// casting.
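///
/// As a small example (hypothetical mask): a unary mask <1, 0> is not a
/// broadcast, so it is widened to the v4i32 mask <2, 3, 0, 1> and emitted as
/// a PSHUFD with immediate 0x4E.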
13194static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13195                                 const APInt &Zeroable, SDValue V1, SDValue V2,
13196                                 const X86Subtarget &Subtarget,
13197                                 SelectionDAG &DAG) {
13198  assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
13199  assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
13200  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
13201
13202  if (V2.isUndef()) {
13203    // Check for being able to broadcast a single element.
13204    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
13205                                                    Mask, Subtarget, DAG))
13206      return Broadcast;
13207
13208    // Straight shuffle of a single input vector. For everything from SSE2
13209    // onward this has a single fast instruction with no scary immediates.
13210    // We have to map the mask as it is actually a v4i32 shuffle instruction.
13211    V1 = DAG.getBitcast(MVT::v4i32, V1);
13212    int WidenedMask[4] = {
13213        std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
13214        std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
13215    return DAG.getBitcast(
13216        MVT::v2i64,
13217        DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
13218                    getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
13219  }
13220  assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
13221  assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
13222  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
13223  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
13224
13225  if (Subtarget.hasAVX2())
13226    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13227      return Extract;
13228
13229  // Try to use shift instructions.
13230  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
13231                                          Zeroable, Subtarget, DAG))
13232    return Shift;
13233
13234  // When loading a scalar and then shuffling it into a vector we can often do
13235  // the insertion cheaply.
13236  if (SDValue Insertion = lowerShuffleAsElementInsertion(
13237          DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
13238    return Insertion;
13239  // Try inverting the insertion since for v2 masks it is easy to do and we
13240  // can't reliably sort the mask one way or the other.
13241  int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
13242  if (SDValue Insertion = lowerShuffleAsElementInsertion(
13243          DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
13244    return Insertion;
13245
13246  // We have different paths for blend lowering, but they all must use the
13247  // *exact* same predicate.
13248  bool IsBlendSupported = Subtarget.hasSSE41();
13249  if (IsBlendSupported)
13250    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
13251                                            Zeroable, Subtarget, DAG))
13252      return Blend;
13253
13254  // Use dedicated unpack instructions for masks that match their pattern.
13255  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
13256    return V;
13257
13258  // Try to use byte rotation instructions.
13259  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
13260  if (Subtarget.hasSSSE3()) {
13261    if (Subtarget.hasVLX())
13262      if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v2i64, V1, V2, Mask,
13263                                                Subtarget, DAG))
13264        return Rotate;
13265
13266    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
13267                                                  Subtarget, DAG))
13268      return Rotate;
13269  }
13270
13271  // If we have direct support for blends, we should lower by decomposing into
13272  // a permute. That will be faster than the domain cross.
13273  if (IsBlendSupported)
13274    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2, Mask,
13275                                                Subtarget, DAG);
13276
13277  // We implement this with SHUFPD which is pretty lame because it will likely
13278  // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
13279  // However, all the alternatives are still more cycles and newer chips don't
13280  // have this problem. It would be really nice if x86 had better shuffles here.
13281  V1 = DAG.getBitcast(MVT::v2f64, V1);
13282  V2 = DAG.getBitcast(MVT::v2f64, V2);
13283  return DAG.getBitcast(MVT::v2i64,
13284                        DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
13285}
13286
13287/// Lower a vector shuffle using the SHUFPS instruction.
13288///
13289/// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
13290/// It makes no assumptions about whether this is the *best* lowering; it simply
13291/// uses it.
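///
/// A worked example (hypothetical mask): for Mask = <0, 1, 6, 7> the
/// NumV2Elements == 2 case keeps LowV = V1 and HighV = V2, rewrites the mask
/// to <0, 1, 2, 3>, and emits SHUFPS with immediate 0xE4, i.e.
/// <V1[0], V1[1], V2[2], V2[3]>.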
13292static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
13293                                      ArrayRef<int> Mask, SDValue V1,
13294                                      SDValue V2, SelectionDAG &DAG) {
13295  SDValue LowV = V1, HighV = V2;
13296  int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
13297
13298  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13299
13300  if (NumV2Elements == 1) {
13301    int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();
13302
13303    // Compute the index adjacent to V2Index and in the same half by toggling
13304    // the low bit.
13305    int V2AdjIndex = V2Index ^ 1;
13306
13307    if (Mask[V2AdjIndex] < 0) {
13308      // Handles all the cases where we have a single V2 element and an undef.
13309      // This will only ever happen in the high lanes because we commute the
13310      // vector otherwise.
13311      if (V2Index < 2)
13312        std::swap(LowV, HighV);
13313      NewMask[V2Index] -= 4;
13314    } else {
13315      // Handle the case where the V2 element ends up adjacent to a V1 element.
13316      // To make this work, blend them together as the first step.
13317      int V1Index = V2AdjIndex;
13318      int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
13319      V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
13320                       getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
13321
13322      // Now proceed to reconstruct the final blend as we have the necessary
13323      // high or low half formed.
13324      if (V2Index < 2) {
13325        LowV = V2;
13326        HighV = V1;
13327      } else {
13328        HighV = V2;
13329      }
13330      NewMask[V1Index] = 2; // We put the V1 element in V2[2].
13331      NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
13332    }
13333  } else if (NumV2Elements == 2) {
13334    if (Mask[0] < 4 && Mask[1] < 4) {
13335      // Handle the easy case where we have V1 in the low lanes and V2 in the
13336      // high lanes.
13337      NewMask[2] -= 4;
13338      NewMask[3] -= 4;
13339    } else if (Mask[2] < 4 && Mask[3] < 4) {
13340      // We also handle the reversed case because this utility may get called
13341      // when we detect a SHUFPS pattern but can't easily commute the shuffle to
13342      // arrange things in the right direction.
13343      NewMask[0] -= 4;
13344      NewMask[1] -= 4;
13345      HighV = V1;
13346      LowV = V2;
13347    } else {
13348      // We have a mixture of V1 and V2 in both low and high lanes. Rather than
13349      // trying to place elements directly, just blend them and set up the final
13350      // shuffle to place them.
13351
13352      // The first two blend mask elements are for V1, the second two are for
13353      // V2.
13354      int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
13355                          Mask[2] < 4 ? Mask[2] : Mask[3],
13356                          (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
13357                          (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
13358      V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
13359                       getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
13360
13361      // Now we do a normal shuffle of V1 by giving V1 as both operands to
13362      // a blend.
13363      LowV = HighV = V1;
13364      NewMask[0] = Mask[0] < 4 ? 0 : 2;
13365      NewMask[1] = Mask[0] < 4 ? 2 : 0;
13366      NewMask[2] = Mask[2] < 4 ? 1 : 3;
13367      NewMask[3] = Mask[2] < 4 ? 3 : 1;
13368    }
13369  }
13370  return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
13371                     getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
13372}
13373
13374/// Lower 4-lane 32-bit floating point shuffles.
13375///
13376/// Uses instructions exclusively from the floating point unit to minimize
13377/// domain crossing penalties, as these are sufficient to implement all v4f32
13378/// shuffles.
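///
/// For instance (hypothetical single-input mask): <3, 2, 1, 0> becomes
/// VPERMILPS with immediate 0x1B on AVX, or a SHUFPS of V1 with itself and
/// the same immediate otherwise.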
13379static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13380                                 const APInt &Zeroable, SDValue V1, SDValue V2,
13381                                 const X86Subtarget &Subtarget,
13382                                 SelectionDAG &DAG) {
13383  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
13384  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
13385  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
13386
13387  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13388
13389  if (NumV2Elements == 0) {
13390    // Check for being able to broadcast a single element.
13391    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
13392                                                    Mask, Subtarget, DAG))
13393      return Broadcast;
13394
13395    // Use even/odd duplicate instructions for masks that match their pattern.
13396    if (Subtarget.hasSSE3()) {
13397      if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
13398        return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
13399      if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
13400        return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
13401    }
13402
13403    if (Subtarget.hasAVX()) {
13404      // If we have AVX, we can use VPERMILPS which will allow folding a load
13405      // into the shuffle.
13406      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
13407                         getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13408    }
13409
13410    // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
13411    // in SSE1 because otherwise they are widened to v2f64 and never get here.
13412    if (!Subtarget.hasSSE2()) {
13413      if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1}))
13414        return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
13415      if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 2, 3}))
13416        return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
13417    }
13418
13419    // Otherwise, use a straight shuffle of a single input vector. We pass the
13420    // input vector to both operands to simulate this with a SHUFPS.
13421    return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
13422                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13423  }
13424
13425  if (Subtarget.hasAVX2())
13426    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13427      return Extract;
13428
13429  // There are special ways we can lower some single-element blends. However, we
13430  // have custom ways we can lower more complex single-element blends below that
13431  // we defer to if both this and BLENDPS fail to match, so restrict this to
13432  // when the V2 input is targeting element 0 of the mask -- that is the fast
13433  // case here.
13434  if (NumV2Elements == 1 && Mask[0] >= 4)
13435    if (SDValue V = lowerShuffleAsElementInsertion(
13436            DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13437      return V;
13438
13439  if (Subtarget.hasSSE41()) {
13440    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
13441                                            Zeroable, Subtarget, DAG))
13442      return Blend;
13443
13444    // Use INSERTPS if we can complete the shuffle efficiently.
13445    if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
13446      return V;
13447
13448    if (!isSingleSHUFPSMask(Mask))
13449      if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
13450                                                            V2, Mask, DAG))
13451        return BlendPerm;
13452  }
13453
13454  // Use low/high mov instructions. These are only valid in SSE1 because
13455  // otherwise they are widened to v2f64 and never get here.
13456  if (!Subtarget.hasSSE2()) {
13457    if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
13458      return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
13459    if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
13460      return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
13461  }
13462
13463  // Use dedicated unpack instructions for masks that match their pattern.
13464  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
13465    return V;
13466
13467  // Otherwise fall back to a SHUFPS lowering strategy.
13468  return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
13469}
13470
13471/// Lower 4-lane i32 vector shuffles.
13472///
13473/// We try to handle these with integer-domain shuffles where we can, but for
13474/// blends we use the floating point domain blend instructions.
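///
/// For instance (hypothetical single-input mask, with no zeroable elements):
/// <2, 3, 0, 1> is lowered directly to PSHUFD with immediate 0x4E.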
13475static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13476                                 const APInt &Zeroable, SDValue V1, SDValue V2,
13477                                 const X86Subtarget &Subtarget,
13478                                 SelectionDAG &DAG) {
13479  assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13480  assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13481  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
13482
13483  // Whenever we can lower this as a zext, that instruction is strictly faster
13484  // than any alternative. It also allows us to fold memory operands into the
13485  // shuffle in many cases.
13486  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
13487                                                   Zeroable, Subtarget, DAG))
13488    return ZExt;
13489
13490  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13491
13492  if (NumV2Elements == 0) {
13493    // Try to use broadcast unless the mask only has one non-undef element.
13494    if (count_if(Mask, [](int M) { return M >= 0 && M < 4; }) > 1) {
13495      if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
13496                                                      Mask, Subtarget, DAG))
13497        return Broadcast;
13498    }
13499
13500    // Straight shuffle of a single input vector. For everything from SSE2
13501    // onward this has a single fast instruction with no scary immediates.
13502    // We coerce the shuffle pattern to be compatible with UNPCK instructions
13503    // but we aren't actually going to use the UNPCK instruction because doing
13504    // so prevents folding a load into this instruction or making a copy.
13505    const int UnpackLoMask[] = {0, 0, 1, 1};
13506    const int UnpackHiMask[] = {2, 2, 3, 3};
13507    if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
13508      Mask = UnpackLoMask;
13509    else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
13510      Mask = UnpackHiMask;
13511
13512    return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
13513                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13514  }
13515
13516  if (Subtarget.hasAVX2())
13517    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13518      return Extract;
13519
13520  // Try to use shift instructions.
13521  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
13522                                          Zeroable, Subtarget, DAG))
13523    return Shift;
13524
13525  // There are special ways we can lower some single-element blends.
13526  if (NumV2Elements == 1)
13527    if (SDValue V = lowerShuffleAsElementInsertion(
13528            DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13529      return V;
13530
13531  // We have different paths for blend lowering, but they all must use the
13532  // *exact* same predicate.
13533  bool IsBlendSupported = Subtarget.hasSSE41();
13534  if (IsBlendSupported)
13535    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
13536                                            Zeroable, Subtarget, DAG))
13537      return Blend;
13538
13539  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
13540                                             Zeroable, Subtarget, DAG))
13541    return Masked;
13542
13543  // Use dedicated unpack instructions for masks that match their pattern.
13544  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
13545    return V;
13546
13547  // Try to use byte rotation instructions.
13548  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
13549  if (Subtarget.hasSSSE3()) {
13550    if (Subtarget.hasVLX())
13551      if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v4i32, V1, V2, Mask,
13552                                                Subtarget, DAG))
13553        return Rotate;
13554
13555    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
13556                                                  Subtarget, DAG))
13557      return Rotate;
13558  }
13559
13560  // Assume that a single SHUFPS is faster than an alternative sequence of
13561  // multiple instructions (even if the CPU has a domain penalty).
13562  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
13563  if (!isSingleSHUFPSMask(Mask)) {
13564    // If we have direct support for blends, we should lower by decomposing into
13565    // a permute. That will be faster than the domain cross.
13566    if (IsBlendSupported)
13567      return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2, Mask,
13568                                                  Subtarget, DAG);
13569
13570    // Try to lower by permuting the inputs into an unpack instruction.
13571    if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
13572                                                        Mask, Subtarget, DAG))
13573      return Unpack;
13574  }
13575
13576  // We implement this with SHUFPS because it can blend from two vectors.
13577  // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
13578  // up the inputs, bypassing domain shift penalties that we would incur if we
13579  // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
13580  // relevant.
13581  SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
13582  SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
13583  SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
13584  return DAG.getBitcast(MVT::v4i32, ShufPS);
13585}
13586
13587/// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
13588/// shuffle lowering, and the most complex part.
13589///
13590/// The lowering strategy is to try to form pairs of input lanes which are
13591/// targeted at the same half of the final vector, and then use a dword shuffle
13592/// to place them onto the right half, and finally unpack the paired lanes into
13593/// their final position.
13594///
13595/// The exact breakdown of how to form these dword pairs and align them on the
13596/// correct sides is really tricky. See the comments within the function for
13597/// more of the details.
13598///
13599/// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
13600/// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
13601/// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
13602/// vector, form the analogous 128-bit 8-element Mask.
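///
/// Two immediate illustrations (hypothetical masks): <2, 1, 0, 3, 4, 5, 6, 7>
/// matches the PSHUFLW fast path directly (immediate 0xC6), and
/// <0, 1, 2, 3, 7, 6, 5, 4> matches the PSHUFHW fast path (immediate 0x1B).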
13603static SDValue lowerV8I16GeneralSingleInputShuffle(
13604    const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
13605    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
13606  assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
13607  MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
13608
13609  assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
13610  MutableArrayRef<int> LoMask = Mask.slice(0, 4);
13611  MutableArrayRef<int> HiMask = Mask.slice(4, 4);
13612
13613  // Attempt to directly match PSHUFLW or PSHUFHW.
13614  if (isUndefOrInRange(LoMask, 0, 4) &&
13615      isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
13616    return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13617                       getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13618  }
13619  if (isUndefOrInRange(HiMask, 4, 8) &&
13620      isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
13621    for (int i = 0; i != 4; ++i)
13622      HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
13623    return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13624                       getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
13625  }
13626
13627  SmallVector<int, 4> LoInputs;
13628  copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
13629  array_pod_sort(LoInputs.begin(), LoInputs.end());
13630  LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
13631  SmallVector<int, 4> HiInputs;
13632  copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
13633  array_pod_sort(HiInputs.begin(), HiInputs.end());
13634  HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
13635  int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
13636  int NumHToL = LoInputs.size() - NumLToL;
13637  int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
13638  int NumHToH = HiInputs.size() - NumLToH;
13639  MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
13640  MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
13641  MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
13642  MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
13643
13644  // If we are shuffling values from one half - check how many different DWORD
13645  // pairs we need to create. If only 1 or 2 then we can perform this as a
13646  // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
13647  auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
13648                               ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
13649    V = DAG.getNode(ShufWOp, DL, VT, V,
13650                    getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13651    V = DAG.getBitcast(PSHUFDVT, V);
13652    V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
13653                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
13654    return DAG.getBitcast(VT, V);
13655  };
13656
13657  if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
13658    int PSHUFDMask[4] = { -1, -1, -1, -1 };
13659    SmallVector<std::pair<int, int>, 4> DWordPairs;
13660    int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);
13661
13662    // Collect the different DWORD pairs.
13663    for (int DWord = 0; DWord != 4; ++DWord) {
13664      int M0 = Mask[2 * DWord + 0];
13665      int M1 = Mask[2 * DWord + 1];
13666      M0 = (M0 >= 0 ? M0 % 4 : M0);
13667      M1 = (M1 >= 0 ? M1 % 4 : M1);
13668      if (M0 < 0 && M1 < 0)
13669        continue;
13670
13671      bool Match = false;
13672      for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
13673        auto &DWordPair = DWordPairs[j];
13674        if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
13675            (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
13676          DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
13677          DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
13678          PSHUFDMask[DWord] = DOffset + j;
13679          Match = true;
13680          break;
13681        }
13682      }
13683      if (!Match) {
13684        PSHUFDMask[DWord] = DOffset + DWordPairs.size();
13685        DWordPairs.push_back(std::make_pair(M0, M1));
13686      }
13687    }
13688
13689    if (DWordPairs.size() <= 2) {
13690      DWordPairs.resize(2, std::make_pair(-1, -1));
13691      int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
13692                              DWordPairs[1].first, DWordPairs[1].second};
13693      if ((NumHToL + NumHToH) == 0)
13694        return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
13695      if ((NumLToL + NumLToH) == 0)
13696        return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
13697    }
13698  }
13699
13700  // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
13701  // such inputs we can swap two of the dwords across the half mark and end up
13702  // with <=2 inputs to each half in each half. Once there, we can fall through
13703  // to the generic code below. For example:
13704  //
13705  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13706  // Mask:  [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
13707  //
13708  // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
13709  // and an existing 2-into-2 on the other half. In this case we may have to
13710  // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
13711  // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
13712  // Fortunately, we don't have to handle anything but a 2-into-2 pattern
13713  // because any other situation (including a 3-into-1 or 1-into-3 in the other
13714  // half than the one we target for fixing) will be fixed when we re-enter this
13715  // path. Any sequence of PSHUFD instructions that results will also be
13716  // combined into a single instruction. Here is an example of the tricky case:
13717  //
13718  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13719  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
13720  //
13721  // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
13722  //
13723  // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
13724  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
13725  //
13726  // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
13727  // Mask:  [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
13728  //
13729  // The result is fine to be handled by the generic logic.
13730  auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
13731                          ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
13732                          int AOffset, int BOffset) {
13733    assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
13734           "Must call this with A having 3 or 1 inputs from the A half.");
13735    assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
13736           "Must call this with B having 1 or 3 inputs from the B half.");
13737    assert(AToAInputs.size() + BToAInputs.size() == 4 &&
13738           "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
13739
13740    bool ThreeAInputs = AToAInputs.size() == 3;
13741
13742    // Compute the index of dword with only one word among the three inputs in
13743    // a half by taking the sum of the half with three inputs and subtracting
13744    // the sum of the actual three inputs. The difference is the remaining
13745    // slot.
13746    int ADWord = 0, BDWord = 0;
13747    int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
13748    int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
13749    int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
13750    ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
13751    int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
13752    int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
13753    int TripleNonInputIdx =
13754        TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
13755    TripleDWord = TripleNonInputIdx / 2;
13756
13757    // We use xor with one to compute the adjacent DWord to whichever one the
13758    // OneInput is in.
13759    OneInputDWord = (OneInput / 2) ^ 1;
13760
13761    // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
13762    // and BToA inputs. If there is also such a problem with the BToB and AToB
13763    // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
13764    // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
13765    // is essential that we don't *create* a 3<-1 as then we might oscillate.
13766    if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
13767      // Compute how many inputs will be flipped by swapping these DWords. We
13768      // need to balance this to ensure we don't form a 3-1 shuffle in the
13769      // other half.
13771      int NumFlippedAToBInputs =
13772          std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
13773          std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
13774      int NumFlippedBToBInputs =
13775          std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
13776          std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
13777      if ((NumFlippedAToBInputs == 1 &&
13778           (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
13779          (NumFlippedBToBInputs == 1 &&
13780           (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
13781        // We choose whether to fix the A half or B half based on whether that
13782        // half has zero flipped inputs. At zero, we may not be able to fix it
13783        // with that half. We also bias towards fixing the B half because that
13784        // will more commonly be the high half, and we have to bias one way.
13785        auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
13786                                                       ArrayRef<int> Inputs) {
13787          int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
13788          bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
13789          // Determine whether the free index is in the flipped dword or the
13790          // unflipped dword based on where the pinned index is. We use this bit
13791          // in an xor to conditionally select the adjacent dword.
13792          int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
13793          bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13794          if (IsFixIdxInput == IsFixFreeIdxInput)
13795            FixFreeIdx += 1;
13796          IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13797          assert(IsFixIdxInput != IsFixFreeIdxInput &&
13798                 "We need to be changing the number of flipped inputs!");
13799          int PSHUFHalfMask[] = {0, 1, 2, 3};
13800          std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
13801          V = DAG.getNode(
13802              FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
13803              MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
13804              getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13805
13806          for (int &M : Mask)
13807            if (M >= 0 && M == FixIdx)
13808              M = FixFreeIdx;
13809            else if (M >= 0 && M == FixFreeIdx)
13810              M = FixIdx;
13811        };
13812        if (NumFlippedBToBInputs != 0) {
13813          int BPinnedIdx =
13814              BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
13815          FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
13816        } else {
13817          assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
13818          int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
13819          FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
13820        }
13821      }
13822    }
13823
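    // Swap the two problem dwords with a PSHUFD so that each half ends up
    // with two inputs, turning the 3:1 (or 1:3) split into a 2:2 split.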
13824    int PSHUFDMask[] = {0, 1, 2, 3};
13825    PSHUFDMask[ADWord] = BDWord;
13826    PSHUFDMask[BDWord] = ADWord;
13827    V = DAG.getBitcast(
13828        VT,
13829        DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13830                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13831
13832    // Adjust the mask to match the new locations of A and B.
13833    for (int &M : Mask)
13834      if (M >= 0 && M/2 == ADWord)
13835        M = 2 * BDWord + M % 2;
13836      else if (M >= 0 && M/2 == BDWord)
13837        M = 2 * ADWord + M % 2;
13838
13839    // Recurse back into this routine to re-compute state now that this isn't
13840    // a 3 and 1 problem.
13841    return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
13842  };
13843  if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
13844    return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
13845  if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
13846    return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
13847
13848  // At this point there are at most two inputs to the low and high halves from
13849  // each half. That means the inputs can always be grouped into dwords and
13850  // those dwords can then be moved to the correct half with a dword shuffle.
13851  // We use at most one low and one high word shuffle to collect these paired
13852  // inputs into dwords, and finally a dword shuffle to place them.
13853  int PSHUFLMask[4] = {-1, -1, -1, -1};
13854  int PSHUFHMask[4] = {-1, -1, -1, -1};
13855  int PSHUFDMask[4] = {-1, -1, -1, -1};
13856
13857  // First fix the masks for all the inputs that are staying in their
13858  // original halves. This will then dictate the targets of the cross-half
13859  // shuffles.
13860  auto fixInPlaceInputs =
13861      [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
13862                    MutableArrayRef<int> SourceHalfMask,
13863                    MutableArrayRef<int> HalfMask, int HalfOffset) {
13864    if (InPlaceInputs.empty())
13865      return;
13866    if (InPlaceInputs.size() == 1) {
13867      SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13868          InPlaceInputs[0] - HalfOffset;
13869      PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
13870      return;
13871    }
13872    if (IncomingInputs.empty()) {
13873      // Just fix all of the in place inputs.
13874      for (int Input : InPlaceInputs) {
13875        SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
13876        PSHUFDMask[Input / 2] = Input / 2;
13877      }
13878      return;
13879    }
13880
13881    assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
13882    SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13883        InPlaceInputs[0] - HalfOffset;
13884    // Put the second input next to the first so that they are packed into
13885    // a dword. We find the adjacent index by toggling the low bit.
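    // (For example, word 5 pairs with word 4, and word 6 pairs with word 7.)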
13886    int AdjIndex = InPlaceInputs[0] ^ 1;
13887    SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
13888    std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
13889    PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
13890  };
13891  fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
13892  fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
13893
13894  // Now gather the cross-half inputs and place them into a free dword of
13895  // their target half.
13896  // FIXME: This operation could almost certainly be simplified dramatically to
13897  // look more like the 3-1 fixing operation.
13898  auto moveInputsToRightHalf = [&PSHUFDMask](
13899      MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
13900      MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
13901      MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
13902      int DestOffset) {
13903    auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
13904      return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
13905    };
13906    auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
13907                                               int Word) {
13908      int LowWord = Word & ~1;
13909      int HighWord = Word | 1;
13910      return isWordClobbered(SourceHalfMask, LowWord) ||
13911             isWordClobbered(SourceHalfMask, HighWord);
13912    };
13913
13914    if (IncomingInputs.empty())
13915      return;
13916
13917    if (ExistingInputs.empty()) {
13918      // Map any dwords with inputs from them into the right half.
13919      for (int Input : IncomingInputs) {
13920        // If the source half mask maps over the inputs, turn those into
13921        // swaps and use the swapped lane.
13922        if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
13923          if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
13924            SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
13925                Input - SourceOffset;
13926            // We have to swap the uses in our half mask in one sweep.
13927            for (int &M : HalfMask)
13928              if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
13929                M = Input;
13930              else if (M == Input)
13931                M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13932          } else {
13933            assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
13934                       Input - SourceOffset &&
13935                   "Previous placement doesn't match!");
13936          }
13937          // Note that this correctly re-maps both when we do a swap and when
13938          // we observe the other side of the swap above. We rely on that to
13939          // avoid swapping the members of the input list directly.
13940          Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13941        }
13942
13943        // Map the input's dword into the correct half.
13944        if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
13945          PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
13946        else
13947          assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
13948                     Input / 2 &&
13949                 "Previous placement doesn't match!");
13950      }
13951
13952      // And just directly shift any other-half mask elements to be same-half
13953      // as we will have mirrored the dword containing the element into the
13954      // same position within that half.
13955      for (int &M : HalfMask)
13956        if (M >= SourceOffset && M < SourceOffset + 4) {
13957          M = M - SourceOffset + DestOffset;
13958          assert(M >= 0 && "This should never wrap below zero!");
13959        }
13960      return;
13961    }
13962
13963    // Ensure we have the input in a viable dword of its current half. This
13964    // is particularly tricky because the original position may be clobbered
13965    // by inputs being moved and *staying* in that half.
13966    if (IncomingInputs.size() == 1) {
13967      if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13968        int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
13969                         SourceOffset;
13970        SourceHalfMask[InputFixed - SourceOffset] =
13971            IncomingInputs[0] - SourceOffset;
13972        std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
13973                     InputFixed);
13974        IncomingInputs[0] = InputFixed;
13975      }
13976    } else if (IncomingInputs.size() == 2) {
13977      if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
13978          isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13979        // We have two non-adjacent or clobbered inputs we need to extract from
13980        // the source half. To do this, we need to map them into some adjacent
13981        // dword slot in the source mask.
13982        int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
13983                              IncomingInputs[1] - SourceOffset};
13984
13985        // If there is a free slot in the source half mask adjacent to one of
13986        // the inputs, place the other input in it. We use (Index XOR 1) to
13987        // compute an adjacent index.
13988        if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
13989            SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
13990          SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
13991          SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13992          InputsFixed[1] = InputsFixed[0] ^ 1;
13993        } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
13994                   SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
13995          SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
13996          SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
13997          InputsFixed[0] = InputsFixed[1] ^ 1;
13998        } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
13999                   SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
14000          // The two inputs are in the same DWord but it is clobbered and the
14001          // adjacent DWord isn't used at all. Move both inputs to the free
14002          // slot.
14003          SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
14004          SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
14005          InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
14006          InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
14007        } else {
14008          // The only way we hit this point is if there is no clobbering
14009          // (because there are no off-half inputs to this half) and there is no
14010          // free slot adjacent to one of the inputs. In this case, we have to
14011          // swap an input with a non-input.
14012          for (int i = 0; i < 4; ++i)
14013            assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
14014                   "We can't handle any clobbers here!");
14015          assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
14016                 "Cannot have adjacent inputs here!");
14017
14018          SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
14019          SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
14020
14021          // We also have to update the final source mask in this case because
14022          // it may need to undo the above swap.
14023          for (int &M : FinalSourceHalfMask)
14024            if (M == (InputsFixed[0] ^ 1) + SourceOffset)
14025              M = InputsFixed[1] + SourceOffset;
14026            else if (M == InputsFixed[1] + SourceOffset)
14027              M = (InputsFixed[0] ^ 1) + SourceOffset;
14028
14029          InputsFixed[1] = InputsFixed[0] ^ 1;
14030        }
14031
14032        // Point everything at the fixed inputs.
14033        for (int &M : HalfMask)
14034          if (M == IncomingInputs[0])
14035            M = InputsFixed[0] + SourceOffset;
14036          else if (M == IncomingInputs[1])
14037            M = InputsFixed[1] + SourceOffset;
14038
14039        IncomingInputs[0] = InputsFixed[0] + SourceOffset;
14040        IncomingInputs[1] = InputsFixed[1] + SourceOffset;
14041      }
14042    } else {
14043      llvm_unreachable("Unhandled input size!");
14044    }
14045
14046    // Now hoist the DWord down to the right half.
14047    int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
14048    assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
14049    PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
14050    for (int &M : HalfMask)
14051      for (int Input : IncomingInputs)
14052        if (M == Input)
14053          M = FreeDWord * 2 + Input % 2;
14054  };
14055  moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
14056                        /*SourceOffset*/ 4, /*DestOffset*/ 0);
14057  moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
14058                        /*SourceOffset*/ 0, /*DestOffset*/ 4);
14059
14060  // Now enact all the shuffles we've computed to move the inputs into their
14061  // target half.
14062  if (!isNoopShuffleMask(PSHUFLMask))
14063    V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
14064                    getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
14065  if (!isNoopShuffleMask(PSHUFHMask))
14066    V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
14067                    getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
14068  if (!isNoopShuffleMask(PSHUFDMask))
14069    V = DAG.getBitcast(
14070        VT,
14071        DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
14072                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
14073
14074  // At this point, each half should contain all its inputs, and we can then
14075  // just shuffle them into their final position.
14076  assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
14077         "Failed to lift all the high half inputs to the low mask!");
14078  assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
14079         "Failed to lift all the low half inputs to the high mask!");
14080
14081  // Do a half shuffle for the low mask.
14082  if (!isNoopShuffleMask(LoMask))
14083    V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
14084                    getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
14085
14086  // Do a half shuffle with the high mask after shifting its values down.
14087  for (int &M : HiMask)
14088    if (M >= 0)
14089      M -= 4;
14090  if (!isNoopShuffleMask(HiMask))
14091    V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
14092                    getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
14093
14094  return V;
14095}
14096
14097/// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
14098/// blend if only one input is used.
14099static SDValue lowerShuffleAsBlendOfPSHUFBs(
14100    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14101    const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
14102  assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
14103         "Lane crossing shuffle masks not supported");
14104
14105  int NumBytes = VT.getSizeInBits() / 8;
14106  int Size = Mask.size();
14107  int Scale = NumBytes / Size;
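  // Scale is the number of bytes covered by each mask element, e.g. 2 when
  // building a v16i8 PSHUFB mask from a v8i16 shuffle mask.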
14108
14109  SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
14110  SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));
14111  V1InUse = false;
14112  V2InUse = false;
14113
14114  for (int i = 0; i < NumBytes; ++i) {
14115    int M = Mask[i / Scale];
14116    if (M < 0)
14117      continue;
14118
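    // PSHUFB zeroes a destination byte when the top bit of its control byte
    // is set, so 0x80 marks a lane that should come from neither input.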
14119    const int ZeroMask = 0x80;
14120    int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
14121    int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
14122    if (Zeroable[i / Scale])
14123      V1Idx = V2Idx = ZeroMask;
14124
14125    V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
14126    V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
14127    V1InUse |= (ZeroMask != V1Idx);
14128    V2InUse |= (ZeroMask != V2Idx);
14129  }
14130
14131  MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
14132  if (V1InUse)
14133    V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
14134                     DAG.getBuildVector(ShufVT, DL, V1Mask));
14135  if (V2InUse)
14136    V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
14137                     DAG.getBuildVector(ShufVT, DL, V2Mask));
14138
14139  // If we need shuffled inputs from both, blend the two.
14140  SDValue V;
14141  if (V1InUse && V2InUse)
14142    V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
14143  else
14144    V = V1InUse ? V1 : V2;
14145
14146  // Cast the result back to the correct type.
14147  return DAG.getBitcast(VT, V);
14148}
14149
14150/// Generic lowering of 8-lane i16 shuffles.
14151///
14152/// This handles both single-input shuffles and combined shuffle/blends with
14153/// two inputs. The single input shuffles are immediately delegated to
14154/// a dedicated lowering routine.
14155///
14156/// The blends are lowered in one of three fundamental ways. If there are few
14157/// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
14158/// of the input is significantly cheaper when lowered as an interleaving of
14159/// the two inputs, try to interleave them. Otherwise, blend the low and high
14160/// halves of the inputs separately (making them have relatively few inputs)
14161/// and then concatenate them.
14162static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
14163                                 const APInt &Zeroable, SDValue V1, SDValue V2,
14164                                 const X86Subtarget &Subtarget,
14165                                 SelectionDAG &DAG) {
14166  assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
14167  assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
14168  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
14169
14170  // Whenever we can lower this as a zext, that instruction is strictly faster
14171  // than any alternative.
14172  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
14173                                                   Zeroable, Subtarget, DAG))
14174    return ZExt;
14175
14176  int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
14177
14178  if (NumV2Inputs == 0) {
14179    // Try to use shift instructions.
14180    if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
14181                                            Zeroable, Subtarget, DAG))
14182      return Shift;
14183
14184    // Check for being able to broadcast a single element.
14185    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
14186                                                    Mask, Subtarget, DAG))
14187      return Broadcast;
14188
14189    // Use dedicated unpack instructions for masks that match their pattern.
14190    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
14191      return V;
14192
14193    // Use dedicated pack instructions for masks that match their pattern.
14194    if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
14195                                         Subtarget))
14196      return V;
14197
14198    // Try to use byte rotation instructions.
14199    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
14200                                                  Subtarget, DAG))
14201      return Rotate;
14202
14203    // Make a copy of the mask so it can be modified.
14204    SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
14205    return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
14206                                               Subtarget, DAG);
14207  }
14208
14209  assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
14210         "All single-input shuffles should be canonicalized to be V1-input "
14211         "shuffles.");
14212
14213  // Try to use shift instructions.
14214  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
14215                                          Zeroable, Subtarget, DAG))
14216    return Shift;
14217
14218  // See if we can use SSE4A Extraction / Insertion.
14219  if (Subtarget.hasSSE4A())
14220    if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
14221                                          Zeroable, DAG))
14222      return V;
14223
14224  // There are special ways we can lower some single-element blends.
14225  if (NumV2Inputs == 1)
14226    if (SDValue V = lowerShuffleAsElementInsertion(
14227            DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
14228      return V;
14229
14230  // We have different paths for blend lowering, but they all must use the
14231  // *exact* same predicate.
14232  bool IsBlendSupported = Subtarget.hasSSE41();
14233  if (IsBlendSupported)
14234    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
14235                                            Zeroable, Subtarget, DAG))
14236      return Blend;
14237
14238  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
14239                                             Zeroable, Subtarget, DAG))
14240    return Masked;
14241
14242  // Use dedicated unpack instructions for masks that match their pattern.
14243  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
14244    return V;
14245
14246  // Use dedicated pack instructions for masks that match their pattern.
14247  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
14248                                       Subtarget))
14249    return V;
14250
14251  // Try to use byte rotation instructions.
14252  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
14253                                                Subtarget, DAG))
14254    return Rotate;
14255
14256  if (SDValue BitBlend =
14257          lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
14258    return BitBlend;
14259
14260  // Try to use byte shift instructions to mask.
14261  if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v8i16, V1, V2, Mask,
14262                                              Zeroable, Subtarget, DAG))
14263    return V;
14264
14265  // Try to lower by permuting the inputs into an unpack instruction.
14266  if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
14267                                                      Mask, Subtarget, DAG))
14268    return Unpack;
14269
14270  // If we can't directly blend but can use PSHUFB, that will be better as it
14271  // can both shuffle and set up the inefficient blend.
14272  if (!IsBlendSupported && Subtarget.hasSSSE3()) {
14273    bool V1InUse, V2InUse;
14274    return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
14275                                        Zeroable, DAG, V1InUse, V2InUse);
14276  }
14277
14278  // We can always bit-blend if we have to so the fallback strategy is to
14279  // decompose into single-input permutes and blends.
14280  return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
14281                                              Mask, Subtarget, DAG);
14282}
14283
14284/// Check whether a compaction lowering can be done by dropping even
14285/// elements and compute how many times even elements must be dropped.
14286///
14287/// This handles shuffles which take every Nth element where N is a power of
14288/// two. Example shuffle masks:
14289///
14290///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14,  0,  2,  4,  6,  8, 10, 12, 14
14291///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
14292///  N = 2:  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12
14293///  N = 2:  0,  4,  8, 12, 16, 20, 24, 28,  0,  4,  8, 12, 16, 20, 24, 28
14294///  N = 3:  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8
14295///  N = 3:  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24
14296///
14297/// Any of these lanes can of course be undef.
14298///
14299/// This routine only supports N <= 3.
14300/// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
14301/// for larger N.
14302///
14303/// \returns N above, or the number of times even elements must be dropped if
14304/// there is such a number. Otherwise returns zero.
14305static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
14306                                          bool IsSingleInput) {
14307  // The modulus for the shuffle vector entries is based on whether this is
14308  // a single input or not.
14309  int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
14310  assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
14311         "We should only be called with masks with a power-of-2 size!");
14312
14313  uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
14314
14315  // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
14316  // and 2^3 simultaneously. This is because we may have ambiguity with
14317  // partially undef inputs.
14318  bool ViableForN[3] = {true, true, true};
14319
14320  for (int i = 0, e = Mask.size(); i < e; ++i) {
14321    // Ignore undef lanes, we'll optimistically collapse them to the pattern we
14322    // want.
14323    if (Mask[i] < 0)
14324      continue;
14325
14326    bool IsAnyViable = false;
14327    for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
14328      if (ViableForN[j]) {
14329        uint64_t N = j + 1;
14330
        // The shuffle mask entry must be equal to (i * 2^N) % ShuffleModulus.
14332        if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
14333          IsAnyViable = true;
14334        else
14335          ViableForN[j] = false;
14336      }
14337    // Early exit if we exhaust the possible powers of two.
14338    if (!IsAnyViable)
14339      break;
14340  }
14341
14342  for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
14343    if (ViableForN[j])
14344      return j + 1;
14345
14346  // Return 0 as there is no viable power of two.
14347  return 0;
14348}
14349
14350static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
14351                                     ArrayRef<int> Mask, SDValue V1,
14352                                     SDValue V2, SelectionDAG &DAG) {
14353  MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
14354  MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
14355
14356  SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
14357  if (V2.isUndef())
14358    return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
14359
14360  return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
14361}
14362
14363/// Generic lowering of v16i8 shuffles.
14364///
14365/// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
14366/// detect any complexity reducing interleaving. If that doesn't help, it uses
14367/// UNPCK to spread the i8 elements across two i16-element vectors, and uses
14368/// the existing lowering for v8i16 blends on each half, finally PACK-ing them
14369/// back together.
14370static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
14371                                 const APInt &Zeroable, SDValue V1, SDValue V2,
14372                                 const X86Subtarget &Subtarget,
14373                                 SelectionDAG &DAG) {
14374  assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14375  assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14376  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
14377
14378  // Try to use shift instructions.
14379  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
14380                                          Zeroable, Subtarget, DAG))
14381    return Shift;
14382
14383  // Try to use byte rotation instructions.
14384  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
14385                                                Subtarget, DAG))
14386    return Rotate;
14387
14388  // Use dedicated pack instructions for masks that match their pattern.
14389  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
14390                                       Subtarget))
14391    return V;
14392
14393  // Try to use a zext lowering.
14394  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
14395                                                   Zeroable, Subtarget, DAG))
14396    return ZExt;
14397
14398  // See if we can use SSE4A Extraction / Insertion.
14399  if (Subtarget.hasSSE4A())
14400    if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
14401                                          Zeroable, DAG))
14402      return V;
14403
14404  int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
14405
14406  // For single-input shuffles, there are some nicer lowering tricks we can use.
14407  if (NumV2Elements == 0) {
14408    // Check for being able to broadcast a single element.
14409    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
14410                                                    Mask, Subtarget, DAG))
14411      return Broadcast;
14412
14413    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14414      return V;
14415
14416    // Check whether we can widen this to an i16 shuffle by duplicating bytes.
14417    // Notably, this handles splat and partial-splat shuffles more efficiently.
14418    // However, it only makes sense if the pre-duplication shuffle simplifies
14419    // things significantly. Currently, this means we need to be able to
14420    // express the pre-duplication shuffle as an i16 shuffle.
14421    //
14422    // FIXME: We should check for other patterns which can be widened into an
14423    // i16 shuffle as well.
14424    auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
14425      for (int i = 0; i < 16; i += 2)
14426        if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
14427          return false;
14428
14429      return true;
14430    };
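    // When every byte pair agrees like this, the shuffle can be done as an
    // i16 shuffle, a byte-duplicating unpack, and a second i16 shuffle; the
    // lambda below builds that sequence.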
14431    auto tryToWidenViaDuplication = [&]() -> SDValue {
14432      if (!canWidenViaDuplication(Mask))
14433        return SDValue();
14434      SmallVector<int, 4> LoInputs;
14435      copy_if(Mask, std::back_inserter(LoInputs),
14436              [](int M) { return M >= 0 && M < 8; });
14437      array_pod_sort(LoInputs.begin(), LoInputs.end());
14438      LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
14439                     LoInputs.end());
14440      SmallVector<int, 4> HiInputs;
14441      copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
14442      array_pod_sort(HiInputs.begin(), HiInputs.end());
14443      HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
14444                     HiInputs.end());
14445
14446      bool TargetLo = LoInputs.size() >= HiInputs.size();
14447      ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
14448      ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
14449
14450      int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
14451      SmallDenseMap<int, int, 8> LaneMap;
14452      for (int I : InPlaceInputs) {
14453        PreDupI16Shuffle[I/2] = I/2;
14454        LaneMap[I] = I;
14455      }
14456      int j = TargetLo ? 0 : 4, je = j + 4;
14457      for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
14458        // Check if j is already a shuffle of this input. This happens when
14459        // there are two adjacent bytes after we move the low one.
14460        if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
14461          // If we haven't yet mapped the input, search for a slot into which
14462          // we can map it.
14463          while (j < je && PreDupI16Shuffle[j] >= 0)
14464            ++j;
14465
14466          if (j == je)
            // We can't place the inputs into a single half with a simple i16
            // shuffle, so bail.
14468            return SDValue();
14469
14470          // Map this input with the i16 shuffle.
14471          PreDupI16Shuffle[j] = MovingInputs[i] / 2;
14472        }
14473
14474        // Update the lane map based on the mapping we ended up with.
14475        LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
14476      }
14477      V1 = DAG.getBitcast(
14478          MVT::v16i8,
14479          DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14480                               DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
14481
14482      // Unpack the bytes to form the i16s that will be shuffled into place.
14483      bool EvenInUse = false, OddInUse = false;
14484      for (int i = 0; i < 16; i += 2) {
14485        EvenInUse |= (Mask[i + 0] >= 0);
14486        OddInUse |= (Mask[i + 1] >= 0);
14487        if (EvenInUse && OddInUse)
14488          break;
14489      }
14490      V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
14491                       MVT::v16i8, EvenInUse ? V1 : DAG.getUNDEF(MVT::v16i8),
14492                       OddInUse ? V1 : DAG.getUNDEF(MVT::v16i8));
14493
14494      int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
14495      for (int i = 0; i < 16; ++i)
14496        if (Mask[i] >= 0) {
14497          int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
14498          assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
14499          if (PostDupI16Shuffle[i / 2] < 0)
14500            PostDupI16Shuffle[i / 2] = MappedMask;
14501          else
14502            assert(PostDupI16Shuffle[i / 2] == MappedMask &&
14503                   "Conflicting entries in the original shuffle!");
14504        }
14505      return DAG.getBitcast(
14506          MVT::v16i8,
14507          DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14508                               DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
14509    };
14510    if (SDValue V = tryToWidenViaDuplication())
14511      return V;
14512  }
14513
14514  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
14515                                             Zeroable, Subtarget, DAG))
14516    return Masked;
14517
14518  // Use dedicated unpack instructions for masks that match their pattern.
14519  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14520    return V;
14521
14522  // Try to use byte shift instructions to mask.
14523  if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v16i8, V1, V2, Mask,
14524                                              Zeroable, Subtarget, DAG))
14525    return V;
14526
14527  // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
14528  // with PSHUFB. It is important to do this before we attempt to generate any
14529  // blends but after all of the single-input lowerings. If the single input
14530  // lowerings can find an instruction sequence that is faster than a PSHUFB, we
14531  // want to preserve that and we can DAG combine any longer sequences into
14532  // a PSHUFB in the end. But once we start blending from multiple inputs,
14533  // the complexity of DAG combining bad patterns back into PSHUFB is too high,
14534  // and there are *very* few patterns that would actually be faster than the
14535  // PSHUFB approach because of its ability to zero lanes.
14536  //
14537  // FIXME: The only exceptions to the above are blends which are exact
14538  // interleavings with direct instructions supporting them. We currently don't
14539  // handle those well here.
14540  if (Subtarget.hasSSSE3()) {
14541    bool V1InUse = false;
14542    bool V2InUse = false;
14543
14544    SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
14545        DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
14546
14547    // If both V1 and V2 are in use and we can use a direct blend or an unpack,
14548    // do so. This avoids using them to handle blends-with-zero which is
14549    // important as a single pshufb is significantly faster for that.
14550    if (V1InUse && V2InUse) {
14551      if (Subtarget.hasSSE41())
14552        if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
14553                                                Zeroable, Subtarget, DAG))
14554          return Blend;
14555
      // We can use an unpack to do the blending rather than an or in some
      // cases. Even though the or may be (very slightly) more efficient, we
      // prefer this lowering because there are common cases where part of
      // the complexity of the shuffles goes away when we do the final blend as
      // an unpack.
14561      // FIXME: It might be worth trying to detect if the unpack-feeding
14562      // shuffles will both be pshufb, in which case we shouldn't bother with
14563      // this.
14564      if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
14565              DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14566        return Unpack;
14567
14568      // If we have VBMI we can use one VPERM instead of multiple PSHUFBs.
14569      if (Subtarget.hasVBMI() && Subtarget.hasVLX())
14570        return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, DAG);
14571
14572      // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
14573      // PALIGNR will be cheaper than the second PSHUFB+OR.
14574      if (SDValue V = lowerShuffleAsByteRotateAndPermute(
14575              DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14576        return V;
14577    }
14578
14579    return PSHUFB;
14580  }
14581
14582  // There are special ways we can lower some single-element blends.
14583  if (NumV2Elements == 1)
14584    if (SDValue V = lowerShuffleAsElementInsertion(
14585            DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
14586      return V;
14587
14588  if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
14589    return Blend;
14590
  // Check whether a compaction lowering can be done. This handles shuffles
  // which take every Nth element where N is a power of two. See the helper
  // function for details.
  //
  // We special case these as they can be particularly efficiently handled with
  // the PACKUSWB instruction on x86 and they show up in common patterns of
  // rearranging bytes to truncate wide elements.
14598  bool IsSingleInput = V2.isUndef();
14599  if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput)) {
    // NumEvenDrops is the log2 of the element stride. Another way of thinking
    // about it is that we need to drop the even elements this many times to
    // get the original input.
14603
14604    // First we need to zero all the dropped bytes.
14605    assert(NumEvenDrops <= 3 &&
14606           "No support for dropping even elements more than 3 times.");
14607    SmallVector<SDValue, 16> ByteClearOps(16, DAG.getConstant(0, DL, MVT::i8));
14608    for (unsigned i = 0; i != 16; i += 1 << NumEvenDrops)
14609      ByteClearOps[i] = DAG.getConstant(0xFF, DL, MVT::i8);
14610    SDValue ByteClearMask = DAG.getBuildVector(MVT::v16i8, DL, ByteClearOps);
14611    V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
14612    if (!IsSingleInput)
14613      V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
14614
14615    // Now pack things back together.
14616    V1 = DAG.getBitcast(MVT::v8i16, V1);
14617    V2 = IsSingleInput ? V1 : DAG.getBitcast(MVT::v8i16, V2);
14618    SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
14619    for (int i = 1; i < NumEvenDrops; ++i) {
14620      Result = DAG.getBitcast(MVT::v8i16, Result);
14621      Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
14622    }
14623
14624    return Result;
14625  }
14626
14627  // Handle multi-input cases by blending single-input shuffles.
14628  if (NumV2Elements > 0)
14629    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2, Mask,
14630                                                Subtarget, DAG);
14631
14632  // The fallback path for single-input shuffles widens this into two v8i16
14633  // vectors with unpacks, shuffles those, and then pulls them back together
14634  // with a pack.
14635  SDValue V = V1;
14636
14637  std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14638  std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14639  for (int i = 0; i < 16; ++i)
14640    if (Mask[i] >= 0)
14641      (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
14642
14643  SDValue VLoHalf, VHiHalf;
14644  // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
14645  // them out and avoid using UNPCK{L,H} to extract the elements of V as
14646  // i16s.
14647  if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
14648      none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
14649    // Use a mask to drop the high bytes.
14650    VLoHalf = DAG.getBitcast(MVT::v8i16, V);
14651    VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
14652                          DAG.getConstant(0x00FF, DL, MVT::v8i16));
14653
14654    // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
14655    VHiHalf = DAG.getUNDEF(MVT::v8i16);
14656
14657    // Squash the masks to point directly into VLoHalf.
14658    for (int &M : LoBlendMask)
14659      if (M >= 0)
14660        M /= 2;
14661    for (int &M : HiBlendMask)
14662      if (M >= 0)
14663        M /= 2;
14664  } else {
    // Otherwise just unpack the low half of V into VLoHalf and the high half
    // into VHiHalf so that we can blend them as i16s.
14667    SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
14668
14669    VLoHalf = DAG.getBitcast(
14670        MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
14671    VHiHalf = DAG.getBitcast(
14672        MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
14673  }
14674
  SDValue LoV =
      DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
  SDValue HiV =
      DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
14677
14678  return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
14679}
14680
14681/// Dispatching routine to lower various 128-bit x86 vector shuffles.
14682///
14683/// This routine breaks down the specific type of 128-bit shuffle and
14684/// dispatches to the lowering routines accordingly.
14685static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
14686                                  MVT VT, SDValue V1, SDValue V2,
14687                                  const APInt &Zeroable,
14688                                  const X86Subtarget &Subtarget,
14689                                  SelectionDAG &DAG) {
14690  switch (VT.SimpleTy) {
14691  case MVT::v2i64:
14692    return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14693  case MVT::v2f64:
14694    return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14695  case MVT::v4i32:
14696    return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14697  case MVT::v4f32:
14698    return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14699  case MVT::v8i16:
14700    return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14701  case MVT::v16i8:
14702    return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14703
14704  default:
14705    llvm_unreachable("Unimplemented!");
14706  }
14707}
14708
14709/// Generic routine to split vector shuffle into half-sized shuffles.
14710///
14711/// This routine just extracts two subvectors, shuffles them independently, and
14712/// then concatenates them back together. This should work effectively with all
14713/// AVX vector shuffle types.
14714static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
14715                                    SDValue V2, ArrayRef<int> Mask,
14716                                    SelectionDAG &DAG) {
14717  assert(VT.getSizeInBits() >= 256 &&
14718         "Only for 256-bit or wider vector shuffles!");
14719  assert(V1.getSimpleValueType() == VT && "Bad operand type!");
14720  assert(V2.getSimpleValueType() == VT && "Bad operand type!");
14721
14722  ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
14723  ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
14724
14725  int NumElements = VT.getVectorNumElements();
14726  int SplitNumElements = NumElements / 2;
14727  MVT ScalarVT = VT.getVectorElementType();
14728  MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
14729
14730  // Rather than splitting build-vectors, just build two narrower build
14731  // vectors. This helps shuffling with splats and zeros.
14732  auto SplitVector = [&](SDValue V) {
14733    V = peekThroughBitcasts(V);
14734
14735    MVT OrigVT = V.getSimpleValueType();
14736    int OrigNumElements = OrigVT.getVectorNumElements();
14737    int OrigSplitNumElements = OrigNumElements / 2;
14738    MVT OrigScalarVT = OrigVT.getVectorElementType();
14739    MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
14740
14741    SDValue LoV, HiV;
14742
14743    auto *BV = dyn_cast<BuildVectorSDNode>(V);
14744    if (!BV) {
14745      LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
14746                        DAG.getIntPtrConstant(0, DL));
14747      HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
14748                        DAG.getIntPtrConstant(OrigSplitNumElements, DL));
    } else {
14751      SmallVector<SDValue, 16> LoOps, HiOps;
14752      for (int i = 0; i < OrigSplitNumElements; ++i) {
14753        LoOps.push_back(BV->getOperand(i));
14754        HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
14755      }
14756      LoV = DAG.getBuildVector(OrigSplitVT, DL, LoOps);
14757      HiV = DAG.getBuildVector(OrigSplitVT, DL, HiOps);
14758    }
14759    return std::make_pair(DAG.getBitcast(SplitVT, LoV),
14760                          DAG.getBitcast(SplitVT, HiV));
14761  };
14762
14763  SDValue LoV1, HiV1, LoV2, HiV2;
14764  std::tie(LoV1, HiV1) = SplitVector(V1);
14765  std::tie(LoV2, HiV2) = SplitVector(V2);
14766
14767  // Now create two 4-way blends of these half-width vectors.
14768  auto HalfBlend = [&](ArrayRef<int> HalfMask) {
14769    bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
14770    SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
14771    SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
14772    SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
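    // Split each mask entry into a per-input shuffle plus a final blend:
    // V1BlendMask and V2BlendMask select elements within V1 and V2, and
    // BlendMask then picks between the two shuffled results.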
14773    for (int i = 0; i < SplitNumElements; ++i) {
14774      int M = HalfMask[i];
14775      if (M >= NumElements) {
14776        if (M >= NumElements + SplitNumElements)
14777          UseHiV2 = true;
14778        else
14779          UseLoV2 = true;
14780        V2BlendMask[i] = M - NumElements;
14781        BlendMask[i] = SplitNumElements + i;
14782      } else if (M >= 0) {
14783        if (M >= SplitNumElements)
14784          UseHiV1 = true;
14785        else
14786          UseLoV1 = true;
14787        V1BlendMask[i] = M;
14788        BlendMask[i] = i;
14789      }
14790    }
14791
14792    // Because the lowering happens after all combining takes place, we need to
14793    // manually combine these blend masks as much as possible so that we create
14794    // a minimal number of high-level vector shuffle nodes.
14795
14796    // First try just blending the halves of V1 or V2.
14797    if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
14798      return DAG.getUNDEF(SplitVT);
14799    if (!UseLoV2 && !UseHiV2)
14800      return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14801    if (!UseLoV1 && !UseHiV1)
14802      return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14803
14804    SDValue V1Blend, V2Blend;
14805    if (UseLoV1 && UseHiV1) {
14806      V1Blend =
14807        DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14808    } else {
14809      // We only use half of V1 so map the usage down into the final blend mask.
14810      V1Blend = UseLoV1 ? LoV1 : HiV1;
14811      for (int i = 0; i < SplitNumElements; ++i)
14812        if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
14813          BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
14814    }
14815    if (UseLoV2 && UseHiV2) {
14816      V2Blend =
14817        DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14818    } else {
14819      // We only use half of V2 so map the usage down into the final blend mask.
14820      V2Blend = UseLoV2 ? LoV2 : HiV2;
14821      for (int i = 0; i < SplitNumElements; ++i)
14822        if (BlendMask[i] >= SplitNumElements)
14823          BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
14824    }
14825    return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
14826  };
14827  SDValue Lo = HalfBlend(LoMask);
14828  SDValue Hi = HalfBlend(HiMask);
14829  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
14830}
14831
14832/// Either split a vector in halves or decompose the shuffles and the
14833/// blend.
14834///
14835/// This is provided as a good fallback for many lowerings of non-single-input
14836/// shuffles with more than one 128-bit lane. In those cases, we want to select
14837/// between splitting the shuffle into 128-bit components and stitching those
14838/// back together vs. extracting the single-input shuffles and blending those
14839/// results.
14840static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
14841                                          SDValue V2, ArrayRef<int> Mask,
14842                                          const X86Subtarget &Subtarget,
14843                                          SelectionDAG &DAG) {
14844  assert(!V2.isUndef() && "This routine must not be used to lower single-input "
14845         "shuffles as it could then recurse on itself.");
14846  int Size = Mask.size();
14847
14848  // If this can be modeled as a broadcast of two elements followed by a blend,
14849  // prefer that lowering. This is especially important because broadcasts can
14850  // often fold with memory operands.
14851  auto DoBothBroadcast = [&] {
14852    int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
14853    for (int M : Mask)
14854      if (M >= Size) {
14855        if (V2BroadcastIdx < 0)
14856          V2BroadcastIdx = M - Size;
14857        else if (M - Size != V2BroadcastIdx)
14858          return false;
14859      } else if (M >= 0) {
14860        if (V1BroadcastIdx < 0)
14861          V1BroadcastIdx = M;
14862        else if (M != V1BroadcastIdx)
14863          return false;
14864      }
14865    return true;
14866  };
14867  if (DoBothBroadcast())
14868    return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
14869                                                Subtarget, DAG);
14870
14871  // If the inputs all stem from a single 128-bit lane of each input, then we
14872  // split them rather than blending because the split will decompose to
14873  // unusually few instructions.
14874  int LaneCount = VT.getSizeInBits() / 128;
14875  int LaneSize = Size / LaneCount;
14876  SmallBitVector LaneInputs[2];
14877  LaneInputs[0].resize(LaneCount, false);
14878  LaneInputs[1].resize(LaneCount, false);
14879  for (int i = 0; i < Size; ++i)
14880    if (Mask[i] >= 0)
14881      LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
14882  if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
14883    return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
14884
14885  // Otherwise, just fall back to decomposed shuffles and a blend. This requires
14886  // that the decomposed single-input shuffles don't end up here.
14887  return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, Subtarget,
14888                                              DAG);
14889}
14890
14891// Lower as SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
14892// TODO: Extend to support v8f32 (+ 512-bit shuffles).
14893static SDValue lowerShuffleAsLanePermuteAndSHUFP(const SDLoc &DL, MVT VT,
14894                                                 SDValue V1, SDValue V2,
14895                                                 ArrayRef<int> Mask,
14896                                                 SelectionDAG &DAG) {
14897  assert(VT == MVT::v4f64 && "Only for v4f64 shuffles");
14898
14899  int LHSMask[4] = {-1, -1, -1, -1};
14900  int RHSMask[4] = {-1, -1, -1, -1};
14901  unsigned SHUFPMask = 0;
14902
14903  // As SHUFPD uses a single LHS/RHS element per lane, we can always
14904  // perform the shuffle once the lanes have been shuffled in place.
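  // Bit i of the SHUFPD immediate picks the low (0) or high (1) double within
  // result element i's source lane, which here is just M & 1.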
14905  for (int i = 0; i != 4; ++i) {
14906    int M = Mask[i];
14907    if (M < 0)
14908      continue;
14909    int LaneBase = i & ~1;
14910    auto &LaneMask = (i & 1) ? RHSMask : LHSMask;
14911    LaneMask[LaneBase + (M & 1)] = M;
14912    SHUFPMask |= (M & 1) << i;
14913  }
14914
14915  SDValue LHS = DAG.getVectorShuffle(VT, DL, V1, V2, LHSMask);
14916  SDValue RHS = DAG.getVectorShuffle(VT, DL, V1, V2, RHSMask);
14917  return DAG.getNode(X86ISD::SHUFP, DL, VT, LHS, RHS,
14918                     DAG.getTargetConstant(SHUFPMask, DL, MVT::i8));
14919}
14920
14921/// Lower a vector shuffle crossing multiple 128-bit lanes as
14922/// a lane permutation followed by a per-lane permutation.
14923///
14924/// This is mainly for cases where we can have non-repeating permutes
14925/// in each lane.
14926///
14927/// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
14928/// we should investigate merging them.
14929static SDValue lowerShuffleAsLanePermuteAndPermute(
14930    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14931    SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14932  int NumElts = VT.getVectorNumElements();
14933  int NumLanes = VT.getSizeInBits() / 128;
14934  int NumEltsPerLane = NumElts / NumLanes;
14935
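  // Decompose the shuffle into a lane-granular cross-lane permute (LaneMask,
  // derived from SrcLaneMask) followed by an in-lane permute (PermMask).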
14936  SmallVector<int, 4> SrcLaneMask(NumLanes, SM_SentinelUndef);
14937  SmallVector<int, 16> PermMask(NumElts, SM_SentinelUndef);
14938
14939  for (int i = 0; i != NumElts; ++i) {
14940    int M = Mask[i];
14941    if (M < 0)
14942      continue;
14943
14944    // Ensure that each lane comes from a single source lane.
14945    int SrcLane = M / NumEltsPerLane;
14946    int DstLane = i / NumEltsPerLane;
14947    if (!isUndefOrEqual(SrcLaneMask[DstLane], SrcLane))
14948      return SDValue();
14949    SrcLaneMask[DstLane] = SrcLane;
14950
14951    PermMask[i] = (DstLane * NumEltsPerLane) + (M % NumEltsPerLane);
14952  }
14953
14954  // Make sure we set all elements of the lane mask, to avoid undef propagation.
14955  SmallVector<int, 16> LaneMask(NumElts, SM_SentinelUndef);
14956  for (int DstLane = 0; DstLane != NumLanes; ++DstLane) {
14957    int SrcLane = SrcLaneMask[DstLane];
14958    if (0 <= SrcLane)
14959      for (int j = 0; j != NumEltsPerLane; ++j) {
14960        LaneMask[(DstLane * NumEltsPerLane) + j] =
14961            (SrcLane * NumEltsPerLane) + j;
14962      }
14963  }
14964
  // If we're only shuffling the lowest lane and the rest of the lanes are
  // identity, then don't bother.
14967  // TODO - isShuffleMaskInputInPlace could be extended to something like this.
14968  int NumIdentityLanes = 0;
14969  bool OnlyShuffleLowestLane = true;
14970  for (int i = 0; i != NumLanes; ++i) {
14971    if (isSequentialOrUndefInRange(PermMask, i * NumEltsPerLane, NumEltsPerLane,
14972                                   i * NumEltsPerLane))
14973      NumIdentityLanes++;
14974    else if (SrcLaneMask[i] != 0 && SrcLaneMask[i] != NumLanes)
14975      OnlyShuffleLowestLane = false;
14976  }
14977  if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
14978    return SDValue();
14979
14980  SDValue LanePermute = DAG.getVectorShuffle(VT, DL, V1, V2, LaneMask);
14981  return DAG.getVectorShuffle(VT, DL, LanePermute, DAG.getUNDEF(VT), PermMask);
14982}
14983
14984/// Lower a vector shuffle crossing multiple 128-bit lanes by shuffling one
14985/// source with a lane permutation.
14986///
14987/// This lowering strategy results in four instructions in the worst case for a
14988/// single-input cross-lane shuffle, which is fewer than any other fully general
14989/// cross-lane shuffle strategy I'm aware of. Special cases for each particular
14990/// shuffle pattern should be handled prior to trying this lowering.
14991static SDValue lowerShuffleAsLanePermuteAndShuffle(
14992    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14993    SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14994  // FIXME: This should probably be generalized for 512-bit vectors as well.
14995  assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
14996  int Size = Mask.size();
14997  int LaneSize = Size / 2;
14998
14999  // Fold to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
15000  // Only do this if the elements aren't all from the lower lane,
15001  // otherwise we're (probably) better off doing a split.
15002  if (VT == MVT::v4f64 &&
15003      !all_of(Mask, [LaneSize](int M) { return M < LaneSize; }))
15004    if (SDValue V =
15005            lowerShuffleAsLanePermuteAndSHUFP(DL, VT, V1, V2, Mask, DAG))
15006      return V;
15007
15008  // If there are only inputs from one 128-bit lane, splitting will in fact be
15009  // less expensive. The flags track whether the given lane contains an element
15010  // that crosses to another lane.
15011  if (!Subtarget.hasAVX2()) {
15012    bool LaneCrossing[2] = {false, false};
15013    for (int i = 0; i < Size; ++i)
15014      if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
15015        LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
15016    if (!LaneCrossing[0] || !LaneCrossing[1])
15017      return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
15018  } else {
15019    bool LaneUsed[2] = {false, false};
15020    for (int i = 0; i < Size; ++i)
15021      if (Mask[i] >= 0)
15022        LaneUsed[(Mask[i] % Size) / LaneSize] = true;
15023    if (!LaneUsed[0] || !LaneUsed[1])
15024      return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
15025  }
15026
15027  // TODO - we could support shuffling V2 in the Flipped input.
15028  assert(V2.isUndef() &&
15029         "This last part of this routine only works on single input shuffles");
15030
15031  SmallVector<int, 32> InLaneMask(Mask.begin(), Mask.end());
15032  for (int i = 0; i < Size; ++i) {
15033    int &M = InLaneMask[i];
15034    if (M < 0)
15035      continue;
15036    if (((M % Size) / LaneSize) != (i / LaneSize))
15037      M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;
15038  }
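  // Elements that crossed lanes were redirected to the second shuffle operand
  // (the '+ Size' above), which becomes the lane-flipped copy of V1 below.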
15039  assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
15040         "In-lane shuffle mask expected");
15041
15042  // Flip the lanes, and shuffle the results which should now be in-lane.
15043  MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
15044  SDValue Flipped = DAG.getBitcast(PVT, V1);
15045  Flipped =
15046      DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT), {2, 3, 0, 1});
15047  Flipped = DAG.getBitcast(VT, Flipped);
15048  return DAG.getVectorShuffle(VT, DL, V1, Flipped, InLaneMask);
15049}
15050
15051/// Handle lowering 2-lane 128-bit shuffles.
15052static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
15053                                  SDValue V2, ArrayRef<int> Mask,
15054                                  const APInt &Zeroable,
15055                                  const X86Subtarget &Subtarget,
15056                                  SelectionDAG &DAG) {
15057  // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
15058  if (Subtarget.hasAVX2() && V2.isUndef())
15059    return SDValue();
15060
15061  bool V2IsZero = !V2.isUndef() && ISD::isBuildVectorAllZeros(V2.getNode());
15062
15063  SmallVector<int, 4> WidenedMask;
15064  if (!canWidenShuffleElements(Mask, Zeroable, V2IsZero, WidenedMask))
15065    return SDValue();
15066
15067  bool IsLowZero = (Zeroable & 0x3) == 0x3;
15068  bool IsHighZero = (Zeroable & 0xc) == 0xc;
15069
15070  // Try to use an insert into a zero vector.
15071  if (WidenedMask[0] == 0 && IsHighZero) {
15072    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
15073    SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
15074                              DAG.getIntPtrConstant(0, DL));
15075    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
15076                       getZeroVector(VT, Subtarget, DAG, DL), LoV,
15077                       DAG.getIntPtrConstant(0, DL));
15078  }
15079
15080  // TODO: If minimizing size and one of the inputs is a zero vector and the
15081  // zero vector has only one use, we could use a VPERM2X128 to save the
15082  // instruction bytes needed to explicitly generate the zero vector.
15083
15084  // Blends are faster and handle all the non-lane-crossing cases.
15085  if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
15086                                          Subtarget, DAG))
15087    return Blend;
15088
15089  // If either input operand is a zero vector, skip these alternatives and use
15090  // the VPERM2X128 lowering below, whose mask can encode the zero implicitly.
15091  if (!IsLowZero && !IsHighZero) {
15092    // Check for patterns which can be matched with a single insert of a 128-bit
15093    // subvector.
15094    bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
15095    if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
15096
15097      // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
15098      // this will likely become vinsertf128 which can't fold a 256-bit memop.
15099      if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
15100        MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
15101        SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
15102                                     OnlyUsesV1 ? V1 : V2,
15103                                     DAG.getIntPtrConstant(0, DL));
15104        return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
15105                           DAG.getIntPtrConstant(2, DL));
15106      }
15107    }
15108
15109    // Try to use SHUF128 if possible.
15110    if (Subtarget.hasVLX()) {
15111      if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
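        // SHUF128 reads its low 128-bit half from V1 and its high half from V2,
        // so it only applies when the widened mask already matches that order.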
15112        unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
15113                            ((WidenedMask[1] % 2) << 1);
15114        return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
15115                           DAG.getTargetConstant(PermMask, DL, MVT::i8));
15116      }
15117    }
15118  }
15119
15120  // Otherwise form a 128-bit permutation. After accounting for undefs, place
15121  // the widened selection indices (one per 128-bit half of the result) into
15122  // the positions defined by a vperm2*128 instruction's immediate control
15123  // byte.
15124
15125  // The immediate permute control byte looks like this:
15126  //    [1:0] - select 128 bits from sources for low half of destination
15127  //    [2]   - ignore
15128  //    [3]   - zero low half of destination
15129  //    [5:4] - select 128 bits from sources for high half of destination
15130  //    [6]   - ignore
15131  //    [7]   - zero high half of destination
15132
15133  assert((WidenedMask[0] >= 0 || IsLowZero) &&
15134         (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
15135
15136  unsigned PermMask = 0;
15137  PermMask |= IsLowZero  ? 0x08 : (WidenedMask[0] << 0);
15138  PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);
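  // e.g. a widened mask of <1, 2> selects the high half of V1 for the low half
  // of the result and the low half of V2 for the high half, giving PermMask 0x21.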
15139
15140  // Check the immediate mask and replace unused sources with undef.
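  // V1 goes unused if each half of the result either reads from V2 or is zeroed;
  // V2 goes unused if neither half reads from it without being zeroed.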
15141  if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
15142    V1 = DAG.getUNDEF(VT);
15143  if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
15144    V2 = DAG.getUNDEF(VT);
15145
15146  return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
15147                     DAG.getTargetConstant(PermMask, DL, MVT::i8));
15148}
15149
15150/// Lower a vector shuffle by first fixing the 128-bit lanes and then
15151/// shuffling each lane.
15152///
15153/// This attempts to create a repeated lane shuffle where each lane uses one
15154/// or two of the lanes of the inputs. The lanes of the input vectors are
15155/// shuffled in one or two independent shuffles to get the lanes into the
15156/// position needed by the final shuffle.
15157static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
15158    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
15159    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
15160  assert(!V2.isUndef() && "This is only useful with multiple inputs.");
15161
15162  if (is128BitLaneRepeatedShuffleMask(VT, Mask))
15163    return SDValue();
15164
15165  int NumElts = Mask.size();
15166  int NumLanes = VT.getSizeInBits() / 128;
15167  int NumLaneElts = 128 / VT.getScalarSizeInBits();
15168  SmallVector<int, 16> RepeatMask(NumLaneElts, -1);
15169  SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});
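  // LaneSrcs[DstLane] records up to two source lanes (indexed across the
  // concatenation of V1 and V2) feeding that destination lane; -1 = unassigned.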
15170
15171  // First pass will try to fill in the RepeatMask from lanes that need two
15172  // sources.
15173  for (int Lane = 0; Lane != NumLanes; ++Lane) {
15174    int Srcs[2] = {-1, -1};
15175    SmallVector<int, 16> InLaneMask(NumLaneElts, -1);
15176    for (int i = 0; i != NumLaneElts; ++i) {
15177      int M = Mask[(Lane * NumLaneElts) + i];
15178      if (M < 0)
15179        continue;
15180      // Determine which of the possible input lanes (NumLanes from each source)
15181      // this element comes from. Assign that as one of the sources for this
15182      // lane. We can assign up to 2 sources for this lane. If we run out of
15183      // sources we can't do anything.
15184      int LaneSrc = M / NumLaneElts;
15185      int Src;
15186      if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
15187        Src = 0;
15188      else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
15189        Src = 1;
15190      else
15191        return SDValue();
15192
15193      Srcs[Src] = LaneSrc;
15194      InLaneMask[i] = (M % NumLaneElts) + Src * NumElts;
15195    }
15196
15197    // If this lane has two sources, see if it fits with the repeat mask so far.
15198    if (Srcs[1] < 0)
15199      continue;
15200
15201    LaneSrcs[Lane][0] = Srcs[0];
15202    LaneSrcs[Lane][1] = Srcs[1];
15203
15204    auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
15205      assert(M1.size() == M2.size() && "Unexpected mask size");
15206      for (int i = 0, e = M1.size(); i != e; ++i)
15207        if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
15208          return false;
15209      return true;
15210    };
15211
15212    auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
15213      assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
15214      for (int i = 0, e = MergedMask.size(); i != e; ++i) {
15215        int M = Mask[i];
15216        if (M < 0)
15217          continue;
15218        assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
15219               "Unexpected mask element");
15220        MergedMask[i] = M;
15221      }
15222    };
15223
15224    if (MatchMasks(InLaneMask, RepeatMask)) {
15225      // Merge this lane mask into the final repeat mask.
15226      MergeMasks(InLaneMask, RepeatMask);
15227      continue;
15228    }
15229
15230    // Didn't find a match. Swap the operands and try again.
15231    std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
15232    ShuffleVectorSDNode::commuteMask(InLaneMask);
15233
15234    if (MatchMasks(InLaneMask, RepeatMask)) {
15235      // Merge this lane mask into the final repeat mask.
15236      MergeMasks(InLaneMask, RepeatMask);
15237      continue;
15238    }
15239
15240    // Couldn't find a match with the operands in either order.
15241    return SDValue();
15242  }
15243
15244  // Now handle any lanes with only one source.
15245  for (int Lane = 0; Lane != NumLanes; ++Lane) {
15246    // If this lane has already been processed, skip it.
15247    if (LaneSrcs[Lane][0] >= 0)
15248      continue;
15249
15250    for (int i = 0; i != NumLaneElts; ++i) {
15251      int M = Mask[(Lane * NumLaneElts) + i];
15252      if (M < 0)
15253        continue;
15254
15255      // If RepeatMask isn't defined yet we can define it ourselves.
15256      if (RepeatMask[i] < 0)
15257        RepeatMask[i] = M % NumLaneElts;
15258
15259      if (RepeatMask[i] < NumElts) {
15260        if (RepeatMask[i] != M % NumLaneElts)
15261          return SDValue();
15262        LaneSrcs[Lane][0] = M / NumLaneElts;
15263      } else {
15264        if (RepeatMask[i] != ((M % NumLaneElts) + NumElts))
15265          return SDValue();
15266        LaneSrcs[Lane][1] = M / NumLaneElts;
15267      }
15268    }
15269
15270    if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
15271      return SDValue();
15272  }
15273
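  // Build the first lane-gathering shuffle: each destination lane is filled with
  // the whole source lane recorded in LaneSrcs[Lane][0]; this becomes operand 0
  // of the final repeated-mask shuffle (LaneSrcs[Lane][1] feeds operand 1 below).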
15274  SmallVector<int, 16> NewMask(NumElts, -1);
15275  for (int Lane = 0; Lane != NumLanes; ++Lane) {
15276    int Src = LaneSrcs[Lane][0];
15277    for (int i = 0; i != NumLaneElts; ++i) {
15278      int M = -1;
15279      if (Src >= 0)
15280        M = Src * NumLaneElts + i;
15281      NewMask[Lane * NumLaneElts + i] = M;
15282    }
15283  }
15284  SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15285  // Ensure we didn't get back the shuffle we started with.
15286  // FIXME: This is a hack to make up for some splat handling code in
15287  // getVectorShuffle.
15288  if (isa<ShuffleVectorSDNode>(NewV1) &&
15289      cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
15290    return SDValue();
15291
15292  for (int Lane = 0; Lane != NumLanes; ++Lane) {
15293    int Src = LaneSrcs[Lane][1];
15294    for (int i = 0; i != NumLaneElts; ++i) {
15295      int M = -1;
15296      if (Src >= 0)
15297        M = Src * NumLaneElts + i;
15298      NewMask[Lane * NumLaneElts + i] = M;
15299    }
15300  }
15301  SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15302  // Ensure we didn't get back the shuffle we started with.
15303  // FIXME: This is a hack to make up for some splat handling code in
15304  // getVectorShuffle.
15305  if (isa<ShuffleVectorSDNode>(NewV2) &&
15306      cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
15307    return SDValue();
15308
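  // Finally repeat the per-lane mask across all lanes, keeping each element in
  // its own lane; indices below NumElts now read from NewV1 and the rest from
  // NewV2.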
15309  for (int i = 0; i != NumElts; ++i) {
15310    NewMask[i] = RepeatMask[i % NumLaneElts];
15311    if (NewMask[i] < 0)
15312      continue;
15313
15314    NewMask[i] += (i / NumLaneElts) * NumLaneElts;
15315  }
15316  return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
15317}
15318
15319/// If the input shuffle mask results in a vector that is undefined in all upper
15320/// or lower half elements and that mask accesses only 2 halves of the
15321/// shuffle's operands, return true. A mask of half the width with mask indexes
15322/// adjusted to access the extracted halves of the original shuffle operands is
15323/// returned in HalfMask. HalfIdx1 and HalfIdx2 identify which halves of the
15324/// operands are accessed (0 = lower V1, 1 = upper V1, 2 = lower V2, 3 = upper V2).
15325static bool
15326getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
15327                   int &HalfIdx1, int &HalfIdx2) {
15328  assert((Mask.size() == HalfMask.size() * 2) &&
15329         "Expected input mask to be twice as long as output");
15330
15331  // Exactly one half of the result must be undef to allow narrowing.
15332  bool UndefLower = isUndefLowerHalf(Mask);
15333  bool UndefUpper = isUndefUpperHalf(Mask);
15334  if (UndefLower == UndefUpper)
15335    return false;
15336
15337  unsigned HalfNumElts = HalfMask.size();
15338  unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
15339  HalfIdx1 = -1;
15340  HalfIdx2 = -1;
15341  for (unsigned i = 0; i != HalfNumElts; ++i) {
15342    int M = Mask[i + MaskIndexOffset];
15343    if (M < 0) {
15344      HalfMask[i] = M;
15345      continue;
15346    }
15347
15348    // Determine which of the 4 half vectors this element is from.
15349    // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
15350    int HalfIdx = M / HalfNumElts;
15351
15352    // Determine the element index into its half vector source.
15353    int HalfElt = M % HalfNumElts;
15354
15355    // We can shuffle with up to 2 half vectors, set the new 'half'
15356    // shuffle mask accordingly.
15357    if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
15358      HalfMask[i] = HalfElt;
15359      HalfIdx1 = HalfIdx;
15360      continue;
15361    }
15362    if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
15363      HalfMask[i] = HalfElt + HalfNumElts;
15364      HalfIdx2 = HalfIdx;
15365      continue;
15366    }
15367
15368    // Too many half vectors referenced.
15369    return false;
15370  }
15371
15372  return true;
15373}
15374
15375/// Given the output values from getHalfShuffleMask(), create a half width
15376/// shuffle of extracted vectors followed by an insert back to full width.
15377static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
15378                                     ArrayRef<int> HalfMask, int HalfIdx1,
15379                                     int HalfIdx2, bool UndefLower,
15380                                     SelectionDAG &DAG, bool UseConcat = false) {
15381  assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
15382  assert(V1.getValueType().isSimple() && "Expecting only simple types");
15383
15384  MVT VT = V1.getSimpleValueType();
15385  MVT HalfVT = VT.getHalfNumVectorElementsVT();
15386  unsigned HalfNumElts = HalfVT.getVectorNumElements();
15387
15388  auto getHalfVector = [&](int HalfIdx) {
15389    if (HalfIdx < 0)
15390      return DAG.getUNDEF(HalfVT);
15391    SDValue V = (HalfIdx < 2 ? V1 : V2);
15392    HalfIdx = (HalfIdx % 2) * HalfNumElts;
15393    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
15394                       DAG.getIntPtrConstant(HalfIdx, DL));
15395  };
15396
15397  // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
15398  SDValue Half1 = getHalfVector(HalfIdx1);
15399  SDValue Half2 = getHalfVector(HalfIdx2);
15400  SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
15401  if (UseConcat) {
15402    SDValue Op0 = V;
15403    SDValue Op1 = DAG.getUNDEF(HalfVT);
15404    if (UndefLower)
15405      std::swap(Op0, Op1);
15406    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Op0, Op1);
15407  }
15408
15409  unsigned Offset = UndefLower ? HalfNumElts : 0;
15410  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
15411                     DAG.getIntPtrConstant(Offset, DL));
15412}
15413
15414/// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
15415/// This allows for fast cases such as subvector extraction/insertion
15416/// or shuffling smaller vector types which can lower more efficiently.
15417static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
15418                                         SDValue V2, ArrayRef<int> Mask,
15419                                         const X86Subtarget &Subtarget,
15420                                         SelectionDAG &DAG) {
15421  assert((VT.is256BitVector() || VT.is512BitVector()) &&
15422         "Expected 256-bit or 512-bit vector");
15423
15424  bool UndefLower = isUndefLowerHalf(Mask);
15425  if (!UndefLower && !isUndefUpperHalf(Mask))
15426    return SDValue();
15427
15428  assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
15429         "Completely undef shuffle mask should have been simplified already");
15430
15431  // Upper half is undef and lower half is whole upper subvector.
15432  // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
15433  MVT HalfVT = VT.getHalfNumVectorElementsVT();
15434  unsigned HalfNumElts = HalfVT.getVectorNumElements();
15435  if (!UndefLower &&
15436      isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
15437    SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15438                             DAG.getIntPtrConstant(HalfNumElts, DL));
15439    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15440                       DAG.getIntPtrConstant(0, DL));
15441  }
15442
15443  // Lower half is undef and upper half is whole lower subvector.
15444  // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
15445  if (UndefLower &&
15446      isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
15447    SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15448                             DAG.getIntPtrConstant(0, DL));
15449    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15450                       DAG.getIntPtrConstant(HalfNumElts, DL));
15451  }
15452
15453  int HalfIdx1, HalfIdx2;
15454  SmallVector<int, 8> HalfMask(HalfNumElts);
15455  if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
15456    return SDValue();
15457
15458  assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
15459
15460  // Only shuffle the halves of the inputs when useful.
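  // HalfIdx values 0/2 refer to the lower halves of V1/V2 and 1/3 to the upper
  // halves, so these counts measure how many upper-half extractions the narrow
  // shuffle would need.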
15461  unsigned NumLowerHalves =
15462      (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
15463  unsigned NumUpperHalves =
15464      (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
15465  assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");
15466
15467  // Determine the larger pattern of undef/halves, then decide if it's worth
15468  // splitting the shuffle based on subtarget capabilities and types.
15469  unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
15470  if (!UndefLower) {
15471    // XXXXuuuu: no insert is needed.
15472    // Always extract the lower halves here - these are all free subreg ops.
15473    if (NumUpperHalves == 0)
15474      return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15475                                   UndefLower, DAG);
15476
15477    if (NumUpperHalves == 1) {
15478      // AVX2 has efficient 32/64-bit element cross-lane shuffles.
15479      if (Subtarget.hasAVX2()) {
15480        // extract128 + vunpckhps/vshufps, is better than vblend + vpermps.
15481        if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
15482            !is128BitUnpackShuffleMask(HalfMask) &&
15483            (!isSingleSHUFPSMask(HalfMask) ||
15484             Subtarget.hasFastVariableShuffle()))
15485          return SDValue();
15486        // If this is a unary shuffle (assume that the 2nd operand is
15487        // canonicalized to undef), then we can use vpermpd. Otherwise, we
15488        // are better off extracting the upper half of 1 operand and using a
15489        // narrow shuffle.
15490        if (EltWidth == 64 && V2.isUndef())
15491          return SDValue();
15492      }
15493      // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
15494      if (Subtarget.hasAVX512() && VT.is512BitVector())
15495        return SDValue();
15496      // Extract + narrow shuffle is better than the wide alternative.
15497      return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15498                                   UndefLower, DAG);
15499    }
15500
15501    // Don't extract both uppers, instead shuffle and then extract.
15502    assert(NumUpperHalves == 2 && "Half vector count went wrong");
15503    return SDValue();
15504  }
15505
15506  // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
15507  if (NumUpperHalves == 0) {
15508    // AVX2 has efficient 64-bit element cross-lane shuffles.
15509    // TODO: Refine to account for unary shuffle, splat, and other masks?
15510    if (Subtarget.hasAVX2() && EltWidth == 64)
15511      return SDValue();
15512    // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
15513    if (Subtarget.hasAVX512() && VT.is512BitVector())
15514      return SDValue();
15515    // Narrow shuffle + insert is better than the wide alternative.
15516    return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15517                                 UndefLower, DAG);
15518  }
15519
15520  // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
15521  return SDValue();
15522}
15523
15524/// Test whether the specified input (0 or 1) is in-place blended by the
15525/// given mask.
15526///
15527/// This returns true if the elements from a particular input are already in the
15528/// slots required by the given mask and require no permutation.
15529static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
15530  assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
15531  int Size = Mask.size();
15532  for (int i = 0; i < Size; ++i)
15533    if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
15534      return false;
15535
15536  return true;
15537}
15538
15539/// Handle case where shuffle sources are coming from the same 128-bit lane and
15540/// every lane can be represented as the same repeating mask - allowing us to
15541/// shuffle the sources with the repeating shuffle and then permute the result
15542/// to the destination lanes.
15543static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
15544    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
15545    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
15546  int NumElts = VT.getVectorNumElements();
15547  int NumLanes = VT.getSizeInBits() / 128;
15548  int NumLaneElts = NumElts / NumLanes;
15549
15550  // On AVX2 we may be able to just shuffle the lowest elements and then
15551  // broadcast the result.
15552  if (Subtarget.hasAVX2()) {
15553    for (unsigned BroadcastSize : {16, 32, 64}) {
15554      if (BroadcastSize <= VT.getScalarSizeInBits())
15555        continue;
15556      int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
15557
15558      // Attempt to match a repeating pattern every NumBroadcastElts,
15559      // accounting for UNDEFs but only referencing the lowest 128-bit
15560      // lane of the inputs.
15561      auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
15562        for (int i = 0; i != NumElts; i += NumBroadcastElts)
15563          for (int j = 0; j != NumBroadcastElts; ++j) {
15564            int M = Mask[i + j];
15565            if (M < 0)
15566              continue;
15567            int &R = RepeatMask[j];
15568            if (0 != ((M % NumElts) / NumLaneElts))
15569              return false;
15570            if (0 <= R && R != M)
15571              return false;
15572            R = M;
15573          }
15574        return true;
15575      };
15576
15577      SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
15578      if (!FindRepeatingBroadcastMask(RepeatMask))
15579        continue;
15580
15581      // Shuffle the (lowest) repeated elements in place for broadcast.
15582      SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
15583
15584      // Shuffle the actual broadcast.
15585      SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
15586      for (int i = 0; i != NumElts; i += NumBroadcastElts)
15587        for (int j = 0; j != NumBroadcastElts; ++j)
15588          BroadcastMask[i + j] = j;
15589      return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
15590                                  BroadcastMask);
15591    }
15592  }
15593
15594  // Bail if the shuffle mask doesn't cross 128-bit lanes.
15595  if (!is128BitLaneCrossingShuffleMask(VT, Mask))
15596    return SDValue();
15597
15598  // Bail if we already have a repeated lane shuffle mask.
15599  SmallVector<int, 8> RepeatedShuffleMask;
15600  if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask))
15601    return SDValue();
15602
15603  // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
15604  // (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes.
15605  int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1;
15606  int NumSubLanes = NumLanes * SubLaneScale;
15607  int NumSubLaneElts = NumLaneElts / SubLaneScale;
15608
15609  // Check that all the sources are coming from the same lane and see if we can
15610  // form a repeating shuffle mask (local to each sub-lane). At the same time,
15611  // determine the source sub-lane for each destination sub-lane.
15612  int TopSrcSubLane = -1;
15613  SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
15614  SmallVector<int, 8> RepeatedSubLaneMasks[2] = {
15615      SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef),
15616      SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)};
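  // With AVX2 sub-lane permutes (SubLaneScale == 2) the two 64-bit sub-lane
  // positions within a 128-bit lane may each need their own repeated mask, hence
  // one candidate mask per sub-lane position.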
15617
15618  for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
15619    // Extract the sub-lane mask, check that it all comes from the same lane
15620    // and normalize the mask entries to come from the first lane.
15621    int SrcLane = -1;
15622    SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
15623    for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15624      int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
15625      if (M < 0)
15626        continue;
15627      int Lane = (M % NumElts) / NumLaneElts;
15628      if ((0 <= SrcLane) && (SrcLane != Lane))
15629        return SDValue();
15630      SrcLane = Lane;
15631      int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
15632      SubLaneMask[Elt] = LocalM;
15633    }
15634
15635    // Whole sub-lane is UNDEF.
15636    if (SrcLane < 0)
15637      continue;
15638
15639    // Attempt to match against the candidate repeated sub-lane masks.
15640    for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
15641      auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
15642        for (int i = 0; i != NumSubLaneElts; ++i) {
15643          if (M1[i] < 0 || M2[i] < 0)
15644            continue;
15645          if (M1[i] != M2[i])
15646            return false;
15647        }
15648        return true;
15649      };
15650
15651      auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
15652      if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
15653        continue;
15654
15655      // Merge the sub-lane mask into the matching repeated sub-lane mask.
15656      for (int i = 0; i != NumSubLaneElts; ++i) {
15657        int M = SubLaneMask[i];
15658        if (M < 0)
15659          continue;
15660        assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
15661               "Unexpected mask element");
15662        RepeatedSubLaneMask[i] = M;
15663      }
15664
15665      // Track the top most source sub-lane - by setting the remaining to UNDEF
15666      // we can greatly simplify shuffle matching.
15667      int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
15668      TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
15669      Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
15670      break;
15671    }
15672
15673    // Bail if we failed to find a matching repeated sub-lane mask.
15674    if (Dst2SrcSubLanes[DstSubLane] < 0)
15675      return SDValue();
15676  }
15677  assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
15678         "Unexpected source lane");
15679
15680  // Create a repeating shuffle mask for the entire vector.
15681  SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
15682  for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
15683    int Lane = SubLane / SubLaneScale;
15684    auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
15685    for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15686      int M = RepeatedSubLaneMask[Elt];
15687      if (M < 0)
15688        continue;
15689      int Idx = (SubLane * NumSubLaneElts) + Elt;
15690      RepeatedMask[Idx] = M + (Lane * NumLaneElts);
15691    }
15692  }
15693  SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
15694
15695  // Shuffle each source sub-lane to its destination.
15696  SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
15697  for (int i = 0; i != NumElts; i += NumSubLaneElts) {
15698    int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
15699    if (SrcSubLane < 0)
15700      continue;
15701    for (int j = 0; j != NumSubLaneElts; ++j)
15702      SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
15703  }
15704
15705  return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
15706                              SubLaneMask);
15707}
15708
15709static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
15710                                   bool &ForceV1Zero, bool &ForceV2Zero,
15711                                   unsigned &ShuffleImm, ArrayRef<int> Mask,
15712                                   const APInt &Zeroable) {
15713  int NumElts = VT.getVectorNumElements();
15714  assert(VT.getScalarSizeInBits() == 64 &&
15715         (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
15716         "Unexpected data type for VSHUFPD");
15717  assert(isUndefOrZeroOrInRange(Mask, 0, 2 * NumElts) &&
15718         "Illegal shuffle mask");
15719
15720  bool ZeroLane[2] = { true, true };
15721  for (int i = 0; i < NumElts; ++i)
15722    ZeroLane[i & 1] &= Zeroable[i];
15723
15724  // Mask for V8F64: 0/1,  8/9,  2/3,  10/11, 4/5, ..
15725  // Mask for V4F64: 0/1,  4/5,  2/3,  6/7.
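  // For result element i, SHUFPD reads from V1 when i is even and from V2 when i
  // is odd, and can only choose between the two 64-bit elements of the matching
  // 128-bit lane; CommutVal describes the same constraint with V1/V2 swapped.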
15726  ShuffleImm = 0;
15727  bool ShufpdMask = true;
15728  bool CommutableMask = true;
15729  for (int i = 0; i < NumElts; ++i) {
15730    if (Mask[i] == SM_SentinelUndef || ZeroLane[i & 1])
15731      continue;
15732    if (Mask[i] < 0)
15733      return false;
15734    int Val = (i & 6) + NumElts * (i & 1);
15735    int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
15736    if (Mask[i] < Val || Mask[i] > Val + 1)
15737      ShufpdMask = false;
15738    if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
15739      CommutableMask = false;
15740    ShuffleImm |= (Mask[i] % 2) << i;
15741  }
15742
15743  if (!ShufpdMask && !CommutableMask)
15744    return false;
15745
15746  if (!ShufpdMask && CommutableMask)
15747    std::swap(V1, V2);
15748
15749  ForceV1Zero = ZeroLane[0];
15750  ForceV2Zero = ZeroLane[1];
15751  return true;
15752}
15753
15754static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT, SDValue V1,
15755                                      SDValue V2, ArrayRef<int> Mask,
15756                                      const APInt &Zeroable,
15757                                      const X86Subtarget &Subtarget,
15758                                      SelectionDAG &DAG) {
15759  assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
15760         "Unexpected data type for VSHUFPD");
15761
15762  unsigned Immediate = 0;
15763  bool ForceV1Zero = false, ForceV2Zero = false;
15764  if (!matchShuffleWithSHUFPD(VT, V1, V2, ForceV1Zero, ForceV2Zero, Immediate,
15765                              Mask, Zeroable))
15766    return SDValue();
15767
15768  // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
15769  if (ForceV1Zero)
15770    V1 = getZeroVector(VT, Subtarget, DAG, DL);
15771  if (ForceV2Zero)
15772    V2 = getZeroVector(VT, Subtarget, DAG, DL);
15773
15774  return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
15775                     DAG.getTargetConstant(Immediate, DL, MVT::i8));
15776}
15777
15778// Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
15779// by zeroable elements in the remaining 24 elements. Turn this into two
15780// vpmovqb (VTRUNC) instructions shuffled together.
15781static SDValue lowerShuffleAsVTRUNCAndUnpack(const SDLoc &DL, MVT VT,
15782                                             SDValue V1, SDValue V2,
15783                                             ArrayRef<int> Mask,
15784                                             const APInt &Zeroable,
15785                                             SelectionDAG &DAG) {
15786  assert(VT == MVT::v32i8 && "Unexpected type!");
15787
15788  // The first 8 indices should be every 8th element.
15789  if (!isSequentialOrUndefInRange(Mask, 0, 8, 0, 8))
15790    return SDValue();
15791
15792  // Remaining elements need to be zeroable.
15793  if (Zeroable.countLeadingOnes() < (Mask.size() - 8))
15794    return SDValue();
15795
15796  V1 = DAG.getBitcast(MVT::v4i64, V1);
15797  V2 = DAG.getBitcast(MVT::v4i64, V2);
15798
15799  V1 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V1);
15800  V2 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V2);
15801
15802  // The VTRUNCs will put 0s in the upper 12 bytes. Use them to put zeroes in
15803  // the upper bits of the result using an unpckldq.
15804  SDValue Unpack = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2,
15805                                        { 0, 1, 2, 3, 16, 17, 18, 19,
15806                                          4, 5, 6, 7, 20, 21, 22, 23 });
15807  // Insert the unpckldq into a zero vector to widen to v32i8.
15808  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v32i8,
15809                     DAG.getConstant(0, DL, MVT::v32i8), Unpack,
15810                     DAG.getIntPtrConstant(0, DL));
15811}
15812
15813
15814/// Handle lowering of 4-lane 64-bit floating point shuffles.
15815///
15816/// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
15817/// isn't available.
15818static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15819                                 const APInt &Zeroable, SDValue V1, SDValue V2,
15820                                 const X86Subtarget &Subtarget,
15821                                 SelectionDAG &DAG) {
15822  assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15823  assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15824  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15825
15826  if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
15827                                     Subtarget, DAG))
15828    return V;
15829
15830  if (V2.isUndef()) {
15831    // Check for being able to broadcast a single element.
15832    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
15833                                                    Mask, Subtarget, DAG))
15834      return Broadcast;
15835
15836    // Use low duplicate instructions for masks that match their pattern.
15837    if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
15838      return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
15839
15840    if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
15841      // Non-half-crossing single input shuffles can be lowered with an
15842      // interleaved permutation.
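      // VPERMILPD uses one immediate bit per element to pick the low or high
      // double within that element's 128-bit lane; since the mask doesn't cross
      // lanes, elements 0/1 can only be 0 or 1 and elements 2/3 only 2 or 3.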
15843      unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
15844                              ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
15845      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
15846                         DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
15847    }
15848
15849    // With AVX2 we have direct support for this permutation.
15850    if (Subtarget.hasAVX2())
15851      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
15852                         getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15853
15854    // Try to create an in-lane repeating shuffle mask and then shuffle the
15855    // results into the target lanes.
15856    if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15857            DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15858      return V;
15859
15860    // Try to permute the lanes and then use a per-lane permute.
15861    if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
15862                                                        Mask, DAG, Subtarget))
15863      return V;
15864
15865    // Otherwise, fall back.
15866    return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v4f64, V1, V2, Mask,
15867                                               DAG, Subtarget);
15868  }
15869
15870  // Use dedicated unpack instructions for masks that match their pattern.
15871  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
15872    return V;
15873
15874  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
15875                                          Zeroable, Subtarget, DAG))
15876    return Blend;
15877
15878  // Check if the blend happens to exactly fit that of SHUFPD.
15879  if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, V1, V2, Mask,
15880                                          Zeroable, Subtarget, DAG))
15881    return Op;
15882
15883  // If we have lane crossing shuffles AND they don't all come from the lower
15884  // lane elements, lower to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
15885  // TODO: Handle BUILD_VECTOR sources which getVectorShuffle currently
15886  // canonicalize to a blend of splat which isn't necessary for this combine.
15887  if (is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask) &&
15888      !all_of(Mask, [](int M) { return M < 2 || (4 <= M && M < 6); }) &&
15889      (V1.getOpcode() != ISD::BUILD_VECTOR) &&
15890      (V2.getOpcode() != ISD::BUILD_VECTOR))
15891    if (SDValue Op = lowerShuffleAsLanePermuteAndSHUFP(DL, MVT::v4f64, V1, V2,
15892                                                       Mask, DAG))
15893      return Op;
15894
15895  // If we have one input in place, then we can permute the other input and
15896  // blend the result.
15897  if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
15898    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
15899                                                Subtarget, DAG);
15900
15901  // Try to create an in-lane repeating shuffle mask and then shuffle the
15902  // results into the target lanes.
15903  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15904          DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15905    return V;
15906
15907  // Try to simplify this by merging 128-bit lanes to enable a lane-based
15908  // shuffle. However, if we have AVX2 and either input is already in place,
15909  // we will be able to shuffle the other input even across lanes in a single
15910  // instruction, so skip this pattern.
15911  if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
15912                                isShuffleMaskInputInPlace(1, Mask))))
15913    if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
15914            DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15915      return V;
15916
15917  // If we have VLX support, we can use VEXPAND.
15918  if (Subtarget.hasVLX())
15919    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
15920                                         DAG, Subtarget))
15921      return V;
15922
15923  // If we have AVX2 then we always want to lower with a blend because at v4 we
15924  // can fully permute the elements.
15925  if (Subtarget.hasAVX2())
15926    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
15927                                                Subtarget, DAG);
15928
15929  // Otherwise fall back on generic lowering.
15930  return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
15931                                    Subtarget, DAG);
15932}
15933
15934/// Handle lowering of 4-lane 64-bit integer shuffles.
15935///
15936/// This routine is only called when we have AVX2 and thus a reasonable
15937/// instruction set for v4i64 shuffling.
15938static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15939                                 const APInt &Zeroable, SDValue V1, SDValue V2,
15940                                 const X86Subtarget &Subtarget,
15941                                 SelectionDAG &DAG) {
15942  assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15943  assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15944  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15945  assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
15946
15947  if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
15948                                     Subtarget, DAG))
15949    return V;
15950
15951  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
15952                                          Zeroable, Subtarget, DAG))
15953    return Blend;
15954
15955  // Check for being able to broadcast a single element.
15956  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
15957                                                  Subtarget, DAG))
15958    return Broadcast;
15959
15960  if (V2.isUndef()) {
15961    // When the shuffle is mirrored between the two 128-bit lanes, we can use
15962    // lower-latency instructions that operate on both lanes at once.
15963    SmallVector<int, 2> RepeatedMask;
15964    if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
15965      SmallVector<int, 4> PSHUFDMask;
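      // Widen the 2-element repeated qword mask to a 4-element dword mask; PSHUFD
      // repeats its immediate across both 128-bit lanes, so one instruction covers
      // the whole vector.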
15966      scaleShuffleMask<int>(2, RepeatedMask, PSHUFDMask);
15967      return DAG.getBitcast(
15968          MVT::v4i64,
15969          DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
15970                      DAG.getBitcast(MVT::v8i32, V1),
15971                      getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
15972    }
15973
15974    // AVX2 provides a direct instruction for permuting a single input across
15975    // lanes.
15976    return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
15977                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15978  }
15979
15980  // Try to use shift instructions.
15981  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
15982                                          Zeroable, Subtarget, DAG))
15983    return Shift;
15984
15985  // If we have VLX support, we can use VALIGN or VEXPAND.
15986  if (Subtarget.hasVLX()) {
15987    if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v4i64, V1, V2, Mask,
15988                                              Subtarget, DAG))
15989      return Rotate;
15990
15991    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
15992                                         DAG, Subtarget))
15993      return V;
15994  }
15995
15996  // Try to use PALIGNR.
15997  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
15998                                                Subtarget, DAG))
15999    return Rotate;
16000
16001  // Use dedicated unpack instructions for masks that match their pattern.
16002  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
16003    return V;
16004
16005  // If we have one input in place, then we can permute the other input and
16006  // blend the result.
16007  if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
16008    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
16009                                                Subtarget, DAG);
16010
16011  // Try to create an in-lane repeating shuffle mask and then shuffle the
16012  // results into the target lanes.
16013  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16014          DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
16015    return V;
16016
16017  // Try to simplify this by merging 128-bit lanes to enable a lane-based
16018  // shuffle. However, if we have AVX2 and either input is already in place,
16019  // we will be able to shuffle the other input even across lanes in a single
16020  // instruction, so skip this pattern.
16021  if (!isShuffleMaskInputInPlace(0, Mask) &&
16022      !isShuffleMaskInputInPlace(1, Mask))
16023    if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16024            DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
16025      return Result;
16026
16027  // Otherwise fall back on generic blend lowering.
16028  return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
16029                                              Subtarget, DAG);
16030}
16031
16032/// Handle lowering of 8-lane 32-bit floating point shuffles.
16033///
16034/// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
16035/// isn't available.
16036static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16037                                 const APInt &Zeroable, SDValue V1, SDValue V2,
16038                                 const X86Subtarget &Subtarget,
16039                                 SelectionDAG &DAG) {
16040  assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
16041  assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
16042  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16043
16044  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
16045                                          Zeroable, Subtarget, DAG))
16046    return Blend;
16047
16048  // Check for being able to broadcast a single element.
16049  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
16050                                                  Subtarget, DAG))
16051    return Broadcast;
16052
16053  // If the shuffle mask is repeated in each 128-bit lane, we have many more
16054  // options to efficiently lower the shuffle.
16055  SmallVector<int, 4> RepeatedMask;
16056  if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
16057    assert(RepeatedMask.size() == 4 &&
16058           "Repeated masks must be half the mask width!");
16059
16060    // Use even/odd duplicate instructions for masks that match their pattern.
16061    if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
16062      return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
16063    if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
16064      return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
16065
16066    if (V2.isUndef())
16067      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
16068                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16069
16070    // Use dedicated unpack instructions for masks that match their pattern.
16071    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
16072      return V;
16073
16074    // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
16075    // have already handled any direct blends.
16076    return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
16077  }
16078
16079  // Try to create an in-lane repeating shuffle mask and then shuffle the
16080  // results into the target lanes.
16081  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16082          DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
16083    return V;
16084
16085  // If we have a single-input shuffle with different shuffle patterns in the
16086  // two 128-bit lanes, use a variable shuffle mask with VPERMILPS.
16087  if (V2.isUndef()) {
16088    SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
16089    if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
16090      return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
16091
16092    if (Subtarget.hasAVX2())
16093      return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
16094
16095    // Otherwise, fall back.
16096    return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v8f32, V1, V2, Mask,
16097                                               DAG, Subtarget);
16098  }
16099
16100  // Try to simplify this by merging 128-bit lanes to enable a lane-based
16101  // shuffle.
16102  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16103          DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
16104    return Result;
16105
16106  // If we have VLX support, we can use VEXPAND.
16107  if (Subtarget.hasVLX())
16108    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
16109                                         DAG, Subtarget))
16110      return V;
16111
16112  // For non-AVX512 targets, if the mask matches an in-lane unpack of 16-bit
16113  // elements, try to split, since after the split we get more efficient code
16114  // using vpunpcklwd and vpunpckhwd than with vblend.
16115  if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32))
16116    if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
16117                                               Subtarget, DAG))
16118      return V;
16119
16120  // If we have AVX2 then we always want to lower with a blend because at v8 we
16121  // can fully permute the elements.
16122  if (Subtarget.hasAVX2())
16123    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2, Mask,
16124                                                Subtarget, DAG);
16125
16126  // Otherwise fall back on generic lowering.
16127  return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
16128                                    Subtarget, DAG);
16129}
16130
16131/// Handle lowering of 8-lane 32-bit integer shuffles.
16132///
16133/// This routine is only called when we have AVX2 and thus a reasonable
16134/// instruction set for v8i32 shuffling.
16135static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16136                                 const APInt &Zeroable, SDValue V1, SDValue V2,
16137                                 const X86Subtarget &Subtarget,
16138                                 SelectionDAG &DAG) {
16139  assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
16140  assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
16141  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16142  assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
16143
16144  // Whenever we can lower this as a zext, that instruction is strictly faster
16145  // than any alternative. It also allows us to fold memory operands into the
16146  // shuffle in many cases.
16147  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
16148                                                   Zeroable, Subtarget, DAG))
16149    return ZExt;
16150
16151  // For non-AVX512 targets, if the mask matches an in-lane unpack of 16-bit
16152  // elements, try to split, since after the split we get more efficient code
16153  // than vblend by using vpunpcklwd and vpunpckhwd.
16154  if (isUnpackWdShuffleMask(Mask, MVT::v8i32) && !V2.isUndef() &&
16155      !Subtarget.hasAVX512())
16156    if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask,
16157                                               Subtarget, DAG))
16158      return V;
16159
16160  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
16161                                          Zeroable, Subtarget, DAG))
16162    return Blend;
16163
16164  // Check for being able to broadcast a single element.
16165  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
16166                                                  Subtarget, DAG))
16167    return Broadcast;
16168
16169  // If the shuffle mask is repeated in each 128-bit lane we can use more
16170  // efficient instructions that mirror the shuffles across the two 128-bit
16171  // lanes.
16172  SmallVector<int, 4> RepeatedMask;
16173  bool Is128BitLaneRepeatedShuffle =
16174      is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
16175  if (Is128BitLaneRepeatedShuffle) {
16176    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16177    if (V2.isUndef())
16178      return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
16179                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16180
16181    // Use dedicated unpack instructions for masks that match their pattern.
16182    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
16183      return V;
16184  }
16185
16186  // Try to use shift instructions.
16187  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
16188                                          Zeroable, Subtarget, DAG))
16189    return Shift;
16190
16191  // If we have VLX support, we can use VALIGN or EXPAND.
16192  if (Subtarget.hasVLX()) {
16193    if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v8i32, V1, V2, Mask,
16194                                              Subtarget, DAG))
16195      return Rotate;
16196
16197    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
16198                                         DAG, Subtarget))
16199      return V;
16200  }
16201
16202  // Try to use byte rotation instructions.
16203  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
16204                                                Subtarget, DAG))
16205    return Rotate;
16206
16207  // Try to create an in-lane repeating shuffle mask and then shuffle the
16208  // results into the target lanes.
16209  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16210          DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
16211    return V;
16212
16213  // If the shuffle patterns aren't repeated but this is a single-input shuffle,
16214  // directly generate a cross-lane VPERMD instruction.
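       // For example, an irregular cross-lane mask such as {0, 5, 2, 7, 4, 1, 6, 3}
       // is materialized as a constant index vector and a single VPERMD that
       // gathers those elements from V1.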
16215  if (V2.isUndef()) {
16216    SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
16217    return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
16218  }
16219
16220  // Assume that a single SHUFPS is faster than an alternative sequence of
16221  // multiple instructions (even if the CPU has a domain penalty).
16222  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
16223  if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16224    SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
16225    SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
16226    SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
16227                                            CastV1, CastV2, DAG);
16228    return DAG.getBitcast(MVT::v8i32, ShufPS);
16229  }
16230
16231  // Try to simplify this by merging 128-bit lanes to enable a lane-based
16232  // shuffle.
16233  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16234          DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
16235    return Result;
16236
16237  // Otherwise fall back on generic blend lowering.
16238  return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2, Mask,
16239                                              Subtarget, DAG);
16240}
16241
16242/// Handle lowering of 16-lane 16-bit integer shuffles.
16243///
16244/// This routine is only called when we have AVX2 and thus a reasonable
16245  /// instruction set for v16i16 shuffling.
16246static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16247                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16248                                  const X86Subtarget &Subtarget,
16249                                  SelectionDAG &DAG) {
16250  assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
16251  assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
16252  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16253  assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
16254
16255  // Whenever we can lower this as a zext, that instruction is strictly faster
16256  // than any alternative. It also allows us to fold memory operands into the
16257  // shuffle in many cases.
16258  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16259          DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16260    return ZExt;
16261
16262  // Check for being able to broadcast a single element.
16263  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
16264                                                  Subtarget, DAG))
16265    return Broadcast;
16266
16267  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
16268                                          Zeroable, Subtarget, DAG))
16269    return Blend;
16270
16271  // Use dedicated unpack instructions for masks that match their pattern.
16272  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
16273    return V;
16274
16275  // Use dedicated pack instructions for masks that match their pattern.
16276  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
16277                                       Subtarget))
16278    return V;
16279
16280  // Try to use shift instructions.
16281  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
16282                                          Zeroable, Subtarget, DAG))
16283    return Shift;
16284
16285  // Try to use byte rotation instructions.
16286  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
16287                                                Subtarget, DAG))
16288    return Rotate;
16289
16290  // Try to create an in-lane repeating shuffle mask and then shuffle the
16291  // results into the target lanes.
16292  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16293          DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16294    return V;
16295
16296  if (V2.isUndef()) {
16297    // There are no generalized cross-lane shuffle operations available on i16
16298    // element types.
16299    if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
16300      if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16301              DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16302        return V;
16303
16304      return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v16i16, V1, V2, Mask,
16305                                                 DAG, Subtarget);
16306    }
16307
16308    SmallVector<int, 8> RepeatedMask;
16309    if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
16310      // As this is a single-input shuffle, the repeated mask should be
16311      // a strictly valid v8i16 mask that we can pass through to the v8i16
16312      // lowering to handle even the v16 case.
16313      return lowerV8I16GeneralSingleInputShuffle(
16314          DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
16315    }
16316  }
16317
16318  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
16319                                              Zeroable, Subtarget, DAG))
16320    return PSHUFB;
16321
16322  // AVX512BWVL can lower to VPERMW.
16323  if (Subtarget.hasBWI() && Subtarget.hasVLX())
16324    return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, DAG);
16325
16326  // Try to simplify this by merging 128-bit lanes to enable a lane-based
16327  // shuffle.
16328  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16329          DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16330    return Result;
16331
16332  // Try to permute the lanes and then use a per-lane permute.
16333  if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16334          DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16335    return V;
16336
16337  // Otherwise fall back on generic lowering.
16338  return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
16339                                    Subtarget, DAG);
16340}
16341
16342/// Handle lowering of 32-lane 8-bit integer shuffles.
16343///
16344/// This routine is only called when we have AVX2 and thus a reasonable
16345  /// instruction set for v32i8 shuffling.
16346static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16347                                 const APInt &Zeroable, SDValue V1, SDValue V2,
16348                                 const X86Subtarget &Subtarget,
16349                                 SelectionDAG &DAG) {
16350  assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16351  assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16352  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16353  assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
16354
16355  // Whenever we can lower this as a zext, that instruction is strictly faster
16356  // than any alternative. It also allows us to fold memory operands into the
16357  // shuffle in many cases.
16358  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
16359                                                   Zeroable, Subtarget, DAG))
16360    return ZExt;
16361
16362  // Check for being able to broadcast a single element.
16363  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
16364                                                  Subtarget, DAG))
16365    return Broadcast;
16366
16367  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
16368                                          Zeroable, Subtarget, DAG))
16369    return Blend;
16370
16371  // Use dedicated unpack instructions for masks that match their pattern.
16372  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
16373    return V;
16374
16375  // Use dedicated pack instructions for masks that match their pattern.
16376  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
16377                                       Subtarget))
16378    return V;
16379
16380  // Try to use shift instructions.
16381  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
16382                                                Zeroable, Subtarget, DAG))
16383    return Shift;
16384
16385  // Try to use byte rotation instructions.
16386  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
16387                                                Subtarget, DAG))
16388    return Rotate;
16389
16390  // Try to create an in-lane repeating shuffle mask and then shuffle the
16391  // results into the target lanes.
16392  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16393          DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16394    return V;
16395
16396  // There are no generalized cross-lane shuffle operations available on i8
16397  // element types.
16398  if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
16399    if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16400            DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16401      return V;
16402
16403    return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v32i8, V1, V2, Mask,
16404                                               DAG, Subtarget);
16405  }
16406
16407  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
16408                                              Zeroable, Subtarget, DAG))
16409    return PSHUFB;
16410
16411  // AVX512VBMIVL can lower to VPERMB.
16412  if (Subtarget.hasVBMI() && Subtarget.hasVLX())
16413    return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, DAG);
16414
16415  // Try to simplify this by merging 128-bit lanes to enable a lane-based
16416  // shuffle.
16417  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16418          DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16419    return Result;
16420
16421  // Try to permute the lanes and then use a per-lane permute.
16422  if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16423          DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16424    return V;
16425
16426  // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
16427  // by zeroable elements in the remaining 24 elements. Turn this into two
16428  // vmovqb instructions shuffled together.
16429  if (Subtarget.hasVLX())
16430    if (SDValue V = lowerShuffleAsVTRUNCAndUnpack(DL, MVT::v32i8, V1, V2,
16431                                                  Mask, Zeroable, DAG))
16432      return V;
16433
16434  // Otherwise fall back on generic lowering.
16435  return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
16436                                    Subtarget, DAG);
16437}
16438
16439/// High-level routine to lower various 256-bit x86 vector shuffles.
16440///
16441/// This routine either breaks down the specific type of a 256-bit x86 vector
16442/// shuffle or splits it into two 128-bit shuffles and fuses the results back
16443/// together based on the available instructions.
16444static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
16445                                  SDValue V1, SDValue V2, const APInt &Zeroable,
16446                                  const X86Subtarget &Subtarget,
16447                                  SelectionDAG &DAG) {
16448  // If we have a single input to the zero element, insert that into V1 if we
16449  // can do so cheaply.
16450  int NumElts = VT.getVectorNumElements();
16451  int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16452
16453  if (NumV2Elements == 1 && Mask[0] >= NumElts)
16454    if (SDValue Insertion = lowerShuffleAsElementInsertion(
16455            DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
16456      return Insertion;
16457
16458  // Handle special cases where the lower or upper half is UNDEF.
16459  if (SDValue V =
16460          lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
16461    return V;
16462
16463  // There is a really nice hard cut-over between AVX1 and AVX2 that means we
16464  // can check for those subtargets here and avoid much of the subtarget
16465  // querying in the per-vector-type lowering routines. With AVX1 we have
16466  // essentially *zero* ability to manipulate a 256-bit vector with integer
16467  // types. Since we'll use floating point types there eventually, just
16468  // immediately cast everything to a float and operate entirely in that domain.
16469  if (VT.isInteger() && !Subtarget.hasAVX2()) {
16470    int ElementBits = VT.getScalarSizeInBits();
16471    if (ElementBits < 32) {
16472      // No floating point type is available; if we can't use the bit operations
16473      // for masking/blending, then decompose into 128-bit vectors.
16474      if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
16475                                            Subtarget, DAG))
16476        return V;
16477      if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
16478        return V;
16479      return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
16480    }
16481
16482    MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
16483                                VT.getVectorNumElements());
16484    V1 = DAG.getBitcast(FpVT, V1);
16485    V2 = DAG.getBitcast(FpVT, V2);
16486    return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
16487  }
16488
16489  switch (VT.SimpleTy) {
16490  case MVT::v4f64:
16491    return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16492  case MVT::v4i64:
16493    return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16494  case MVT::v8f32:
16495    return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16496  case MVT::v8i32:
16497    return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16498  case MVT::v16i16:
16499    return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16500  case MVT::v32i8:
16501    return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16502
16503  default:
16504    llvm_unreachable("Not a valid 256-bit x86 vector type!");
16505  }
16506}
16507
16508/// Try to lower a vector shuffle as a 128-bit shuffles.
16509static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
16510                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16511                                  const X86Subtarget &Subtarget,
16512                                  SelectionDAG &DAG) {
16513  assert(VT.getScalarSizeInBits() == 64 &&
16514         "Unexpected element type size for 128bit shuffle.");
16515
16516  // Handling a 256-bit vector would require VLX, and most probably
16517  // lowerV2X128VectorShuffle() is the better solution there.
16518  assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
16519
16520  // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
16521  SmallVector<int, 4> WidenedMask;
16522  if (!canWidenShuffleElements(Mask, WidenedMask))
16523    return SDValue();
16524
16525  // Try to use an insert into a zero vector.
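       // Note: Zeroable is indexed by the original (unwidened) v8 elements, so
       // (Zeroable & 0xf0) == 0xf0 means the upper 256 bits are zeroable and
       // (Zeroable & 0x0c) == 0x0c means the second 128-bit chunk is zeroable.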
16526  if (WidenedMask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
16527      (WidenedMask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
16528    unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
16529    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
16530    SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
16531                              DAG.getIntPtrConstant(0, DL));
16532    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
16533                       getZeroVector(VT, Subtarget, DAG, DL), LoV,
16534                       DAG.getIntPtrConstant(0, DL));
16535  }
16536
16537  // Check for patterns which can be matched with a single insert of a 256-bit
16538  // subvector.
16539  bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask,
16540                                        {0, 1, 2, 3, 0, 1, 2, 3});
16541  if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask,
16542                                        {0, 1, 2, 3, 8, 9, 10, 11})) {
16543    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
16544    SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
16545                                 OnlyUsesV1 ? V1 : V2,
16546                              DAG.getIntPtrConstant(0, DL));
16547    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
16548                       DAG.getIntPtrConstant(4, DL));
16549  }
16550
16551  assert(WidenedMask.size() == 4);
16552
16553  // See if this is an insertion of the lower 128-bits of V2 into V1.
16554  bool IsInsert = true;
16555  int V2Index = -1;
16556  for (int i = 0; i < 4; ++i) {
16557    assert(WidenedMask[i] >= -1);
16558    if (WidenedMask[i] < 0)
16559      continue;
16560
16561    // Make sure all V1 subvectors are in place.
16562    if (WidenedMask[i] < 4) {
16563      if (WidenedMask[i] != i) {
16564        IsInsert = false;
16565        break;
16566      }
16567    } else {
16568      // Make sure we only have a single V2 index and it's the lowest 128 bits.
16569      if (V2Index >= 0 || WidenedMask[i] != 4) {
16570        IsInsert = false;
16571        break;
16572      }
16573      V2Index = i;
16574    }
16575  }
16576  if (IsInsert && V2Index >= 0) {
16577    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
16578    SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
16579                                 DAG.getIntPtrConstant(0, DL));
16580    return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
16581  }
16582
16583  // Try to lower to vshuf64x2/vshuf32x4.
16584  SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
16585  unsigned PermMask = 0;
16586  // Ensure elements come from the same Op.
16587  for (int i = 0; i < 4; ++i) {
16588    assert(WidenedMask[i] >= -1);
16589    if (WidenedMask[i] < 0)
16590      continue;
16591
16592    SDValue Op = WidenedMask[i] >= 4 ? V2 : V1;
16593    unsigned OpIndex = i / 2;
16594    if (Ops[OpIndex].isUndef())
16595      Ops[OpIndex] = Op;
16596    else if (Ops[OpIndex] != Op)
16597      return SDValue();
16598
16599    // Convert the 128-bit shuffle mask selection values into 128-bit selection
16600    // bits defined by a vshuf64x2 instruction's immediate control byte.
16601    PermMask |= (WidenedMask[i] % 4) << (i * 2);
16602  }
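       // For example, a widened mask of {2, 3, 4, 5} picks Ops[0] = V1 and
       // Ops[1] = V2 and encodes PermMask = 0x4E, i.e. the high 256 bits of V1
       // followed by the low 256 bits of V2.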
16603
16604  return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
16605                     DAG.getTargetConstant(PermMask, DL, MVT::i8));
16606}
16607
16608/// Handle lowering of 8-lane 64-bit floating point shuffles.
16609static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16610                                 const APInt &Zeroable, SDValue V1, SDValue V2,
16611                                 const X86Subtarget &Subtarget,
16612                                 SelectionDAG &DAG) {
16613  assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16614  assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16615  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16616
16617  if (V2.isUndef()) {
16618    // Use low duplicate instructions for masks that match their pattern.
16619    if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
16620      return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
16621
16622    if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
16623      // Non-half-crossing single input shuffles can be lowered with an
16624      // interleaved permutation.
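           // For example, the mask {1, 0, 3, 2, 5, 4, 7, 6} yields the immediate
           // 0x55, which swaps the two doubles within every 128-bit lane.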
16625      unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
16626                              ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
16627                              ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
16628                              ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
16629      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
16630                         DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
16631    }
16632
16633    SmallVector<int, 4> RepeatedMask;
16634    if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
16635      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
16636                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16637  }
16638
16639  if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
16640                                           V2, Subtarget, DAG))
16641    return Shuf128;
16642
16643  if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
16644    return Unpck;
16645
16646  // Check if the blend happens to exactly fit that of SHUFPD.
16647  if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, V1, V2, Mask,
16648                                          Zeroable, Subtarget, DAG))
16649    return Op;
16650
16651  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
16652                                       DAG, Subtarget))
16653    return V;
16654
16655  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
16656                                          Zeroable, Subtarget, DAG))
16657    return Blend;
16658
16659  return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
16660}
16661
16662/// Handle lowering of 16-lane 32-bit floating point shuffles.
16663static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16664                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16665                                  const X86Subtarget &Subtarget,
16666                                  SelectionDAG &DAG) {
16667  assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16668  assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16669  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16670
16671  // If the shuffle mask is repeated in each 128-bit lane, we have many more
16672  // options to efficiently lower the shuffle.
16673  SmallVector<int, 4> RepeatedMask;
16674  if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
16675    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16676
16677    // Use even/odd duplicate instructions for masks that match their pattern.
16678    if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
16679      return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
16680    if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
16681      return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
16682
16683    if (V2.isUndef())
16684      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
16685                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16686
16687    // Use dedicated unpack instructions for masks that match their pattern.
16688    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
16689      return V;
16690
16691    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
16692                                            Zeroable, Subtarget, DAG))
16693      return Blend;
16694
16695    // Otherwise, fall back to a SHUFPS sequence.
16696    return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
16697  }
16698
16699  // If we have a single input shuffle with different shuffle patterns in the
16700  // 128-bit lanes and don't lane cross, use variable mask VPERMILPS.
16701  if (V2.isUndef() &&
16702      !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
16703    SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
16704    return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
16705  }
16706
16707  // If we have AVX512F support, we can use VEXPAND.
16708  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
16709                                             V1, V2, DAG, Subtarget))
16710    return V;
16711
16712  return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
16713}
16714
16715/// Handle lowering of 8-lane 64-bit integer shuffles.
16716static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16717                                 const APInt &Zeroable, SDValue V1, SDValue V2,
16718                                 const X86Subtarget &Subtarget,
16719                                 SelectionDAG &DAG) {
16720  assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16721  assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16722  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16723
16724  if (V2.isUndef()) {
16725    // When the shuffle is mirrored across the 128-bit lanes of the vector, we
16726    // can use lower-latency instructions that will operate on all four
16727    // 128-bit lanes.
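         // For example, a repeated two-element mask of {1, 0} scales to the v16i32
         // PSHUFD mask {2, 3, 0, 1}, swapping the qwords within each 128-bit lane.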
16728    SmallVector<int, 2> Repeated128Mask;
16729    if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
16730      SmallVector<int, 4> PSHUFDMask;
16731      scaleShuffleMask<int>(2, Repeated128Mask, PSHUFDMask);
16732      return DAG.getBitcast(
16733          MVT::v8i64,
16734          DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
16735                      DAG.getBitcast(MVT::v16i32, V1),
16736                      getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
16737    }
16738
16739    SmallVector<int, 4> Repeated256Mask;
16740    if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
16741      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
16742                         getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
16743  }
16744
16745  if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
16746                                           V2, Subtarget, DAG))
16747    return Shuf128;
16748
16749  // Try to use shift instructions.
16750  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
16751                                          Zeroable, Subtarget, DAG))
16752    return Shift;
16753
16754  // Try to use VALIGN.
16755  if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v8i64, V1, V2, Mask,
16756                                            Subtarget, DAG))
16757    return Rotate;
16758
16759  // Try to use PALIGNR.
16760  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask,
16761                                                Subtarget, DAG))
16762    return Rotate;
16763
16764  if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
16765    return Unpck;
16766  // If we have AVX512F support, we can use VEXPAND.
16767  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
16768                                       DAG, Subtarget))
16769    return V;
16770
16771  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
16772                                          Zeroable, Subtarget, DAG))
16773    return Blend;
16774
16775  return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG);
16776}
16777
16778/// Handle lowering of 16-lane 32-bit integer shuffles.
16779static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16780                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16781                                  const X86Subtarget &Subtarget,
16782                                  SelectionDAG &DAG) {
16783  assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16784  assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16785  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16786
16787  // Whenever we can lower this as a zext, that instruction is strictly faster
16788  // than any alternative. It also allows us to fold memory operands into the
16789  // shuffle in many cases.
16790  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16791          DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
16792    return ZExt;
16793
16794  // If the shuffle mask is repeated in each 128-bit lane we can use more
16795  // efficient instructions that mirror the shuffles across the four 128-bit
16796  // lanes.
16797  SmallVector<int, 4> RepeatedMask;
16798  bool Is128BitLaneRepeatedShuffle =
16799      is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
16800  if (Is128BitLaneRepeatedShuffle) {
16801    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16802    if (V2.isUndef())
16803      return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
16804                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16805
16806    // Use dedicated unpack instructions for masks that match their pattern.
16807    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
16808      return V;
16809  }
16810
16811  // Try to use shift instructions.
16812  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
16813                                          Zeroable, Subtarget, DAG))
16814    return Shift;
16815
16816  // Try to use VALIGN.
16817  if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v16i32, V1, V2, Mask,
16818                                            Subtarget, DAG))
16819    return Rotate;
16820
16821  // Try to use byte rotation instructions.
16822  if (Subtarget.hasBWI())
16823    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask,
16824                                                  Subtarget, DAG))
16825      return Rotate;
16826
16827  // Assume that a single SHUFPS is faster than using a permv shuffle.
16828  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
16829  if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16830    SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
16831    SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
16832    SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
16833                                            CastV1, CastV2, DAG);
16834    return DAG.getBitcast(MVT::v16i32, ShufPS);
16835  }
16836  // If we have AVX512F support, we can use VEXPAND.
16837  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
16838                                       DAG, Subtarget))
16839    return V;
16840
16841  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
16842                                          Zeroable, Subtarget, DAG))
16843    return Blend;
16844  return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
16845}
16846
16847/// Handle lowering of 32-lane 16-bit integer shuffles.
16848static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16849                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16850                                  const X86Subtarget &Subtarget,
16851                                  SelectionDAG &DAG) {
16852  assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16853  assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16854  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16855  assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
16856
16857  // Whenever we can lower this as a zext, that instruction is strictly faster
16858  // than any alternative. It also allows us to fold memory operands into the
16859  // shuffle in many cases.
16860  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16861          DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16862    return ZExt;
16863
16864  // Use dedicated unpack instructions for masks that match their pattern.
16865  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
16866    return V;
16867
16868  // Try to use shift instructions.
16869  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
16870                                          Zeroable, Subtarget, DAG))
16871    return Shift;
16872
16873  // Try to use byte rotation instructions.
16874  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask,
16875                                                Subtarget, DAG))
16876    return Rotate;
16877
16878  if (V2.isUndef()) {
16879    SmallVector<int, 8> RepeatedMask;
16880    if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
16881      // As this is a single-input shuffle, the repeated mask should be
16882      // a strictly valid v8i16 mask that we can pass through to the v8i16
16883      // lowering to handle even the v32 case.
16884      return lowerV8I16GeneralSingleInputShuffle(
16885          DL, MVT::v32i16, V1, RepeatedMask, Subtarget, DAG);
16886    }
16887  }
16888
16889  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
16890                                                Zeroable, Subtarget, DAG))
16891    return Blend;
16892
16893  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
16894                                              Zeroable, Subtarget, DAG))
16895    return PSHUFB;
16896
16897  return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
16898}
16899
16900/// Handle lowering of 64-lane 8-bit integer shuffles.
16901static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16902                                 const APInt &Zeroable, SDValue V1, SDValue V2,
16903                                 const X86Subtarget &Subtarget,
16904                                 SelectionDAG &DAG) {
16905  assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16906  assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16907  assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
16908  assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
16909
16910  // Whenever we can lower this as a zext, that instruction is strictly faster
16911  // than any alternative. It also allows us to fold memory operands into the
16912  // shuffle in many cases.
16913  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16914          DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
16915    return ZExt;
16916
16917  // Use dedicated unpack instructions for masks that match their pattern.
16918  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
16919    return V;
16920
16921  // Use dedicated pack instructions for masks that match their pattern.
16922  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
16923                                       Subtarget))
16924    return V;
16925
16926  // Try to use shift instructions.
16927  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
16928                                          Zeroable, Subtarget, DAG))
16929    return Shift;
16930
16931  // Try to use byte rotation instructions.
16932  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask,
16933                                                Subtarget, DAG))
16934    return Rotate;
16935
16936  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
16937                                              Zeroable, Subtarget, DAG))
16938    return PSHUFB;
16939
16940  // VBMI can use VPERMV/VPERMV3 byte shuffles.
16941  if (Subtarget.hasVBMI())
16942    return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, DAG);
16943
16944  // Try to create an in-lane repeating shuffle mask and then shuffle the
16945  // results into the target lanes.
16946  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16947          DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
16948    return V;
16949
16950  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
16951                                          Zeroable, Subtarget, DAG))
16952    return Blend;
16953
16954  // Try to simplify this by merging 128-bit lanes to enable a lane-based
16955  // shuffle.
16956  if (!V2.isUndef())
16957    if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16958            DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
16959      return Result;
16960
16961  // FIXME: Implement direct support for this type!
16962  return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
16963}
16964
16965/// High-level routine to lower various 512-bit x86 vector shuffles.
16966///
16967/// This routine either breaks down the specific type of a 512-bit x86 vector
16968/// shuffle or splits it into two 256-bit shuffles and fuses the results back
16969/// together based on the available instructions.
16970static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
16971                                  MVT VT, SDValue V1, SDValue V2,
16972                                  const APInt &Zeroable,
16973                                  const X86Subtarget &Subtarget,
16974                                  SelectionDAG &DAG) {
16975  assert(Subtarget.hasAVX512() &&
16976         "Cannot lower 512-bit vectors w/ basic ISA!");
16977
16978  // If we have a single input to the zero element, insert that into V1 if we
16979  // can do so cheaply.
16980  int NumElts = Mask.size();
16981  int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16982
16983  if (NumV2Elements == 1 && Mask[0] >= NumElts)
16984    if (SDValue Insertion = lowerShuffleAsElementInsertion(
16985            DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
16986      return Insertion;
16987
16988  // Handle special cases where the lower or upper half is UNDEF.
16989  if (SDValue V =
16990          lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
16991    return V;
16992
16993  // Check for being able to broadcast a single element.
16994  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
16995                                                  Subtarget, DAG))
16996    return Broadcast;
16997
16998  // Dispatch to each element type for lowering. If we don't have support for
16999  // specific element type shuffles at 512 bits, immediately split them and
17000  // lower them. Each lowering routine of a given type is allowed to assume that
17001  // the requisite ISA extensions for that element type are available.
17002  switch (VT.SimpleTy) {
17003  case MVT::v8f64:
17004    return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17005  case MVT::v16f32:
17006    return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17007  case MVT::v8i64:
17008    return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17009  case MVT::v16i32:
17010    return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17011  case MVT::v32i16:
17012    return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17013  case MVT::v64i8:
17014    return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17015
17016  default:
17017    llvm_unreachable("Not a valid 512-bit x86 vector type!");
17018  }
17019}
17020
17021static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
17022                                         MVT VT, SDValue V1, SDValue V2,
17023                                         const X86Subtarget &Subtarget,
17024                                         SelectionDAG &DAG) {
17025  // Shuffle should be unary.
17026  if (!V2.isUndef())
17027    return SDValue();
17028
17029  int ShiftAmt = -1;
17030  int NumElts = Mask.size();
17031  for (int i = 0; i != NumElts; ++i) {
17032    int M = Mask[i];
17033    assert((M == SM_SentinelUndef || (0 <= M && M < NumElts)) &&
17034           "Unexpected mask index.");
17035    if (M < 0)
17036      continue;
17037
17038    // The first non-undef element determines our shift amount.
17039    if (ShiftAmt < 0) {
17040      ShiftAmt = M - i;
17041      // Need to be shifting right.
17042      if (ShiftAmt <= 0)
17043        return SDValue();
17044    }
17045    // All non-undef elements must shift by the same amount.
17046    if (ShiftAmt != M - i)
17047      return SDValue();
17048  }
17049  assert(ShiftAmt >= 0 && "All undef?");
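       // For example, for a v8i1 mask of {3, 4, 5, 6, 7, -1, -1, -1} every defined
       // element satisfies M - i == 3, so this becomes a KSHIFTR by 3.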
17050
17051  // Great we found a shift right.
17052  MVT WideVT = VT;
17053  if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
17054    WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
17055  SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
17056                            DAG.getUNDEF(WideVT), V1,
17057                            DAG.getIntPtrConstant(0, DL));
17058  Res = DAG.getNode(X86ISD::KSHIFTR, DL, WideVT, Res,
17059                    DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
17060  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
17061                     DAG.getIntPtrConstant(0, DL));
17062}
17063
17064// Determine if this shuffle can be implemented with a KSHIFT instruction.
17065// Returns the shift amount if possible or -1 if not. This is a simplified
17066// version of matchShuffleAsShift.
17067static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
17068                                    int MaskOffset, const APInt &Zeroable) {
17069  int Size = Mask.size();
17070
17071  auto CheckZeros = [&](int Shift, bool Left) {
17072    for (int j = 0; j < Shift; ++j)
17073      if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
17074        return false;
17075
17076    return true;
17077  };
17078
17079  auto MatchShift = [&](int Shift, bool Left) {
17080    unsigned Pos = Left ? Shift : 0;
17081    unsigned Low = Left ? 0 : Shift;
17082    unsigned Len = Size - Shift;
17083    return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
17084  };
17085
17086  for (int Shift = 1; Shift != Size; ++Shift)
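       // For example, with MaskOffset == 0, a v8i1 mask of {2, 3, 4, 5, 6, 7, z, z}
       // (z = zeroable) is sequential from 2 with the top two elements zeroable, so
       // this matches a KSHIFTR by 2.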
17087    for (bool Left : {true, false})
17088      if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
17089        Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
17090        return Shift;
17091      }
17092
17093  return -1;
17094}
17095
17096
17097// Lower vXi1 vector shuffles.
17098  // There is no dedicated instruction on AVX-512 that shuffles the masks.
17099  // The only way to shuffle bits is to sign-extend the mask vector to a SIMD
17100  // vector, shuffle it, and then truncate it back.
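       // For example, a v8i1 shuffle is sign-extended to v8i32 (with VLX) or v8i64,
       // shuffled in that wider type, and then converted back to a mask.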
17101static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
17102                                MVT VT, SDValue V1, SDValue V2,
17103                                const APInt &Zeroable,
17104                                const X86Subtarget &Subtarget,
17105                                SelectionDAG &DAG) {
17106  assert(Subtarget.hasAVX512() &&
17107         "Cannot lower 512-bit vectors w/o basic ISA!");
17108
17109  int NumElts = Mask.size();
17110
17111  // Try to recognize shuffles that are just padding a subvector with zeros.
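       // For example, a v8i1 mask of {0, 1, 2, 3, z, z, z, z}, where the top four
       // elements are known to be zero, extracts the low v4i1 of V1 and inserts it
       // into a zero vector.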
17112  int SubvecElts = 0;
17113  int Src = -1;
17114  for (int i = 0; i != NumElts; ++i) {
17115    if (Mask[i] >= 0) {
17116      // Grab the source from the first valid mask element. All subsequent
17117      // elements need to use this same source.
17118      if (Src < 0)
17119        Src = Mask[i] / NumElts;
17120      if (Src != (Mask[i] / NumElts) || (Mask[i] % NumElts) != i)
17121        break;
17122    }
17123
17124    ++SubvecElts;
17125  }
17126  assert(SubvecElts != NumElts && "Identity shuffle?");
17127
17128  // Clip to a power of 2.
17129  SubvecElts = PowerOf2Floor(SubvecElts);
17130
17131  // Make sure the number of zeroable bits at the top at least covers the bits
17132  // not covered by the subvector.
17133  if ((int)Zeroable.countLeadingOnes() >= (NumElts - SubvecElts)) {
17134    assert(Src >= 0 && "Expected a source!");
17135    MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
17136    SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
17137                                  Src == 0 ? V1 : V2,
17138                                  DAG.getIntPtrConstant(0, DL));
17139    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
17140                       DAG.getConstant(0, DL, VT),
17141                       Extract, DAG.getIntPtrConstant(0, DL));
17142  }
17143
17144  // Try a simple shift right with undef elements. Later we'll try with zeros.
17145  if (SDValue Shift = lower1BitShuffleAsKSHIFTR(DL, Mask, VT, V1, V2, Subtarget,
17146                                                DAG))
17147    return Shift;
17148
17149  // Try to match KSHIFTs.
17150  unsigned Offset = 0;
17151  for (SDValue V : { V1, V2 }) {
17152    unsigned Opcode;
17153    int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
17154    if (ShiftAmt >= 0) {
17155      MVT WideVT = VT;
17156      if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
17157        WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
17158      SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
17159                                DAG.getUNDEF(WideVT), V,
17160                                DAG.getIntPtrConstant(0, DL));
17161      // Widened right shifts need two shifts to ensure we shift in zeroes.
17162      if (Opcode == X86ISD::KSHIFTR && WideVT != VT) {
17163        int WideElts = WideVT.getVectorNumElements();
17164        // Shift left to put the original vector in the MSBs of the new size.
17165        Res = DAG.getNode(X86ISD::KSHIFTL, DL, WideVT, Res,
17166                          DAG.getTargetConstant(WideElts - NumElts, DL, MVT::i8));
17167        // Increase the shift amount to account for the left shift.
17168        ShiftAmt += WideElts - NumElts;
17169      }
17170
17171      Res = DAG.getNode(Opcode, DL, WideVT, Res,
17172                        DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
17173      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
17174                         DAG.getIntPtrConstant(0, DL));
17175    }
17176    Offset += NumElts; // Increment for next iteration.
17177  }
17178
17179
17180
17181  MVT ExtVT;
17182  switch (VT.SimpleTy) {
17183  default:
17184    llvm_unreachable("Expected a vector of i1 elements");
17185  case MVT::v2i1:
17186    ExtVT = MVT::v2i64;
17187    break;
17188  case MVT::v4i1:
17189    ExtVT = MVT::v4i32;
17190    break;
17191  case MVT::v8i1:
17192    // Take a 512-bit type since there are more shuffle options on KNL. If we
17193    // have VLX, use a 256-bit shuffle.
17194    ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
17195    break;
17196  case MVT::v16i1:
17197    // Take 512-bit type, unless we are avoiding 512-bit types and have the
17198    // 256-bit operation available.
17199    ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
17200    break;
17201  case MVT::v32i1:
17202    // Take 512-bit type, unless we are avoiding 512-bit types and have the
17203    // 256-bit operation available.
17204    assert(Subtarget.hasBWI() && "Expected AVX512BW support");
17205    ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
17206    break;
17207  case MVT::v64i1:
17208    // Fall back to scalarization. FIXME: We can do better if the shuffle
17209    // can be partitioned cleanly.
17210    if (!Subtarget.useBWIRegs())
17211      return SDValue();
17212    ExtVT = MVT::v64i8;
17213    break;
17214  }
17215
17216  V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
17217  V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
17218
17219  SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
17220  // i1 was sign-extended, so we can use X86ISD::CVT2MASK.
17221  int NumElems = VT.getVectorNumElements();
17222  if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
17223      (Subtarget.hasDQI() && (NumElems < 32)))
17224    return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
17225                       Shuffle, ISD::SETGT);
17226
17227  return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
17228}
17229
17230/// Helper function that returns true if the shuffle mask should be
17231/// commuted to improve canonicalization.
17232static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
17233  int NumElements = Mask.size();
17234
17235  int NumV1Elements = 0, NumV2Elements = 0;
17236  for (int M : Mask)
17237    if (M < 0)
17238      continue;
17239    else if (M < NumElements)
17240      ++NumV1Elements;
17241    else
17242      ++NumV2Elements;
17243
17244  // Commute the shuffle as needed such that more elements come from V1 than
17245  // V2. This allows us to match the shuffle pattern strictly on how many
17246  // elements come from V1 without handling the symmetric cases.
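       // For example, a v4 mask of {4, 5, 6, 3} uses three V2 elements and only one
       // V1 element, so the caller commutes it to {0, 1, 2, 7} and swaps V1/V2.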
17247  if (NumV2Elements > NumV1Elements)
17248    return true;
17249
17250  assert(NumV1Elements > 0 && "No V1 indices");
17251
17252  if (NumV2Elements == 0)
17253    return false;
17254
17255  // When the numbers of V1 and V2 elements are the same, try to minimize the
17256  // number of uses of V2 in the low half of the vector. When that is tied,
17257  // ensure that the sum of indices for V1 is equal to or lower than the sum of
17258  // indices for V2. When those are equal, try to ensure that the number of odd
17259  // indices for V1 is lower than the number of odd indices for V2.
17260  if (NumV1Elements == NumV2Elements) {
17261    int LowV1Elements = 0, LowV2Elements = 0;
17262    for (int M : Mask.slice(0, NumElements / 2))
17263      if (M >= NumElements)
17264        ++LowV2Elements;
17265      else if (M >= 0)
17266        ++LowV1Elements;
17267    if (LowV2Elements > LowV1Elements)
17268      return true;
17269    if (LowV2Elements == LowV1Elements) {
17270      int SumV1Indices = 0, SumV2Indices = 0;
17271      for (int i = 0, Size = Mask.size(); i < Size; ++i)
17272        if (Mask[i] >= NumElements)
17273          SumV2Indices += i;
17274        else if (Mask[i] >= 0)
17275          SumV1Indices += i;
17276      if (SumV2Indices < SumV1Indices)
17277        return true;
17278      if (SumV2Indices == SumV1Indices) {
17279        int NumV1OddIndices = 0, NumV2OddIndices = 0;
17280        for (int i = 0, Size = Mask.size(); i < Size; ++i)
17281          if (Mask[i] >= NumElements)
17282            NumV2OddIndices += i % 2;
17283          else if (Mask[i] >= 0)
17284            NumV1OddIndices += i % 2;
17285        if (NumV2OddIndices < NumV1OddIndices)
17286          return true;
17287      }
17288    }
17289  }
17290
17291  return false;
17292}
17293
17294/// Top-level lowering for x86 vector shuffles.
17295///
17296/// This handles decomposition, canonicalization, and lowering of all x86
17297/// vector shuffles. Most of the specific lowering strategies are encapsulated
17298/// above in helper routines. The canonicalization attempts to widen shuffles
17299/// to involve fewer lanes of wider elements, consolidate symmetric patterns
17300/// s.t. only one of the two inputs needs to be tested, etc.
17301static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
17302                                   SelectionDAG &DAG) {
17303  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
17304  ArrayRef<int> OrigMask = SVOp->getMask();
17305  SDValue V1 = Op.getOperand(0);
17306  SDValue V2 = Op.getOperand(1);
17307  MVT VT = Op.getSimpleValueType();
17308  int NumElements = VT.getVectorNumElements();
17309  SDLoc DL(Op);
17310  bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
17311
17312  assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
17313         "Can't lower MMX shuffles");
17314
17315  bool V1IsUndef = V1.isUndef();
17316  bool V2IsUndef = V2.isUndef();
17317  if (V1IsUndef && V2IsUndef)
17318    return DAG.getUNDEF(VT);
17319
17320  // When we create a shuffle node we put the UNDEF node in the second operand,
17321  // but in some cases the first operand may be transformed to UNDEF.
17322  // In that case we should just commute the node.
17323  if (V1IsUndef)
17324    return DAG.getCommutedVectorShuffle(*SVOp);
17325
17326  // Check for non-undef masks pointing at an undef vector and make the masks
17327  // undef as well. This makes it easier to match the shuffle based solely on
17328  // the mask.
17329  if (V2IsUndef &&
17330      any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
17331    SmallVector<int, 8> NewMask(OrigMask.begin(), OrigMask.end());
17332    for (int &M : NewMask)
17333      if (M >= NumElements)
17334        M = -1;
17335    return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
17336  }
17337
17338  // Check for illegal shuffle mask element index values.
17339  int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
17340  (void)MaskUpperLimit;
17341  assert(llvm::all_of(OrigMask,
17342                      [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
17343         "Out of bounds shuffle index");
17344
17345  // We actually see shuffles that are entirely re-arrangements of a set of
17346  // zero inputs. This mostly happens while decomposing complex shuffles into
17347  // simple ones. Directly lower these as a buildvector of zeros.
17348  APInt KnownUndef, KnownZero;
17349  computeZeroableShuffleElements(OrigMask, V1, V2, KnownUndef, KnownZero);
17350
17351  APInt Zeroable = KnownUndef | KnownZero;
17352  if (Zeroable.isAllOnesValue())
17353    return getZeroVector(VT, Subtarget, DAG, DL);
17354
17355  bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
17356
17357  // Try to collapse shuffles into using a vector type with fewer elements but
17358  // wider element types. We cap this to not form integers or floating point
17359  // elements wider than 64 bits, but it might be interesting to form i128
17360  // integers to handle flipping the low and high halves of AVX 256-bit vectors.
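       // For example, a v4i32 shuffle with mask {0, 1, 4, 5} widens to a v2i64
       // shuffle with mask {0, 2}.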
17361  SmallVector<int, 16> WidenedMask;
17362  if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
17363      canWidenShuffleElements(OrigMask, Zeroable, V2IsZero, WidenedMask)) {
17364    // Shuffle mask widening should not interfere with a broadcast opportunity
17365    // by obfuscating the operands with bitcasts.
17366    // TODO: Avoid lowering directly from this top-level function: make this
17367    // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
17368    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, OrigMask,
17369                                                    Subtarget, DAG))
17370      return Broadcast;
17371
17372    MVT NewEltVT = VT.isFloatingPoint()
17373                       ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
17374                       : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
17375    int NewNumElts = NumElements / 2;
17376    MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
17377    // Make sure that the new vector type is legal. For example, v2f64 isn't
17378    // legal on SSE1.
17379    if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
17380      if (V2IsZero) {
17381        // Modify the new Mask to take all zeros from the all-zero vector.
17382        // Choose indices that are blend-friendly.
17383        bool UsedZeroVector = false;
17384        assert(find(WidenedMask, SM_SentinelZero) != WidenedMask.end() &&
17385               "V2's non-undef elements are used?!");
17386        for (int i = 0; i != NewNumElts; ++i)
17387          if (WidenedMask[i] == SM_SentinelZero) {
17388            WidenedMask[i] = i + NewNumElts;
17389            UsedZeroVector = true;
17390          }
17391        // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
17392        // some elements to be undef.
17393        if (UsedZeroVector)
17394          V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
17395      }
17396      V1 = DAG.getBitcast(NewVT, V1);
17397      V2 = DAG.getBitcast(NewVT, V2);
17398      return DAG.getBitcast(
17399          VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
17400    }
17401  }
17402
17403  // Commute the shuffle if it will improve canonicalization.
17404  SmallVector<int, 64> Mask(OrigMask.begin(), OrigMask.end());
17405  if (canonicalizeShuffleMaskWithCommute(Mask)) {
17406    ShuffleVectorSDNode::commuteMask(Mask);
17407    std::swap(V1, V2);
17408  }
17409
17410  if (SDValue V = lowerShuffleWithVPMOV(DL, Mask, VT, V1, V2, DAG, Subtarget))
17411    return V;
17412
17413  // For each vector width, delegate to a specialized lowering routine.
17414  if (VT.is128BitVector())
17415    return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17416
17417  if (VT.is256BitVector())
17418    return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17419
17420  if (VT.is512BitVector())
17421    return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17422
17423  if (Is1BitVector)
17424    return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17425
17426  llvm_unreachable("Unimplemented!");
17427}
17428
17429/// Try to lower a VSELECT instruction to a vector shuffle.
17430static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
17431                                           const X86Subtarget &Subtarget,
17432                                           SelectionDAG &DAG) {
17433  SDValue Cond = Op.getOperand(0);
17434  SDValue LHS = Op.getOperand(1);
17435  SDValue RHS = Op.getOperand(2);
17436  MVT VT = Op.getSimpleValueType();
17437
17438  // Only non-legal VSELECTs reach this lowering; convert those into generic
17439  // shuffles and re-use the shuffle lowering path for blends.
17440  SmallVector<int, 32> Mask;
17441  if (createShuffleMaskFromVSELECT(Mask, Cond))
17442    return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);
17443
17444  return SDValue();
17445}
17446
17447SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
17448  SDValue Cond = Op.getOperand(0);
17449  SDValue LHS = Op.getOperand(1);
17450  SDValue RHS = Op.getOperand(2);
17451
17452  // A vselect where all conditions and data are constants can be optimized into
17453  // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
17454  if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
17455      ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
17456      ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
17457    return SDValue();
17458
17459  // Try to lower this to a blend-style vector shuffle. This can handle all
17460  // constant condition cases.
17461  if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
17462    return BlendOp;
17463
17464  // If this VSELECT has a vector of i1 as a mask, it will be directly matched
17465  // with patterns on the mask registers on AVX-512.
17466  MVT CondVT = Cond.getSimpleValueType();
17467  unsigned CondEltSize = Cond.getScalarValueSizeInBits();
17468  if (CondEltSize == 1)
17469    return Op;
17470
17471  // Variable blends are only legal from SSE4.1 onward.
17472  if (!Subtarget.hasSSE41())
17473    return SDValue();
17474
17475  SDLoc dl(Op);
17476  MVT VT = Op.getSimpleValueType();
17477  unsigned EltSize = VT.getScalarSizeInBits();
17478  unsigned NumElts = VT.getVectorNumElements();
17479
17480  // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
17481  // into an i1 condition so that we can use the mask-based 512-bit blend
17482  // instructions.
17483  if (VT.getSizeInBits() == 512) {
17484    // Build a mask by testing the condition against zero.
17485    MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
17486    SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
17487                                DAG.getConstant(0, dl, CondVT),
17488                                ISD::SETNE);
17489    // Now return a new VSELECT using the mask.
17490    return DAG.getSelect(dl, VT, Mask, LHS, RHS);
17491  }
17492
17493  // SEXT/TRUNC cases where the mask doesn't match the destination size.
17494  if (CondEltSize != EltSize) {
17495    // If we don't have a sign splat, rely on the expansion.
17496    if (CondEltSize != DAG.ComputeNumSignBits(Cond))
17497      return SDValue();
17498
17499    MVT NewCondSVT = MVT::getIntegerVT(EltSize);
17500    MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
17501    Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
17502    return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
17503  }
17504
17505  // Only some types will be legal on some subtargets. If we can emit a legal
17506  // VSELECT-matching blend, return Op, but if we need to expand, return
17507  // a null value.
17508  switch (VT.SimpleTy) {
17509  default:
17510    // Most of the vector types have blends past SSE4.1.
17511    return Op;
17512
17513  case MVT::v32i8:
17514    // The byte blends for AVX vectors were introduced only in AVX2.
17515    if (Subtarget.hasAVX2())
17516      return Op;
17517
17518    return SDValue();
17519
17520  case MVT::v8i16:
17521  case MVT::v16i16: {
17522    // Bitcast everything to the vXi8 type and use a vXi8 vselect.
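    // Rough illustration (not from the original comments): an all-ones v8i16
    // condition lane (0xFFFF) becomes two 0xFF bytes and an all-zeros lane
    // becomes two 0x00 bytes, so the byte-wise vselect still selects whole
    // i16 lanes, assuming each condition element is all-ones or all-zeros.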
17523    MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
17524    Cond = DAG.getBitcast(CastVT, Cond);
17525    LHS = DAG.getBitcast(CastVT, LHS);
17526    RHS = DAG.getBitcast(CastVT, RHS);
17527    SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
17528    return DAG.getBitcast(VT, Select);
17529  }
17530  }
17531}
17532
17533static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
17534  MVT VT = Op.getSimpleValueType();
17535  SDLoc dl(Op);
17536
17537  if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
17538    return SDValue();
17539
17540  if (VT.getSizeInBits() == 8) {
17541    SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
17542                                  Op.getOperand(0), Op.getOperand(1));
17543    return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17544  }
17545
17546  if (VT == MVT::f32) {
17547    // EXTRACTPS outputs to a GPR32 register, which will require a movd to copy
17548    // the result back to an FR32 register. It's only worth matching if the
17549    // result has a single use which is a store or a bitcast to i32.  And in
17550    // the case of a store, it's not worth it if the index is a constant 0,
17551    // because a MOVSSmr can be used instead, which is smaller and faster.
17552    if (!Op.hasOneUse())
17553      return SDValue();
17554    SDNode *User = *Op.getNode()->use_begin();
17555    if ((User->getOpcode() != ISD::STORE ||
17556         isNullConstant(Op.getOperand(1))) &&
17557        (User->getOpcode() != ISD::BITCAST ||
17558         User->getValueType(0) != MVT::i32))
17559      return SDValue();
17560    SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17561                                  DAG.getBitcast(MVT::v4i32, Op.getOperand(0)),
17562                                  Op.getOperand(1));
17563    return DAG.getBitcast(MVT::f32, Extract);
17564  }
17565
17566  if (VT == MVT::i32 || VT == MVT::i64) {
17567    // EXTRACTPS/PEXTRQ work with a constant index.
17568    if (isa<ConstantSDNode>(Op.getOperand(1)))
17569      return Op;
17570  }
17571
17572  return SDValue();
17573}
17574
17575/// Extract one bit from a mask vector, like v16i1 or v8i1.
17576/// AVX-512 feature.
17577static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
17578                                        const X86Subtarget &Subtarget) {
17579  SDValue Vec = Op.getOperand(0);
17580  SDLoc dl(Vec);
17581  MVT VecVT = Vec.getSimpleValueType();
17582  SDValue Idx = Op.getOperand(1);
17583  MVT EltVT = Op.getSimpleValueType();
17584
17585  assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
17586         "Unexpected vector type in ExtractBitFromMaskVector");
17587
17588  // A variable index can't be handled in mask registers;
17589  // extend the vector to VR512/128.
17590  if (!isa<ConstantSDNode>(Idx)) {
17591    unsigned NumElts = VecVT.getVectorNumElements();
17592    // Extending v8i1/v16i1 to 512-bit gets better performance on KNL
17593    // than extending to 128/256-bit.
17594    MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
17595    MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
17596    SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
17597    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
17598    return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
17599  }
17600
17601  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17602  if (IdxVal == 0) // the operation is legal
17603    return Op;
17604
17605  // Extend to natively supported kshift.
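  // Illustrative example (an assumption, not from the original comments):
  // extracting bit 3 of a v4i1 mask without AVX512DQ widens to v16i1, does a
  // KSHIFTR by 3, and then reads element 0 of the result.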
17606  unsigned NumElems = VecVT.getVectorNumElements();
17607  MVT WideVecVT = VecVT;
17608  if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
17609    WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
17610    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
17611                      DAG.getUNDEF(WideVecVT), Vec,
17612                      DAG.getIntPtrConstant(0, dl));
17613  }
17614
17615  // Use kshiftr instruction to move to the lower element.
17616  Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
17617                    DAG.getTargetConstant(IdxVal, dl, MVT::i8));
17618
17619  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
17620                     DAG.getIntPtrConstant(0, dl));
17621}
17622
17623SDValue
17624X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
17625                                           SelectionDAG &DAG) const {
17626  SDLoc dl(Op);
17627  SDValue Vec = Op.getOperand(0);
17628  MVT VecVT = Vec.getSimpleValueType();
17629  SDValue Idx = Op.getOperand(1);
17630
17631  if (VecVT.getVectorElementType() == MVT::i1)
17632    return ExtractBitFromMaskVector(Op, DAG, Subtarget);
17633
17634  if (!isa<ConstantSDNode>(Idx)) {
17635    // It's more profitable to go through memory (1 cycle throughput)
17636    // than to use a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
17637    // The IACA tool was used to get the performance estimate
17638    // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
17639    //
17640    // example : extractelement <16 x i8> %a, i32 %i
17641    //
17642    // Block Throughput: 3.00 Cycles
17643    // Throughput Bottleneck: Port5
17644    //
17645    // | Num Of |   Ports pressure in cycles  |    |
17646    // |  Uops  |  0  - DV  |  5  |  6  |  7  |    |
17647    // ---------------------------------------------
17648    // |   1    |           | 1.0 |     |     | CP | vmovd xmm1, edi
17649    // |   1    |           | 1.0 |     |     | CP | vpshufb xmm0, xmm0, xmm1
17650    // |   2    | 1.0       | 1.0 |     |     | CP | vpextrb eax, xmm0, 0x0
17651    // Total Num Of Uops: 4
17652    //
17653    //
17654    // Block Throughput: 1.00 Cycles
17655    // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
17656    //
17657    // |    |  Ports pressure in cycles   |  |
17658    // |Uops| 1 | 2 - D  |3 -  D  | 4 | 5 |  |
17659    // ---------------------------------------------------------
17660    // |2^  |   | 0.5    | 0.5    |1.0|   |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
17661    // |1   |0.5|        |        |   |0.5|  | lea rax, ptr [rsp-0x18]
17662    // |1   |   |0.5, 0.5|0.5, 0.5|   |   |CP| mov al, byte ptr [rdi+rax*1]
17663    // Total Num Of Uops: 4
17664
17665    return SDValue();
17666  }
17667
17668  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17669
17670  // If this is a 256-bit vector result, first extract the 128-bit vector and
17671  // then extract the element from the 128-bit vector.
17672  if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
17673    // Get the 128-bit vector.
17674    Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
17675    MVT EltVT = VecVT.getVectorElementType();
17676
17677    unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
17678    assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
17679
17680    // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
17681    // this can be done with a mask.
17682    IdxVal &= ElemsPerChunk - 1;
17683    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
17684                       DAG.getIntPtrConstant(IdxVal, dl));
17685  }
17686
17687  assert(VecVT.is128BitVector() && "Unexpected vector length");
17688
17689  MVT VT = Op.getSimpleValueType();
17690
17691  if (VT.getSizeInBits() == 16) {
17692    // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
17693    // we're going to zero extend the register or fold the store (SSE41 only).
17694    if (IdxVal == 0 && !MayFoldIntoZeroExtend(Op) &&
17695        !(Subtarget.hasSSE41() && MayFoldIntoStore(Op)))
17696      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
17697                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17698                                     DAG.getBitcast(MVT::v4i32, Vec), Idx));
17699
17700    // Transform it so it matches pextrw, which produces a 32-bit result.
17701    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
17702                                  Op.getOperand(0), Op.getOperand(1));
17703    return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17704  }
17705
17706  if (Subtarget.hasSSE41())
17707    if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
17708      return Res;
17709
17710  // TODO: We only extract a single element from v16i8; we can probably afford
17711  // to be more aggressive here before using the default approach of spilling to
17712  // the stack.
17713  if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
17714    // Extract either the lowest i32 or any i16, and extract the sub-byte.
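    // For example (illustration only): extracting byte 5 of a v16i8 vector
    // takes the word path below: extract i16 element 2, shift it right by 8
    // bits, and truncate to i8.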
17715    int DWordIdx = IdxVal / 4;
17716    if (DWordIdx == 0) {
17717      SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17718                                DAG.getBitcast(MVT::v4i32, Vec),
17719                                DAG.getIntPtrConstant(DWordIdx, dl));
17720      int ShiftVal = (IdxVal % 4) * 8;
17721      if (ShiftVal != 0)
17722        Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
17723                          DAG.getConstant(ShiftVal, dl, MVT::i8));
17724      return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17725    }
17726
17727    int WordIdx = IdxVal / 2;
17728    SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
17729                              DAG.getBitcast(MVT::v8i16, Vec),
17730                              DAG.getIntPtrConstant(WordIdx, dl));
17731    int ShiftVal = (IdxVal % 2) * 8;
17732    if (ShiftVal != 0)
17733      Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
17734                        DAG.getConstant(ShiftVal, dl, MVT::i8));
17735    return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17736  }
17737
17738  if (VT.getSizeInBits() == 32) {
17739    if (IdxVal == 0)
17740      return Op;
17741
17742    // SHUFPS the element to the lowest double word, then movss.
17743    int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
17744    Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
17745    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
17746                       DAG.getIntPtrConstant(0, dl));
17747  }
17748
17749  if (VT.getSizeInBits() == 64) {
17750    // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
17751    // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
17752    //        to match extract_elt for f64.
17753    if (IdxVal == 0)
17754      return Op;
17755
17756    // UNPCKHPD the element to the lowest double word, then movsd.
17757    // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
17758    // to a f64mem, the whole operation is folded into a single MOVHPDmr.
17759    int Mask[2] = { 1, -1 };
17760    Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
17761    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
17762                       DAG.getIntPtrConstant(0, dl));
17763  }
17764
17765  return SDValue();
17766}
17767
17768/// Insert one bit into a mask vector, like v16i1 or v8i1.
17769/// AVX-512 feature.
17770static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
17771                                     const X86Subtarget &Subtarget) {
17772  SDLoc dl(Op);
17773  SDValue Vec = Op.getOperand(0);
17774  SDValue Elt = Op.getOperand(1);
17775  SDValue Idx = Op.getOperand(2);
17776  MVT VecVT = Vec.getSimpleValueType();
17777
17778  if (!isa<ConstantSDNode>(Idx)) {
17779    // Non-constant index. Extend the source and destination,
17780    // insert the element, and then truncate the result.
17781    unsigned NumElts = VecVT.getVectorNumElements();
17782    MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
17783    MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
17784    SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
17785      DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
17786      DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
17787    return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
17788  }
17789
17790  // Copy into a k-register, extract to v1i1 and insert_subvector.
17791  SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
17792
17793  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec,
17794                     Op.getOperand(2));
17795}
17796
17797SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
17798                                                  SelectionDAG &DAG) const {
17799  MVT VT = Op.getSimpleValueType();
17800  MVT EltVT = VT.getVectorElementType();
17801  unsigned NumElts = VT.getVectorNumElements();
17802
17803  if (EltVT == MVT::i1)
17804    return InsertBitToMaskVector(Op, DAG, Subtarget);
17805
17806  SDLoc dl(Op);
17807  SDValue N0 = Op.getOperand(0);
17808  SDValue N1 = Op.getOperand(1);
17809  SDValue N2 = Op.getOperand(2);
17810
17811  auto *N2C = dyn_cast<ConstantSDNode>(N2);
17812  if (!N2C || N2C->getAPIntValue().uge(NumElts))
17813    return SDValue();
17814  uint64_t IdxVal = N2C->getZExtValue();
17815
17816  bool IsZeroElt = X86::isZeroNode(N1);
17817  bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
17818
17819  // If we are inserting an element, see if we can do this more efficiently
17820  // with a blend shuffle against a rematerializable vector rather than a
17821  // costly integer insertion.
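  // Sketch of the idea (example values are illustrative): inserting zero into
  // element 2 of a v8i16 value X becomes shuffle(X, zero) with blend mask
  // <0,1,10,3,4,5,6,7>, where index 10 selects lane 2 of the zero vector.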
17822  if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() &&
17823      16 <= EltVT.getSizeInBits()) {
17824    SmallVector<int, 8> BlendMask;
17825    for (unsigned i = 0; i != NumElts; ++i)
17826      BlendMask.push_back(i == IdxVal ? i + NumElts : i);
17827    SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
17828                                  : getOnesVector(VT, DAG, dl);
17829    return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
17830  }
17831
17832  // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
17833  // into that, and then insert the subvector back into the result.
17834  if (VT.is256BitVector() || VT.is512BitVector()) {
17835    // With a 256-bit vector, we can insert into the zero element efficiently
17836    // using a blend if we have AVX or AVX2 and the right data type.
17837    if (VT.is256BitVector() && IdxVal == 0) {
17838      // TODO: It is worthwhile to cast integer to floating point and back
17839      // and incur a domain crossing penalty if that's what we'll end up
17840      // doing anyway after extracting to a 128-bit vector.
17841      if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
17842          (Subtarget.hasAVX2() && EltVT == MVT::i32)) {
17843        SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
17844        return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec,
17845                           DAG.getTargetConstant(1, dl, MVT::i8));
17846      }
17847    }
17848
17849    // Get the desired 128-bit vector chunk.
17850    SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
17851
17852    // Insert the element into the desired chunk.
17853    unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
17854    assert(isPowerOf2_32(NumEltsIn128));
17855    // NumEltsIn128 is a power of 2, so we can use a mask instead of a modulo.
17856    unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
17857
17858    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
17859                    DAG.getIntPtrConstant(IdxIn128, dl));
17860
17861    // Insert the changed part back into the bigger vector
17862    return insert128BitVector(N0, V, IdxVal, DAG, dl);
17863  }
17864  assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
17865
17866  // This will be just movd/movq/movss/movsd.
17867  if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode()) &&
17868      (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
17869       EltVT == MVT::i64)) {
17870    N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
17871    return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
17872  }
17873
17874  // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
17875  // argument. SSE41 is required for pinsrb.
17876  if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
17877    unsigned Opc;
17878    if (VT == MVT::v8i16) {
17879      assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
17880      Opc = X86ISD::PINSRW;
17881    } else {
17882      assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
17883      assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
17884      Opc = X86ISD::PINSRB;
17885    }
17886
17887    if (N1.getValueType() != MVT::i32)
17888      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
17889    if (N2.getValueType() != MVT::i32)
17890      N2 = DAG.getIntPtrConstant(IdxVal, dl);
17891    return DAG.getNode(Opc, dl, VT, N0, N1, N2);
17892  }
17893
17894  if (Subtarget.hasSSE41()) {
17895    if (EltVT == MVT::f32) {
17896      // Bits [7:6] of the constant are the source select. This will always be
17897      //   zero here. The DAG Combiner may combine an extract_elt index into
17898      //   these bits. For example (insert (extract, 3), 2) could be matched by
17899      //   putting the '3' into bits [7:6] of X86ISD::INSERTPS.
17900      // Bits [5:4] of the constant are the destination select. This is the
17901      //   value of the incoming immediate.
17902      // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
17903      //   combine either bitwise AND or insert of float 0.0 to set these bits.
17904
17905      bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
17906      if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
17907        // If this is an insertion of 32-bits into the low 32-bits of
17908        // a vector, we prefer to generate a blend with immediate rather
17909        // than an insertps. Blends are simpler operations in hardware and so
17910        // will always have equal or better performance than insertps.
17911        // But if optimizing for size and there's a load folding opportunity,
17912        // generate insertps because blendps does not have a 32-bit memory
17913        // operand form.
17914        N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
17915        return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1,
17916                           DAG.getTargetConstant(1, dl, MVT::i8));
17917      }
17918      // Create this as a scalar to vector.
17919      N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
17920      return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1,
17921                         DAG.getTargetConstant(IdxVal << 4, dl, MVT::i8));
17922    }
17923
17924    // PINSR* works with constant index.
17925    if (EltVT == MVT::i32 || EltVT == MVT::i64)
17926      return Op;
17927  }
17928
17929  return SDValue();
17930}
17931
17932static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
17933                                     SelectionDAG &DAG) {
17934  SDLoc dl(Op);
17935  MVT OpVT = Op.getSimpleValueType();
17936
17937  // It's always cheaper to replace a xor+movd with xorps, and it simplifies
17938  // further combines.
17939  if (X86::isZeroNode(Op.getOperand(0)))
17940    return getZeroVector(OpVT, Subtarget, DAG, dl);
17941
17942  // If the result is wider than 128 bits, first insert into a 128-bit
17943  // vector and then insert that into the full-width vector.
17944  if (!OpVT.is128BitVector()) {
17945    // Insert into a 128-bit vector.
17946    unsigned SizeFactor = OpVT.getSizeInBits() / 128;
17947    MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
17948                                 OpVT.getVectorNumElements() / SizeFactor);
17949
17950    Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
17951
17952    // Insert the 128-bit vector.
17953    return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
17954  }
17955  assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
17956         "Expected an SSE type!");
17957
17958  // Pass through a v4i32 SCALAR_TO_VECTOR as that's what we use in tblgen.
17959  if (OpVT == MVT::v4i32)
17960    return Op;
17961
17962  SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
17963  return DAG.getBitcast(
17964      OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
17965}
17966
17967// Lower a node with an INSERT_SUBVECTOR opcode.  This may result in a
17968// simple superregister reference or explicit instructions to insert
17969// the upper bits of a vector.
17970static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
17971                                     SelectionDAG &DAG) {
17972  assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
17973
17974  return insert1BitVector(Op, DAG, Subtarget);
17975}
17976
17977static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
17978                                      SelectionDAG &DAG) {
17979  assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
17980         "Only vXi1 extract_subvectors need custom lowering");
17981
17982  SDLoc dl(Op);
17983  SDValue Vec = Op.getOperand(0);
17984  SDValue Idx = Op.getOperand(1);
17985
17986  if (!isa<ConstantSDNode>(Idx))
17987    return SDValue();
17988
17989  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17990  if (IdxVal == 0) // the operation is legal
17991    return Op;
17992
17993  MVT VecVT = Vec.getSimpleValueType();
17994  unsigned NumElems = VecVT.getVectorNumElements();
17995
17996  // Extend to natively supported kshift.
17997  MVT WideVecVT = VecVT;
17998  if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
17999    WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
18000    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
18001                      DAG.getUNDEF(WideVecVT), Vec,
18002                      DAG.getIntPtrConstant(0, dl));
18003  }
18004
18005  // Shift to the LSB.
18006  Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
18007                    DAG.getTargetConstant(IdxVal, dl, MVT::i8));
18008
18009  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
18010                     DAG.getIntPtrConstant(0, dl));
18011}
18012
18013// Returns the appropriate wrapper opcode for a global reference.
18014unsigned X86TargetLowering::getGlobalWrapperKind(
18015    const GlobalValue *GV, const unsigned char OpFlags) const {
18016  // References to absolute symbols are never PC-relative.
18017  if (GV && GV->isAbsoluteSymbolRef())
18018    return X86ISD::Wrapper;
18019
18020  CodeModel::Model M = getTargetMachine().getCodeModel();
18021  if (Subtarget.isPICStyleRIPRel() &&
18022      (M == CodeModel::Small || M == CodeModel::Kernel))
18023    return X86ISD::WrapperRIP;
18024
18025  // GOTPCREL references must always use RIP.
18026  if (OpFlags == X86II::MO_GOTPCREL)
18027    return X86ISD::WrapperRIP;
18028
18029  return X86ISD::Wrapper;
18030}
18031
18032// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
18033// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
18034// one of the above-mentioned nodes. It has to be wrapped because otherwise
18035// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
18036// be used to form an addressing mode. These wrapped nodes will be selected
18037// into MOV32ri.
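// A small sketch of the resulting DAG shape (the operand naming below is
// illustrative, not lifted from the source):
//   (add (X86ISD::GlobalBaseReg), (X86ISD::Wrapper (TargetConstantPool @cp)))
// in 32-bit PIC mode, or just the wrapped node when no PIC base is needed.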
18038SDValue
18039X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
18040  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
18041
18042  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18043  // global base reg.
18044  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
18045
18046  auto PtrVT = getPointerTy(DAG.getDataLayout());
18047  SDValue Result = DAG.getTargetConstantPool(
18048      CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(), OpFlag);
18049  SDLoc DL(CP);
18050  Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
18051  // With PIC, the address is actually $g + Offset.
18052  if (OpFlag) {
18053    Result =
18054        DAG.getNode(ISD::ADD, DL, PtrVT,
18055                    DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
18056  }
18057
18058  return Result;
18059}
18060
18061SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
18062  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
18063
18064  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18065  // global base reg.
18066  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
18067
18068  auto PtrVT = getPointerTy(DAG.getDataLayout());
18069  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
18070  SDLoc DL(JT);
18071  Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
18072
18073  // With PIC, the address is actually $g + Offset.
18074  if (OpFlag)
18075    Result =
18076        DAG.getNode(ISD::ADD, DL, PtrVT,
18077                    DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
18078
18079  return Result;
18080}
18081
18082SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
18083                                               SelectionDAG &DAG) const {
18084  return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
18085}
18086
18087SDValue
18088X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
18089  // Create the TargetBlockAddressAddress node.
18090  unsigned char OpFlags =
18091    Subtarget.classifyBlockAddressReference();
18092  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
18093  int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
18094  SDLoc dl(Op);
18095  auto PtrVT = getPointerTy(DAG.getDataLayout());
18096  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
18097  Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);
18098
18099  // With PIC, the address is actually $g + Offset.
18100  if (isGlobalRelativeToPICBase(OpFlags)) {
18101    Result = DAG.getNode(ISD::ADD, dl, PtrVT,
18102                         DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
18103  }
18104
18105  return Result;
18106}
18107
18108/// Creates target global address or external symbol nodes for calls or
18109/// other uses.
18110SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
18111                                                 bool ForCall) const {
18112  // Unpack the global address or external symbol.
18113  const SDLoc &dl = SDLoc(Op);
18114  const GlobalValue *GV = nullptr;
18115  int64_t Offset = 0;
18116  const char *ExternalSym = nullptr;
18117  if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
18118    GV = G->getGlobal();
18119    Offset = G->getOffset();
18120  } else {
18121    const auto *ES = cast<ExternalSymbolSDNode>(Op);
18122    ExternalSym = ES->getSymbol();
18123  }
18124
18125  // Calculate some flags for address lowering.
18126  const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
18127  unsigned char OpFlags;
18128  if (ForCall)
18129    OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
18130  else
18131    OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
18132  bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
18133  bool NeedsLoad = isGlobalStubReference(OpFlags);
18134
18135  CodeModel::Model M = DAG.getTarget().getCodeModel();
18136  auto PtrVT = getPointerTy(DAG.getDataLayout());
18137  SDValue Result;
18138
18139  if (GV) {
18140    // Create a target global address if this is a global. If possible, fold the
18141    // offset into the global address reference. Otherwise, ADD it on later.
18142    int64_t GlobalOffset = 0;
18143    if (OpFlags == X86II::MO_NO_FLAG &&
18144        X86::isOffsetSuitableForCodeModel(Offset, M)) {
18145      std::swap(GlobalOffset, Offset);
18146    }
18147    Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
18148  } else {
18149    // If this is not a global address, this must be an external symbol.
18150    Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
18151  }
18152
18153  // If this is a direct call, avoid the wrapper if we don't need to do any
18154  // loads or adds. This allows SDAG ISel to match direct calls.
18155  if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
18156    return Result;
18157
18158  Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
18159
18160  // With PIC, the address is actually $g + Offset.
18161  if (HasPICReg) {
18162    Result = DAG.getNode(ISD::ADD, dl, PtrVT,
18163                         DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
18164  }
18165
18166  // For globals that require a load from a stub to get the address, emit the
18167  // load.
18168  if (NeedsLoad)
18169    Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
18170                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
18171
18172  // If there was a non-zero offset that we didn't fold, create an explicit
18173  // addition for it.
18174  if (Offset != 0)
18175    Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
18176                         DAG.getConstant(Offset, dl, PtrVT));
18177
18178  return Result;
18179}
18180
18181SDValue
18182X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
18183  return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
18184}
18185
18186static SDValue
18187GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
18188           SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
18189           unsigned char OperandFlags, bool LocalDynamic = false) {
18190  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18191  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18192  SDLoc dl(GA);
18193  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18194                                           GA->getValueType(0),
18195                                           GA->getOffset(),
18196                                           OperandFlags);
18197
18198  X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
18199                                           : X86ISD::TLSADDR;
18200
18201  if (InFlag) {
18202    SDValue Ops[] = { Chain,  TGA, *InFlag };
18203    Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
18204  } else {
18205    SDValue Ops[]  = { Chain, TGA };
18206    Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
18207  }
18208
18209  // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
18210  MFI.setAdjustsStack(true);
18211  MFI.setHasCalls(true);
18212
18213  SDValue Flag = Chain.getValue(1);
18214  return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
18215}
18216
18217// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
18218static SDValue
18219LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18220                                const EVT PtrVT) {
18221  SDValue InFlag;
18222  SDLoc dl(GA);  // ? function entry point might be better
18223  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18224                                   DAG.getNode(X86ISD::GlobalBaseReg,
18225                                               SDLoc(), PtrVT), InFlag);
18226  InFlag = Chain.getValue(1);
18227
18228  return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
18229}
18230
18231// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
18232static SDValue
18233LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18234                                const EVT PtrVT) {
18235  return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
18236                    X86::RAX, X86II::MO_TLSGD);
18237}
18238
18239static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
18240                                           SelectionDAG &DAG,
18241                                           const EVT PtrVT,
18242                                           bool is64Bit) {
18243  SDLoc dl(GA);
18244
18245  // Get the start address of the TLS block for this module.
18246  X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
18247      .getInfo<X86MachineFunctionInfo>();
18248  MFI->incNumLocalDynamicTLSAccesses();
18249
18250  SDValue Base;
18251  if (is64Bit) {
18252    Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
18253                      X86II::MO_TLSLD, /*LocalDynamic=*/true);
18254  } else {
18255    SDValue InFlag;
18256    SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18257        DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
18258    InFlag = Chain.getValue(1);
18259    Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
18260                      X86II::MO_TLSLDM, /*LocalDynamic=*/true);
18261  }
18262
18263  // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
18264  // of Base.
18265
18266  // Build x@dtpoff.
18267  unsigned char OperandFlags = X86II::MO_DTPOFF;
18268  unsigned WrapperKind = X86ISD::Wrapper;
18269  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18270                                           GA->getValueType(0),
18271                                           GA->getOffset(), OperandFlags);
18272  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18273
18274  // Add x@dtpoff with the base.
18275  return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
18276}
18277
18278// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
18279static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18280                                   const EVT PtrVT, TLSModel::Model model,
18281                                   bool is64Bit, bool isPIC) {
18282  SDLoc dl(GA);
18283
18284  // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
18285  Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
18286                                                         is64Bit ? 257 : 256));
18287
18288  SDValue ThreadPointer =
18289      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
18290                  MachinePointerInfo(Ptr));
18291
18292  unsigned char OperandFlags = 0;
18293  // Most TLS accesses are not RIP relative, even on x86-64.  One exception is
18294  // initialexec.
18295  unsigned WrapperKind = X86ISD::Wrapper;
18296  if (model == TLSModel::LocalExec) {
18297    OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
18298  } else if (model == TLSModel::InitialExec) {
18299    if (is64Bit) {
18300      OperandFlags = X86II::MO_GOTTPOFF;
18301      WrapperKind = X86ISD::WrapperRIP;
18302    } else {
18303      OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
18304    }
18305  } else {
18306    llvm_unreachable("Unexpected model");
18307  }
18308
18309  // emit "addl x@ntpoff,%eax" (local exec)
18310  // or "addl x@indntpoff,%eax" (initial exec)
18311  // or "addl x@gotntpoff(%ebx),%eax" (initial exec, 32-bit pic)
18312  SDValue TGA =
18313      DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
18314                                 GA->getOffset(), OperandFlags);
18315  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18316
18317  if (model == TLSModel::InitialExec) {
18318    if (isPIC && !is64Bit) {
18319      Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
18320                           DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18321                           Offset);
18322    }
18323
18324    Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
18325                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
18326  }
18327
18328  // The address of the thread local variable is the add of the thread
18329  // pointer with the offset of the variable.
18330  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
18331}
18332
18333SDValue
18334X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
18335
18336  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
18337
18338  if (DAG.getTarget().useEmulatedTLS())
18339    return LowerToTLSEmulatedModel(GA, DAG);
18340
18341  const GlobalValue *GV = GA->getGlobal();
18342  auto PtrVT = getPointerTy(DAG.getDataLayout());
18343  bool PositionIndependent = isPositionIndependent();
18344
18345  if (Subtarget.isTargetELF()) {
18346    TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
18347    switch (model) {
18348      case TLSModel::GeneralDynamic:
18349        if (Subtarget.is64Bit())
18350          return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
18351        return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
18352      case TLSModel::LocalDynamic:
18353        return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
18354                                           Subtarget.is64Bit());
18355      case TLSModel::InitialExec:
18356      case TLSModel::LocalExec:
18357        return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
18358                                   PositionIndependent);
18359    }
18360    llvm_unreachable("Unknown TLS model.");
18361  }
18362
18363  if (Subtarget.isTargetDarwin()) {
18364    // Darwin only has one model of TLS.  Lower to that.
18365    unsigned char OpFlag = 0;
18366    unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
18367                           X86ISD::WrapperRIP : X86ISD::Wrapper;
18368
18369    // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18370    // global base reg.
18371    bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
18372    if (PIC32)
18373      OpFlag = X86II::MO_TLVP_PIC_BASE;
18374    else
18375      OpFlag = X86II::MO_TLVP;
18376    SDLoc DL(Op);
18377    SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
18378                                                GA->getValueType(0),
18379                                                GA->getOffset(), OpFlag);
18380    SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
18381
18382    // With PIC32, the address is actually $g + Offset.
18383    if (PIC32)
18384      Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
18385                           DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18386                           Offset);
18387
18388    // Lowering the machine ISD node will make sure everything is in the right
18389    // location.
18390    SDValue Chain = DAG.getEntryNode();
18391    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18392    Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
18393    SDValue Args[] = { Chain, Offset };
18394    Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
18395    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
18396                               DAG.getIntPtrConstant(0, DL, true),
18397                               Chain.getValue(1), DL);
18398
18399    // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
18400    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18401    MFI.setAdjustsStack(true);
18402
18403    // And our return value (tls address) is in the standard call return value
18404    // location.
18405    unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
18406    return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
18407  }
18408
18409  if (Subtarget.isOSWindows()) {
18410    // Just use the implicit TLS architecture
18411    // Need to generate something similar to:
18412    //   mov     rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
18413    //                                  ; from TEB
18414    //   mov     ecx, dword [rel _tls_index]: Load index (from C runtime)
18415    //   mov     rcx, qword [rdx+rcx*8]
18416    //   mov     eax, .tls$:tlsvar
18417    //   [rax+rcx] contains the address
18418    // Windows 64bit: gs:0x58
18419    // Windows 32bit: fs:__tls_array
18420
18421    SDLoc dl(GA);
18422    SDValue Chain = DAG.getEntryNode();
18423
18424    // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
18425    // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
18426    // use its literal value of 0x2C.
18427    Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
18428                                        ? Type::getInt8PtrTy(*DAG.getContext(),
18429                                                             256)
18430                                        : Type::getInt32PtrTy(*DAG.getContext(),
18431                                                              257));
18432
18433    SDValue TlsArray = Subtarget.is64Bit()
18434                           ? DAG.getIntPtrConstant(0x58, dl)
18435                           : (Subtarget.isTargetWindowsGNU()
18436                                  ? DAG.getIntPtrConstant(0x2C, dl)
18437                                  : DAG.getExternalSymbol("_tls_array", PtrVT));
18438
18439    SDValue ThreadPointer =
18440        DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
18441
18442    SDValue res;
18443    if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
18444      res = ThreadPointer;
18445    } else {
18446      // Load the _tls_index variable
18447      SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
18448      if (Subtarget.is64Bit())
18449        IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
18450                             MachinePointerInfo(), MVT::i32);
18451      else
18452        IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
18453
18454      auto &DL = DAG.getDataLayout();
18455      SDValue Scale =
18456          DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
18457      IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
18458
18459      res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
18460    }
18461
18462    res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
18463
18464    // Get the offset of the start of the .tls section.
18465    SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18466                                             GA->getValueType(0),
18467                                             GA->getOffset(), X86II::MO_SECREL);
18468    SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
18469
18470    // The address of the thread local variable is the add of the thread
18471    // pointer with the offset of the variable.
18472    return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
18473  }
18474
18475  llvm_unreachable("TLS not implemented for this target.");
18476}
18477
18478/// Lower SRA_PARTS and friends, which return two i32 values
18479/// and take a 2 x i32 value to shift plus a shift amount.
18480/// TODO: Can this be moved to general expansion code?
18481static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
18482  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
18483  MVT VT = Op.getSimpleValueType();
18484  unsigned VTBits = VT.getSizeInBits();
18485  SDLoc dl(Op);
18486  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
18487  SDValue ShOpLo = Op.getOperand(0);
18488  SDValue ShOpHi = Op.getOperand(1);
18489  SDValue ShAmt  = Op.getOperand(2);
18490  // ISD::FSHL and ISD::FSHR have defined overflow behavior, but ISD::SHL and
18491  // ISD::SRA/L nodes don't. Insert an AND to be safe; it's optimized away
18492  // during isel.
18493  SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
18494                                  DAG.getConstant(VTBits - 1, dl, MVT::i8));
18495  SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
18496                                     DAG.getConstant(VTBits - 1, dl, MVT::i8))
18497                       : DAG.getConstant(0, dl, VT);
18498
18499  SDValue Tmp2, Tmp3;
18500  if (Op.getOpcode() == ISD::SHL_PARTS) {
18501    Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt);
18502    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
18503  } else {
18504    Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt);
18505    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
18506  }
18507
18508  // If the shift amount is larger than or equal to the width of a part, we
18509  // can't rely on the results of shld/shrd. Insert a test and select the
18510  // appropriate values for large shift amounts.
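  // For example (hypothetical values): an i32-part SHL_PARTS by 40 selects
  // Hi = Lo << (40 & 31) = Lo << 8 and Lo = 0, since the SHLD result is not
  // reliable once the amount reaches the part width of 32.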
18511  SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
18512                                DAG.getConstant(VTBits, dl, MVT::i8));
18513  SDValue Cond = DAG.getSetCC(dl, MVT::i8, AndNode,
18514                             DAG.getConstant(0, dl, MVT::i8), ISD::SETNE);
18515
18516  SDValue Hi, Lo;
18517  if (Op.getOpcode() == ISD::SHL_PARTS) {
18518    Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
18519    Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
18520  } else {
18521    Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
18522    Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
18523  }
18524
18525  return DAG.getMergeValues({ Lo, Hi }, dl);
18526}
18527
18528static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
18529                                SelectionDAG &DAG) {
18530  MVT VT = Op.getSimpleValueType();
18531  assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
18532         "Unexpected funnel shift opcode!");
18533
18534  SDLoc DL(Op);
18535  SDValue Op0 = Op.getOperand(0);
18536  SDValue Op1 = Op.getOperand(1);
18537  SDValue Amt = Op.getOperand(2);
18538
18539  bool IsFSHR = Op.getOpcode() == ISD::FSHR;
18540
18541  if (VT.isVector()) {
18542    assert(Subtarget.hasVBMI2() && "Expected VBMI2");
18543
18544    if (IsFSHR)
18545      std::swap(Op0, Op1);
18546
18547    APInt APIntShiftAmt;
18548    if (X86::isConstantSplat(Amt, APIntShiftAmt)) {
18549      uint64_t ShiftAmt = APIntShiftAmt.urem(VT.getScalarSizeInBits());
18550      return DAG.getNode(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT, Op0,
18551                         Op1, DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
18552    }
18553
18554    return DAG.getNode(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
18555                       Op0, Op1, Amt);
18556  }
18557
18558  assert((VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
18559         "Unexpected funnel shift type!");
18560
18561  // Expand slow SHLD/SHRD cases if we are not optimizing for size.
18562  bool OptForSize = DAG.shouldOptForSize();
18563  if (!OptForSize && Subtarget.isSHLDSlow())
18564    return SDValue();
18565
18566  if (IsFSHR)
18567    std::swap(Op0, Op1);
18568
18569  // i16 needs to modulo the shift amount, but i32/i64 have implicit modulo.
18570  if (VT == MVT::i16)
18571    Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
18572                      DAG.getConstant(15, DL, Amt.getValueType()));
18573
18574  unsigned SHDOp = (IsFSHR ? X86ISD::SHRD : X86ISD::SHLD);
18575  return DAG.getNode(SHDOp, DL, VT, Op0, Op1, Amt);
18576}
18577
18578// Try to use a packed vector operation to handle i64 on 32-bit targets when
18579// AVX512DQ is enabled.
18580static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
18581                                        const X86Subtarget &Subtarget) {
18582  assert((Op.getOpcode() == ISD::SINT_TO_FP ||
18583          Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
18584          Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
18585          Op.getOpcode() == ISD::UINT_TO_FP) &&
18586         "Unexpected opcode!");
18587  bool IsStrict = Op->isStrictFPOpcode();
18588  unsigned OpNo = IsStrict ? 1 : 0;
18589  SDValue Src = Op.getOperand(OpNo);
18590  MVT SrcVT = Src.getSimpleValueType();
18591  MVT VT = Op.getSimpleValueType();
18592
18593  if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
18594      (VT != MVT::f32 && VT != MVT::f64))
18595    return SDValue();
18596
18597  // Pack the i64 into a vector, do the operation and extract.
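  // Rough sketch of the transform (illustration, not lifted from the source):
  // on a 32-bit target with AVX512DQ, "sitofp i64 %x to double" becomes a
  // scalar_to_vector into a vXi64 register, a packed VCVTQQ2PD, and an
  // extract of element 0.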
18598
18599  // Use at least a 256-bit source so the result is 128 bits in the f32 case.
18600  unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
18601  MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
18602  MVT VecVT = MVT::getVectorVT(VT, NumElts);
18603
18604  SDLoc dl(Op);
18605  SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
18606  if (IsStrict) {
18607    SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {VecVT, MVT::Other},
18608                                 {Op.getOperand(0), InVec});
18609    SDValue Chain = CvtVec.getValue(1);
18610    SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18611                                DAG.getIntPtrConstant(0, dl));
18612    return DAG.getMergeValues({Value, Chain}, dl);
18613  }
18614
18615  SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
18616
18617  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18618                     DAG.getIntPtrConstant(0, dl));
18619}
18620
18621static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
18622                          const X86Subtarget &Subtarget) {
18623  switch (Opcode) {
18624    case ISD::SINT_TO_FP:
18625      // TODO: Handle wider types with AVX/AVX512.
18626      if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
18627        return false;
18628      // CVTDQ2PS or (V)CVTDQ2PD
18629      return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);
18630
18631    case ISD::UINT_TO_FP:
18632      // TODO: Handle wider types and i64 elements.
18633      if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
18634        return false;
18635      // VCVTUDQ2PS or VCVTUDQ2PD
18636      return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;
18637
18638    default:
18639      return false;
18640  }
18641}
18642
18643/// Given a scalar cast operation that is extracted from a vector, try to
18644/// vectorize the cast op followed by extraction. This will avoid an expensive
18645/// round-trip between XMM and GPR.
18646static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
18647                                      const X86Subtarget &Subtarget) {
18648  // TODO: This could be enhanced to handle smaller integer types by peeking
18649  // through an extend.
18650  SDValue Extract = Cast.getOperand(0);
18651  MVT DestVT = Cast.getSimpleValueType();
18652  if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
18653      !isa<ConstantSDNode>(Extract.getOperand(1)))
18654    return SDValue();
18655
18656  // See if we have a 128-bit vector cast op for this type of cast.
18657  SDValue VecOp = Extract.getOperand(0);
18658  MVT FromVT = VecOp.getSimpleValueType();
18659  unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
18660  MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
18661  MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
18662  if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
18663    return SDValue();
18664
18665  // If we are extracting from a non-zero element, first shuffle the source
18666  // vector to allow extracting from element zero.
18667  SDLoc DL(Cast);
18668  if (!isNullConstant(Extract.getOperand(1))) {
18669    SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
18670    Mask[0] = Extract.getConstantOperandVal(1);
18671    VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
18672  }
18673  // If the source vector is wider than 128-bits, extract the low part. Do not
18674  // create an unnecessarily wide vector cast op.
18675  if (FromVT != Vec128VT)
18676    VecOp = extract128BitVector(VecOp, 0, DAG, DL);
18677
18678  // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
18679  // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
18680  SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
18681  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
18682                     DAG.getIntPtrConstant(0, DL));
18683}
18684
18685static SDValue lowerINT_TO_FP_vXi64(SDValue Op, SelectionDAG &DAG,
18686                                    const X86Subtarget &Subtarget) {
18687  SDLoc DL(Op);
18688  bool IsStrict = Op->isStrictFPOpcode();
18689  MVT VT = Op->getSimpleValueType(0);
18690  SDValue Src = Op->getOperand(IsStrict ? 1 : 0);
18691
18692  if (Subtarget.hasDQI()) {
18693    assert(!Subtarget.hasVLX() && "Unexpected features");
18694
18695    assert((Src.getSimpleValueType() == MVT::v2i64 ||
18696            Src.getSimpleValueType() == MVT::v4i64) &&
18697           "Unsupported custom type");
18698
18699    // With AVX512DQ, but not VLX we need to widen to get a 512-bit result type.
18700    assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v4f64) &&
18701           "Unexpected VT!");
18702    MVT WideVT = VT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
18703
18704    // Need to concat with zero vector for strict fp to avoid spurious
18705    // exceptions.
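    // (Converting garbage in the widened upper lanes under strict FP could
    // raise spurious exceptions; zero lanes convert exactly and quietly.)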
18706    SDValue Tmp = IsStrict ? DAG.getConstant(0, DL, MVT::v8i64)
18707                           : DAG.getUNDEF(MVT::v8i64);
18708    Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i64, Tmp, Src,
18709                      DAG.getIntPtrConstant(0, DL));
18710    SDValue Res, Chain;
18711    if (IsStrict) {
18712      Res = DAG.getNode(Op.getOpcode(), DL, {WideVT, MVT::Other},
18713                        {Op->getOperand(0), Src});
18714      Chain = Res.getValue(1);
18715    } else {
18716      Res = DAG.getNode(Op.getOpcode(), DL, WideVT, Src);
18717    }
18718
18719    Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
18720                      DAG.getIntPtrConstant(0, DL));
18721
18722    if (IsStrict)
18723      return DAG.getMergeValues({Res, Chain}, DL);
18724    return Res;
18725  }
18726
18727  bool IsSigned = Op->getOpcode() == ISD::SINT_TO_FP ||
18728                  Op->getOpcode() == ISD::STRICT_SINT_TO_FP;
18729  if (VT != MVT::v4f32 || IsSigned)
18730    return SDValue();
18731
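  // Unsigned v4i64 -> v4f32 without DQI: use signed converts plus a fixup for
  // inputs with the sign bit set. Roughly:
  //   Sign    = (Src >> 1) | (Src & 1)  // halve, keeping the low bit so the
  //                                     // final rounding is unaffected
  //   SignSrc = (Src < 0) ? Sign : Src
  //   Cvt     = sitofp(SignSrc)         // done per element below
  //   Result  = (Src < 0) ? Cvt + Cvt : Cvt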
18732  SDValue Zero = DAG.getConstant(0, DL, MVT::v4i64);
18733  SDValue One  = DAG.getConstant(1, DL, MVT::v4i64);
18734  SDValue Sign = DAG.getNode(ISD::OR, DL, MVT::v4i64,
18735                             DAG.getNode(ISD::SRL, DL, MVT::v4i64, Src, One),
18736                             DAG.getNode(ISD::AND, DL, MVT::v4i64, Src, One));
18737  SDValue IsNeg = DAG.getSetCC(DL, MVT::v4i64, Src, Zero, ISD::SETLT);
18738  SDValue SignSrc = DAG.getSelect(DL, MVT::v4i64, IsNeg, Sign, Src);
18739  SmallVector<SDValue, 4> SignCvts(4);
18740  SmallVector<SDValue, 4> Chains(4);
18741  for (int i = 0; i != 4; ++i) {
18742    SDValue Src = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, SignSrc,
18743                              DAG.getIntPtrConstant(i, DL));
18744    if (IsStrict) {
18745      SignCvts[i] =
18746          DAG.getNode(ISD::STRICT_SINT_TO_FP, DL, {MVT::f32, MVT::Other},
18747                      {Op.getOperand(0), Src});
18748      Chains[i] = SignCvts[i].getValue(1);
18749    } else {
18750      SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, DL, MVT::f32, Src);
18751    }
18752  }
18753  SDValue SignCvt = DAG.getBuildVector(VT, DL, SignCvts);
18754
18755  SDValue Slow, Chain;
18756  if (IsStrict) {
18757    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
18758    Slow = DAG.getNode(ISD::STRICT_FADD, DL, {MVT::v4f32, MVT::Other},
18759                       {Chain, SignCvt, SignCvt});
18760    Chain = Slow.getValue(1);
18761  } else {
18762    Slow = DAG.getNode(ISD::FADD, DL, MVT::v4f32, SignCvt, SignCvt);
18763  }
18764
18765  IsNeg = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i32, IsNeg);
18766  SDValue Cvt = DAG.getSelect(DL, MVT::v4f32, IsNeg, Slow, SignCvt);
18767
18768  if (IsStrict)
18769    return DAG.getMergeValues({Cvt, Chain}, DL);
18770
18771  return Cvt;
18772}
18773
18774SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
18775                                           SelectionDAG &DAG) const {
18776  bool IsStrict = Op->isStrictFPOpcode();
18777  unsigned OpNo = IsStrict ? 1 : 0;
18778  SDValue Src = Op.getOperand(OpNo);
18779  SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
18780  MVT SrcVT = Src.getSimpleValueType();
18781  MVT VT = Op.getSimpleValueType();
18782  SDLoc dl(Op);
18783
18784  if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
18785    return Extract;
18786
18787  if (SrcVT.isVector()) {
18788    if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
18789      // Note: Since v2f64 is a legal type, we don't need to zero extend the
18790      // source for strict FP.
18791      if (IsStrict)
18792        return DAG.getNode(
18793            X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
18794            {Chain, DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
18795                                DAG.getUNDEF(SrcVT))});
18796      return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
18797                         DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
18798                                     DAG.getUNDEF(SrcVT)));
18799    }
18800    if (SrcVT == MVT::v2i64 || SrcVT == MVT::v4i64)
18801      return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
18802
18803    return SDValue();
18804  }
18805
18806  assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
18807         "Unknown SINT_TO_FP to lower!");
18808
18809  bool UseSSEReg = isScalarFPTypeInSSEReg(VT);
18810
18811  // These are really Legal; return the operand so the caller accepts it as
18812  // Legal.
18813  if (SrcVT == MVT::i32 && UseSSEReg)
18814    return Op;
18815  if (SrcVT == MVT::i64 && UseSSEReg && Subtarget.is64Bit())
18816    return Op;
18817
18818  if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
18819    return V;
18820
18821  // SSE doesn't have an i16 conversion so we need to promote.
18822  if (SrcVT == MVT::i16 && (UseSSEReg || VT == MVT::f128)) {
18823    SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Src);
18824    if (IsStrict)
18825      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
18826                         {Chain, Ext});
18827
18828    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Ext);
18829  }
18830
18831  if (VT == MVT::f128)
18832    return LowerF128Call(Op, DAG, RTLIB::getSINTTOFP(SrcVT, VT));
18833
18834  SDValue ValueToStore = Src;
18835  if (SrcVT == MVT::i64 && UseSSEReg && !Subtarget.is64Bit())
18836    // Bitcasting to f64 here allows us to do a single 64-bit store from
18837    // an SSE register, avoiding the store forwarding penalty that would come
18838    // with two 32-bit stores.
18839    ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
18840
18841  unsigned Size = SrcVT.getSizeInBits()/8;
18842  MachineFunction &MF = DAG.getMachineFunction();
18843  auto PtrVT = getPointerTy(MF.getDataLayout());
18844  int SSFI = MF.getFrameInfo().CreateStackObject(Size, Size, false);
18845  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18846  Chain = DAG.getStore(
18847      Chain, dl, ValueToStore, StackSlot,
18848      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
18849  std::pair<SDValue, SDValue> Tmp = BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
18850
18851  if (IsStrict)
18852    return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
18853
18854  return Tmp.first;
18855}
18856
18857std::pair<SDValue, SDValue>
18858X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
18859                             SDValue StackSlot, SelectionDAG &DAG) const {
18860  // Build the FILD
18861  SDLoc DL(Op);
18862  SDVTList Tys;
18863  bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
18864  if (useSSE)
18865    Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
18866  else
18867    Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
18868
18869  unsigned ByteSize = SrcVT.getSizeInBits() / 8;
18870
18871  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
18872  MachineMemOperand *LoadMMO;
18873  if (FI) {
18874    int SSFI = FI->getIndex();
18875    LoadMMO = DAG.getMachineFunction().getMachineMemOperand(
18876        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18877        MachineMemOperand::MOLoad, ByteSize, ByteSize);
18878  } else {
18879    LoadMMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
18880    StackSlot = StackSlot.getOperand(1);
18881  }
18882  SDValue FILDOps[] = {Chain, StackSlot};
18883  SDValue Result =
18884      DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, DL,
18885                              Tys, FILDOps, SrcVT, LoadMMO);
18886  Chain = Result.getValue(1);
18887
18888  if (useSSE) {
18889    SDValue InFlag = Result.getValue(2);
18890
18891    // FIXME: Currently the FST is glued to the FILD_FLAG. This
18892    // shouldn't be necessary except that RFP cannot be live across
18893    // multiple blocks. When stackifier is fixed, they can be uncoupled.
18894    MachineFunction &MF = DAG.getMachineFunction();
18895    unsigned SSFISize = Op.getValueSizeInBits() / 8;
18896    int SSFI = MF.getFrameInfo().CreateStackObject(SSFISize, SSFISize, false);
18897    auto PtrVT = getPointerTy(MF.getDataLayout());
18898    SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18899    Tys = DAG.getVTList(MVT::Other);
18900    SDValue FSTOps[] = {Chain, Result, StackSlot, InFlag};
18901    MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
18902        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18903        MachineMemOperand::MOStore, SSFISize, SSFISize);
18904
18905    Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps,
18906                                    Op.getValueType(), StoreMMO);
18907    Result = DAG.getLoad(
18908        Op.getValueType(), DL, Chain, StackSlot,
18909        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
18910    Chain = Result.getValue(1);
18911  }
18912
18913  return { Result, Chain };
18914}
18915
18916/// Horizontal vector math instructions may be slower than normal math with
18917/// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
18918/// implementation, and likely shuffle complexity of the alternate sequence.
18919static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
18920                                  const X86Subtarget &Subtarget) {
18921  bool IsOptimizingSize = DAG.shouldOptForSize();
18922  bool HasFastHOps = Subtarget.hasFastHorizontalOps();
18923  return !IsSingleSource || IsOptimizingSize || HasFastHOps;
18924}
18925
18926/// 64-bit unsigned integer to double expansion.
18927static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
18928                                   const X86Subtarget &Subtarget) {
18929  // This algorithm is not obvious. Here is what we're trying to output:
18930  /*
18931     movq       %rax,  %xmm0
18932     punpckldq  (c0),  %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
18933     subpd      (c1),  %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
18934     #ifdef __SSE3__
18935       haddpd   %xmm0, %xmm0
18936     #else
18937       pshufd   $0x4e, %xmm0, %xmm1
18938       addpd    %xmm1, %xmm0
18939     #endif
18940  */
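  // Why this works (sketch): punpckldq pairs the low/high 32-bit halves of the
  // input with the exponent patterns 0x43300000 (2^52) and 0x45300000 (2^84),
  // giving the exact doubles (2^52 + lo) and (2^84 + hi * 2^32). Subtracting
  // c1 = { 2^52, 2^84 } removes the biases, and the final add/haddpd
  // recombines hi * 2^32 + lo.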
18941
18942  bool IsStrict = Op->isStrictFPOpcode();
18943  unsigned OpNo = IsStrict ? 1 : 0;
18944  SDLoc dl(Op);
18945  LLVMContext *Context = DAG.getContext();
18946
18947  // Build some magic constants.
18948  static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
18949  Constant *C0 = ConstantDataVector::get(*Context, CV0);
18950  auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
18951  SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, 16);
18952
18953  SmallVector<Constant*,2> CV1;
18954  CV1.push_back(
18955    ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
18956                                      APInt(64, 0x4330000000000000ULL))));
18957  CV1.push_back(
18958    ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
18959                                      APInt(64, 0x4530000000000000ULL))));
18960  Constant *C1 = ConstantVector::get(CV1);
18961  SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, 16);
18962
18963  // Load the 64-bit value into an XMM register.
18964  SDValue XR1 =
18965      DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Op.getOperand(OpNo));
18966  SDValue CLod0 =
18967      DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
18968                  MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
18969                  /* Alignment = */ 16);
18970  SDValue Unpck1 =
18971      getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
18972
18973  SDValue CLod1 =
18974      DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
18975                  MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
18976                  /* Alignment = */ 16);
18977  SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
18978  SDValue Sub;
18979  SDValue Chain;
18980  // TODO: Are there any fast-math-flags to propagate here?
18981  if (IsStrict) {
18982    Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
18983                      {Op.getOperand(0), XR2F, CLod1});
18984    Chain = Sub.getValue(1);
18985  } else
18986    Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
18987  SDValue Result;
18988
18989  if (!IsStrict && Subtarget.hasSSE3() &&
18990      shouldUseHorizontalOp(true, DAG, Subtarget)) {
18991    // FIXME: Do we need a STRICT version of FHADD?
18992    Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
18993  } else {
18994    SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
18995    if (IsStrict) {
18996      Result = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v2f64, MVT::Other},
18997                           {Chain, Shuffle, Sub});
18998      Chain = Result.getValue(1);
18999    } else
19000      Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
19001  }
19002  Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
19003                       DAG.getIntPtrConstant(0, dl));
19004  if (IsStrict)
19005    return DAG.getMergeValues({Result, Chain}, dl);
19006
19007  return Result;
19008}
19009
19010/// 32-bit unsigned integer to float expansion.
19011static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
19012                                   const X86Subtarget &Subtarget) {
19013  unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
19014  SDLoc dl(Op);
19015  // FP constant to bias correct the final result.
19016  SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
19017                                   MVT::f64);
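  // The trick: OR the 32-bit value into the low mantissa bits of the double
  // 2^52 (0x4330000000000000), which yields exactly 2^52 + x. Subtracting the
  // bias 2^52 then recovers x as a double with no rounding.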
19018
19019  // Load the 32-bit value into an XMM register.
19020  SDValue Load =
19021      DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Op.getOperand(OpNo));
19022
19023  // Zero out the upper parts of the register.
19024  Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
19025
19026  // Or the load with the bias.
19027  SDValue Or = DAG.getNode(
19028      ISD::OR, dl, MVT::v2i64,
19029      DAG.getBitcast(MVT::v2i64, Load),
19030      DAG.getBitcast(MVT::v2i64,
19031                     DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
19032  Or =
19033      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
19034                  DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
19035
19036  if (Op.getNode()->isStrictFPOpcode()) {
19037    // Subtract the bias.
19038    // TODO: Are there any fast-math-flags to propagate here?
19039    SDValue Chain = Op.getOperand(0);
19040    SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::f64, MVT::Other},
19041                              {Chain, Or, Bias});
19042
19043    if (Op.getValueType() == Sub.getValueType())
19044      return Sub;
19045
19046    // Handle final rounding.
19047    std::pair<SDValue, SDValue> ResultPair = DAG.getStrictFPExtendOrRound(
19048        Sub, Sub.getValue(1), dl, Op.getSimpleValueType());
19049
19050    return DAG.getMergeValues({ResultPair.first, ResultPair.second}, dl);
19051  }
19052
19053  // Subtract the bias.
19054  // TODO: Are there any fast-math-flags to propagate here?
19055  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
19056
19057  // Handle final rounding.
19058  return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
19059}
19060
19061static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
19062                                     const X86Subtarget &Subtarget,
19063                                     const SDLoc &DL) {
19064  if (Op.getSimpleValueType() != MVT::v2f64)
19065    return SDValue();
19066
19067  bool IsStrict = Op->isStrictFPOpcode();
19068
19069  SDValue N0 = Op.getOperand(IsStrict ? 1 : 0);
19070  assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");
19071
19072  if (Subtarget.hasAVX512()) {
19073    if (!Subtarget.hasVLX()) {
19074      // Let generic type legalization widen this.
19075      if (!IsStrict)
19076        return SDValue();
19077      // Otherwise pad the integer input with 0s and widen the operation.
19078      N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
19079                       DAG.getConstant(0, DL, MVT::v2i32));
19080      SDValue Res = DAG.getNode(Op->getOpcode(), DL, {MVT::v4f64, MVT::Other},
19081                                {Op.getOperand(0), N0});
19082      SDValue Chain = Res.getValue(1);
19083      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2f64, Res,
19084                        DAG.getIntPtrConstant(0, DL));
19085      return DAG.getMergeValues({Res, Chain}, DL);
19086    }
19087
19088    // Legalize to v4i32 type.
19089    N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
19090                     DAG.getUNDEF(MVT::v2i32));
19091    if (IsStrict)
19092      return DAG.getNode(X86ISD::STRICT_CVTUI2P, DL, {MVT::v2f64, MVT::Other},
19093                         {Op.getOperand(0), N0});
19094    return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
19095  }
19096
19097  // Zero extend to 2i64, OR with the floating point representation of 2^52.
19098  // This gives us the floating point equivalent of 2^52 + the i32 integer
19099  // since double has 52-bits of mantissa. Then subtract 2^52 in floating
19100  // point leaving just our i32 integers in double format.
19101  SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i64, N0);
19102  SDValue VBias =
19103      DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), DL, MVT::v2f64);
19104  SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v2i64, ZExtIn,
19105                           DAG.getBitcast(MVT::v2i64, VBias));
19106  Or = DAG.getBitcast(MVT::v2f64, Or);
19107
19108  if (IsStrict)
19109    return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v2f64, MVT::Other},
19110                       {Op.getOperand(0), Or, VBias});
19111  return DAG.getNode(ISD::FSUB, DL, MVT::v2f64, Or, VBias);
19112}
19113
19114static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
19115                                     const X86Subtarget &Subtarget) {
19116  SDLoc DL(Op);
19117  bool IsStrict = Op->isStrictFPOpcode();
19118  SDValue V = Op->getOperand(IsStrict ? 1 : 0);
19119  MVT VecIntVT = V.getSimpleValueType();
19120  assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
19121         "Unsupported custom type");
19122
19123  if (Subtarget.hasAVX512()) {
19124    // With AVX512, but not VLX we need to widen to get a 512-bit result type.
19125    assert(!Subtarget.hasVLX() && "Unexpected features");
19126    MVT VT = Op->getSimpleValueType(0);
19127
19128    // v8i32->v8f64 is legal with AVX512 so just return it.
19129    if (VT == MVT::v8f64)
19130      return Op;
19131
19132    assert((VT == MVT::v4f32 || VT == MVT::v8f32 || VT == MVT::v4f64) &&
19133           "Unexpected VT!");
19134    MVT WideVT = VT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
19135    MVT WideIntVT = VT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
19136    // Need to concat with zero vector for strict fp to avoid spurious
19137    // exceptions.
19138    SDValue Tmp =
19139        IsStrict ? DAG.getConstant(0, DL, WideIntVT) : DAG.getUNDEF(WideIntVT);
19140    V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideIntVT, Tmp, V,
19141                    DAG.getIntPtrConstant(0, DL));
19142    SDValue Res, Chain;
19143    if (IsStrict) {
19144      Res = DAG.getNode(ISD::STRICT_UINT_TO_FP, DL, {WideVT, MVT::Other},
19145                        {Op->getOperand(0), V});
19146      Chain = Res.getValue(1);
19147    } else {
19148      Res = DAG.getNode(ISD::UINT_TO_FP, DL, WideVT, V);
19149    }
19150
19151    Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
19152                      DAG.getIntPtrConstant(0, DL));
19153
19154    if (IsStrict)
19155      return DAG.getMergeValues({Res, Chain}, DL);
19156    return Res;
19157  }
19158
19159  if (Subtarget.hasAVX() && VecIntVT == MVT::v4i32 &&
19160      Op->getSimpleValueType(0) == MVT::v4f64) {
19161    SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i64, V);
19162    Constant *Bias = ConstantFP::get(
19163        *DAG.getContext(),
19164        APFloat(APFloat::IEEEdouble(), APInt(64, 0x4330000000000000ULL)));
19165    auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
19166    SDValue CPIdx = DAG.getConstantPool(Bias, PtrVT, /*Alignment*/ 8);
19167    SDVTList Tys = DAG.getVTList(MVT::v4f64, MVT::Other);
19168    SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
19169    SDValue VBias = DAG.getMemIntrinsicNode(
19170        X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::f64,
19171        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
19172        /*Alignment*/ 8, MachineMemOperand::MOLoad);
19173
19174    SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v4i64, ZExtIn,
19175                             DAG.getBitcast(MVT::v4i64, VBias));
19176    Or = DAG.getBitcast(MVT::v4f64, Or);
19177
19178    if (IsStrict)
19179      return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v4f64, MVT::Other},
19180                         {Op.getOperand(0), Or, VBias});
19181    return DAG.getNode(ISD::FSUB, DL, MVT::v4f64, Or, VBias);
19182  }
19183
19184  // The algorithm is the following:
19185  // #ifdef __SSE4_1__
19186  //     uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
19187  //     uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
19188  //                                 (uint4) 0x53000000, 0xaa);
19189  // #else
19190  //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
19191  //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
19192  // #endif
19193  //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
19194  //     return (float4) lo + fhi;
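  // The magic constants are float bit patterns: 0x4b000000 is 2^23 and
  // 0x53000000 is 2^39. Placing the low/high 16-bit halves of each lane into
  // their mantissas gives the exact values (2^23 + lo16) and
  // (2^39 + (hi16 << 16)); subtracting (0x1.0p39f + 0x1.0p23f) and adding the
  // two halves reconstructs the full 32-bit value as a float.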
19195
19196  bool Is128 = VecIntVT == MVT::v4i32;
19197  MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
19198  // If we convert to something other than the supported type, e.g., to v4f64,
19199  // abort early.
19200  if (VecFloatVT != Op->getSimpleValueType(0))
19201    return SDValue();
19202
19203  // In the #ifdef/#else code, we have in common:
19204  // - The vector of constants:
19205  // -- 0x4b000000
19206  // -- 0x53000000
19207  // - A shift:
19208  // -- v >> 16
19209
19210  // Create the splat vector for 0x4b000000.
19211  SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
19212  // Create the splat vector for 0x53000000.
19213  SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
19214
19215  // Create the right shift.
19216  SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
19217  SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
19218
19219  SDValue Low, High;
19220  if (Subtarget.hasSSE41()) {
19221    MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
19222    //     uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
19223    SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
19224    SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
19225    // Low will be bitcasted right away, so do not bother bitcasting back to its
19226    // original type.
19227    Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
19228                      VecCstLowBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
19229    //     uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
19230    //                                 (uint4) 0x53000000, 0xaa);
19231    SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
19232    SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
19233    // High will be bitcasted right away, so do not bother bitcasting back to
19234    // its original type.
19235    High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
19236                       VecCstHighBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
19237  } else {
19238    SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
19239    //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
19240    SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
19241    Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
19242
19243    //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
19244    High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
19245  }
19246
19247  // Create the vector constant for (0x1.0p39f + 0x1.0p23f).
19248  SDValue VecCstFSub = DAG.getConstantFP(
19249      APFloat(APFloat::IEEEsingle(), APInt(32, 0x53000080)), DL, VecFloatVT);
19250
19251  //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
19252  // NOTE: By using fsub of a positive constant instead of fadd of a negative
19253  // constant, we avoid reassociation in MachineCombiner when unsafe-fp-math is
19254  // enabled. See PR24512.
19255  SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
19256  // TODO: Are there any fast-math-flags to propagate here?
19257  //     (float4) lo;
19258  SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
19259  //     return (float4) lo + fhi;
19260  if (IsStrict) {
19261    SDValue FHigh = DAG.getNode(ISD::STRICT_FSUB, DL, {VecFloatVT, MVT::Other},
19262                                {Op.getOperand(0), HighBitcast, VecCstFSub});
19263    return DAG.getNode(ISD::STRICT_FADD, DL, {VecFloatVT, MVT::Other},
19264                       {FHigh.getValue(1), LowBitcast, FHigh});
19265  }
19266
19267  SDValue FHigh =
19268      DAG.getNode(ISD::FSUB, DL, VecFloatVT, HighBitcast, VecCstFSub);
19269  return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
19270}
19271
19272static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
19273                                   const X86Subtarget &Subtarget) {
19274  unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
19275  SDValue N0 = Op.getOperand(OpNo);
19276  MVT SrcVT = N0.getSimpleValueType();
19277  SDLoc dl(Op);
19278
19279  switch (SrcVT.SimpleTy) {
19280  default:
19281    llvm_unreachable("Custom UINT_TO_FP is not supported!");
19282  case MVT::v2i32:
19283    return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
19284  case MVT::v4i32:
19285  case MVT::v8i32:
19286    return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
19287  case MVT::v2i64:
19288  case MVT::v4i64:
19289    return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
19290  }
19291}
19292
19293SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
19294                                           SelectionDAG &DAG) const {
19295  bool IsStrict = Op->isStrictFPOpcode();
19296  unsigned OpNo = IsStrict ? 1 : 0;
19297  SDValue Src = Op.getOperand(OpNo);
19298  SDLoc dl(Op);
19299  auto PtrVT = getPointerTy(DAG.getDataLayout());
19300  MVT SrcVT = Src.getSimpleValueType();
19301  MVT DstVT = Op->getSimpleValueType(0);
19302  SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
19303
19304  if (DstVT == MVT::f128)
19305    return LowerF128Call(Op, DAG, RTLIB::getUINTTOFP(SrcVT, DstVT));
19306
19307  if (DstVT.isVector())
19308    return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
19309
19310  if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
19311    return Extract;
19312
19313  if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
19314      (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
19315    // Conversions from unsigned i32 to f32/f64 are legal,
19316    // using VCVTUSI2SS/SD.  Same for i64 in 64-bit mode.
19317    return Op;
19318  }
19319
19320  // Promote i32 to i64 and use a signed conversion on 64-bit targets.
19321  if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
19322    Src = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Src);
19323    if (IsStrict)
19324      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {DstVT, MVT::Other},
19325                         {Chain, Src});
19326    return DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, Src);
19327  }
19328
19329  if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
19330    return V;
19331
19332  if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
19333    return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
19334  if (SrcVT == MVT::i32 && X86ScalarSSEf64 && DstVT != MVT::f80)
19335    return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
19336  if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
19337    return SDValue();
19338
19339  // Make a 64-bit buffer, and use it to build an FILD.
19340  SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
19341  if (SrcVT == MVT::i32) {
19342    SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
19343    SDValue Store1 =
19344        DAG.getStore(Chain, dl, Src, StackSlot, MachinePointerInfo());
19345    SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
19346                                  OffsetSlot, MachinePointerInfo());
19347    std::pair<SDValue, SDValue> Tmp =
19348        BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
19349    if (IsStrict)
19350      return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
19351
19352    return Tmp.first;
19353  }
19354
19355  assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
19356  SDValue ValueToStore = Src;
19357  if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit()) {
19358    // Bitcasting to f64 here allows us to do a single 64-bit store from
19359    // an SSE register, avoiding the store forwarding penalty that would come
19360    // with two 32-bit stores.
19361    ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
19362  }
19363  SDValue Store =
19364      DAG.getStore(Chain, dl, ValueToStore, StackSlot, MachinePointerInfo());
19365  // For i64 source, we need to add the appropriate power of 2 if the input
19366  // was negative.  This is the same as the optimization in
19367  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
19368  // we must be careful to do the computation in x87 extended precision, not
19369  // in SSE. (The generic code can't know it's OK to do this, or how to.)
19370  int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
19371  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
19372      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
19373      MachineMemOperand::MOLoad, 8, 8);
19374
19375  SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
19376  SDValue Ops[] = { Store, StackSlot };
19377  SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
19378                                         MVT::i64, MMO);
19379  Chain = Fild.getValue(1);
19380
19382  // Check whether the sign bit is set.
19383  SDValue SignSet = DAG.getSetCC(
19384      dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
19385      Op.getOperand(OpNo), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
19386
19387  // Build a 64 bit pair (FF, 0) in the constant pool, with FF in the hi bits.
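  // FF is the single-precision bit pattern of 2^64 (0x5F800000). When the i64
  // input was negative, FILD has interpreted it as (value - 2^64), so we add
  // the 2^64 fudge factor back in; otherwise we add 0.0.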
19388  APInt FF(64, 0x5F80000000000000ULL);
19389  SDValue FudgePtr = DAG.getConstantPool(
19390      ConstantInt::get(*DAG.getContext(), FF), PtrVT);
19391
19392  // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
19393  SDValue Zero = DAG.getIntPtrConstant(0, dl);
19394  SDValue Four = DAG.getIntPtrConstant(4, dl);
19395  SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Four, Zero);
19396  FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
19397
19398  // Load the value out, extending it from f32 to f80.
19399  SDValue Fudge = DAG.getExtLoad(
19400      ISD::EXTLOAD, dl, MVT::f80, Chain, FudgePtr,
19401      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
19402      /* Alignment = */ 4);
19403  Chain = Fudge.getValue(1);
19404  // Extend everything to 80 bits to force it to be done on x87.
19405  // TODO: Are there any fast-math-flags to propagate here?
19406  if (IsStrict) {
19407    SDValue Add = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::f80, MVT::Other},
19408                              {Chain, Fild, Fudge});
19409    // STRICT_FP_ROUND can't handle equal types.
19410    if (DstVT == MVT::f80)
19411      return Add;
19412    return DAG.getNode(ISD::STRICT_FP_ROUND, dl, {DstVT, MVT::Other},
19413                       {Add.getValue(1), Add, DAG.getIntPtrConstant(0, dl)});
19414  }
19415  SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
19416  return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
19417                     DAG.getIntPtrConstant(0, dl));
19418}
19419
19420// If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
19421// is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
19422// just return an SDValue().
19423// Otherwise it is assumed to be a conversion from one of f32, f64 or f80
19424// to i16, i32 or i64, and we lower it to a legal sequence and return the
19425// result.
19426SDValue
19427X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
19428                                   bool IsSigned, SDValue &Chain) const {
19429  bool IsStrict = Op->isStrictFPOpcode();
19430  SDLoc DL(Op);
19431
19432  EVT DstTy = Op.getValueType();
19433  SDValue Value = Op.getOperand(IsStrict ? 1 : 0);
19434  EVT TheVT = Value.getValueType();
19435  auto PtrVT = getPointerTy(DAG.getDataLayout());
19436
19437  if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
19438    // f16 must be promoted before using the lowering in this routine.
19439    // fp128 does not use this lowering.
19440    return SDValue();
19441  }
19442
19443  // If using FIST to compute an unsigned i64, we'll need some fixup
19444  // to handle values above the maximum signed i64.  A FIST is always
19445  // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
19446  bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;
19447
19448  // FIXME: This does not generate an invalid exception if the input does not
19449  // fit in i32. PR44019
19450  if (!IsSigned && DstTy != MVT::i64) {
19451    // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
19452    // The low 32 bits of the fist result will have the correct uint32 result.
19453    assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
19454    DstTy = MVT::i64;
19455  }
19456
19457  assert(DstTy.getSimpleVT() <= MVT::i64 &&
19458         DstTy.getSimpleVT() >= MVT::i16 &&
19459         "Unknown FP_TO_INT to lower!");
19460
19461  // We lower FP->int64 into FISTP64 followed by a load from a temporary
19462  // stack slot.
19463  MachineFunction &MF = DAG.getMachineFunction();
19464  unsigned MemSize = DstTy.getStoreSize();
19465  int SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
19466  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
19467
19468  Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
19469
19470  SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
19471
19472  if (UnsignedFixup) {
19473    //
19474    // Conversion to unsigned i64 is implemented with a select,
19475    // depending on whether the source value fits in the range
19476    // of a signed i64.  Let Thresh be the FP equivalent of
19477    // 0x8000000000000000ULL.
19478    //
19479    //  Adjust = (Value < Thresh) ? 0 : 0x8000000000000000;
19480    //  FltOfs = (Value < Thresh) ? 0.0 : Thresh;
19481    //  FistSrc = (Value - FltOfs);
19482    //  Fist-to-mem64 FistSrc
19483    //  Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
19484    //  to XOR'ing the high 32 bits with Adjust.
19485    //
19486    // Being a power of 2, Thresh is exactly representable in all FP formats.
19487    // For X87 we'd like to use the smallest FP type for this constant, but
19488    // for DAG type consistency we have to match the FP operand type.
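    // (0x5f000000 below is the IEEE-754 single-precision encoding of 2^63,
    // which is then converted to the source FP type.)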
19489
19490    APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
19491    LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
19492    bool LosesInfo = false;
19493    if (TheVT == MVT::f64)
19494      // The rounding mode is irrelevant as the conversion should be exact.
19495      Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
19496                              &LosesInfo);
19497    else if (TheVT == MVT::f80)
19498      Status = Thresh.convert(APFloat::x87DoubleExtended(),
19499                              APFloat::rmNearestTiesToEven, &LosesInfo);
19500
19501    assert(Status == APFloat::opOK && !LosesInfo &&
19502           "FP conversion should have been exact");
19503
19504    SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
19505
19506    EVT ResVT = getSetCCResultType(DAG.getDataLayout(),
19507                                   *DAG.getContext(), TheVT);
19508    SDValue Cmp;
19509    if (IsStrict) {
19510      Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETLT,
19511                         Chain, /*IsSignaling*/ true);
19512      Chain = Cmp.getValue(1);
19513    } else {
19514      Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETLT);
19515    }
19516
19517    Adjust = DAG.getSelect(DL, MVT::i64, Cmp,
19518                           DAG.getConstant(0, DL, MVT::i64),
19519                           DAG.getConstant(APInt::getSignMask(64),
19520                                           DL, MVT::i64));
19521    SDValue FltOfs = DAG.getSelect(DL, TheVT, Cmp,
19522                                   DAG.getConstantFP(0.0, DL, TheVT),
19523                                   ThreshVal);
19524
19525    if (IsStrict) {
19526      Value = DAG.getNode(ISD::STRICT_FSUB, DL, { TheVT, MVT::Other},
19527                          { Chain, Value, FltOfs });
19528      Chain = Value.getValue(1);
19529    } else
19530      Value = DAG.getNode(ISD::FSUB, DL, TheVT, Value, FltOfs);
19531  }
19532
19533  MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
19534
19535  // FIXME This causes a redundant load/store if the SSE-class value is already
19536  // in memory, such as if it is on the callstack.
19537  if (isScalarFPTypeInSSEReg(TheVT)) {
19538    assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
19539    Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
19540    SDVTList Tys = DAG.getVTList(TheVT, MVT::Other);
19541    SDValue Ops[] = { Chain, StackSlot };
19542
19543    unsigned FLDSize = TheVT.getStoreSize();
19544    assert(FLDSize <= MemSize && "Stack slot not big enough");
19545    MachineMemOperand *MMO = MF.getMachineMemOperand(
19546        MPI, MachineMemOperand::MOLoad, FLDSize, FLDSize);
19547    Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
19548    Chain = Value.getValue(1);
19549  }
19550
19551  // Build the FP_TO_INT*_IN_MEM
19552  MachineMemOperand *MMO = MF.getMachineMemOperand(
19553      MPI, MachineMemOperand::MOStore, MemSize, MemSize);
19554  SDValue Ops[] = { Chain, Value, StackSlot };
19555  SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
19556                                         DAG.getVTList(MVT::Other),
19557                                         Ops, DstTy, MMO);
19558
19559  SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
19560  Chain = Res.getValue(1);
19561
19562  // If we need an unsigned fixup, XOR the result with adjust.
19563  if (UnsignedFixup)
19564    Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);
19565
19566  return Res;
19567}
19568
19569static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
19570                              const X86Subtarget &Subtarget) {
19571  MVT VT = Op.getSimpleValueType();
19572  SDValue In = Op.getOperand(0);
19573  MVT InVT = In.getSimpleValueType();
19574  SDLoc dl(Op);
19575  unsigned Opc = Op.getOpcode();
19576
19577  assert(VT.isVector() && InVT.isVector() && "Expected vector type");
19578  assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
19579         "Unexpected extension opcode");
19580  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
19581         "Expected same number of elements");
19582  assert((VT.getVectorElementType() == MVT::i16 ||
19583          VT.getVectorElementType() == MVT::i32 ||
19584          VT.getVectorElementType() == MVT::i64) &&
19585         "Unexpected element type");
19586  assert((InVT.getVectorElementType() == MVT::i8 ||
19587          InVT.getVectorElementType() == MVT::i16 ||
19588          InVT.getVectorElementType() == MVT::i32) &&
19589         "Unexpected element type");
19590
19591  unsigned ExtendInVecOpc = getOpcode_EXTEND_VECTOR_INREG(Opc);
19592
19593  // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
19594  if (InVT == MVT::v8i8) {
19595    if (VT != MVT::v8i64)
19596      return SDValue();
19597
19598    In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
19599                     MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
19600    return DAG.getNode(ExtendInVecOpc, dl, VT, In);
19601  }
19602
19603  if (Subtarget.hasInt256())
19604    return Op;
19605
19606  // Optimize vectors in AVX mode:
19607  //
19608  //   v8i16 -> v8i32
19609  //   Use vpmovzwd for 4 lower elements  v8i16 -> v4i32.
19610  //   Use vpunpckhwd for 4 upper elements  v8i16 -> v4i32.
19611  //   Concat upper and lower parts.
19612  //
19613  //   v4i32 -> v4i64
19614  //   Use vpmovzdq for 4 lower elements  v4i32 -> v2i64.
19615  //   Use vpunpckhdq for 4 upper elements  v4i32 -> v2i64.
19616  //   Concat upper and lower parts.
19617  //
19618  MVT HalfVT = VT.getHalfNumVectorElementsVT();
19619  SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);
19620
19621  // Short-circuit if we can determine that each 128-bit half is the same value.
19622  // Otherwise, this is difficult to match and optimize.
19623  if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
19624    if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
19625      return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);
19626
19627  SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
19628  SDValue Undef = DAG.getUNDEF(InVT);
19629  bool NeedZero = Opc == ISD::ZERO_EXTEND;
19630  SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
19631  OpHi = DAG.getBitcast(HalfVT, OpHi);
19632
19633  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
19634}
19635
19636// Helper to split and extend a v16i1 mask to v16i8 or v16i16.
19637static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
19638                                   const SDLoc &dl, SelectionDAG &DAG) {
19639  assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
19640  SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
19641                           DAG.getIntPtrConstant(0, dl));
19642  SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
19643                           DAG.getIntPtrConstant(8, dl));
19644  Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
19645  Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
19646  SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
19647  return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
19648}
19649
19650static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
19651                                     const X86Subtarget &Subtarget,
19652                                     SelectionDAG &DAG) {
19653  MVT VT = Op->getSimpleValueType(0);
19654  SDValue In = Op->getOperand(0);
19655  MVT InVT = In.getSimpleValueType();
19656  assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
19657  SDLoc DL(Op);
19658  unsigned NumElts = VT.getVectorNumElements();
19659
19660  // For all vectors but vXi8, we can just emit a sign_extend and a shift. This
19661  // avoids a constant pool load.
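  // In effect: zext(vXi1 x) == srl(sext(x), eltbits - 1), since sign extension
  // produces all-ones/all-zeros lanes and the logical shift leaves just 0 or 1.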
19662  if (VT.getVectorElementType() != MVT::i8) {
19663    SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
19664    return DAG.getNode(ISD::SRL, DL, VT, Extend,
19665                       DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
19666  }
19667
19668  // Extend VT if BWI is not supported.
19669  MVT ExtVT = VT;
19670  if (!Subtarget.hasBWI()) {
19671    // If v16i32 is to be avoided, we'll need to split and concatenate.
19672    if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
19673      return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
19674
19675    ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
19676  }
19677
19678  // Widen to 512-bits if VLX is not supported.
19679  MVT WideVT = ExtVT;
19680  if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
19681    NumElts *= 512 / ExtVT.getSizeInBits();
19682    InVT = MVT::getVectorVT(MVT::i1, NumElts);
19683    In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
19684                     In, DAG.getIntPtrConstant(0, DL));
19685    WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
19686                              NumElts);
19687  }
19688
19689  SDValue One = DAG.getConstant(1, DL, WideVT);
19690  SDValue Zero = DAG.getConstant(0, DL, WideVT);
19691
19692  SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
19693
19694  // Truncate if we had to extend above.
19695  if (VT != ExtVT) {
19696    WideVT = MVT::getVectorVT(MVT::i8, NumElts);
19697    SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
19698  }
19699
19700  // Extract back to 128/256-bit if we widened.
19701  if (WideVT != VT)
19702    SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
19703                              DAG.getIntPtrConstant(0, DL));
19704
19705  return SelectedVal;
19706}
19707
19708static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
19709                                SelectionDAG &DAG) {
19710  SDValue In = Op.getOperand(0);
19711  MVT SVT = In.getSimpleValueType();
19712
19713  if (SVT.getVectorElementType() == MVT::i1)
19714    return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
19715
19716  assert(Subtarget.hasAVX() && "Expected AVX support");
19717  return LowerAVXExtend(Op, DAG, Subtarget);
19718}
19719
19720/// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
19721/// It makes use of the fact that vectors with enough leading sign/zero bits
19722/// prevent the PACKSS/PACKUS from saturating the results.
19723/// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
19724/// within each 128-bit lane.
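/// For example (sketch): a 256-bit v8i32 -> v8i16 truncate becomes a single
/// PACKSSDW/PACKUSDW of the two 128-bit halves, and v16i16 -> v16i8 becomes
/// PACKSSWB/PACKUSWB, provided the inputs carry enough sign/zero bits to
/// avoid saturation.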
19725static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
19726                                      const SDLoc &DL, SelectionDAG &DAG,
19727                                      const X86Subtarget &Subtarget) {
19728  assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
19729         "Unexpected PACK opcode");
19730  assert(DstVT.isVector() && "VT not a vector?");
19731
19732  // Requires SSE2 but AVX512 has fast vector truncate.
19733  if (!Subtarget.hasSSE2())
19734    return SDValue();
19735
19736  EVT SrcVT = In.getValueType();
19737
19738  // No truncation required, we might get here due to recursive calls.
19739  if (SrcVT == DstVT)
19740    return In;
19741
19742  // We only support vector truncation to 64bits or greater from a
19743  // 128bits or greater source.
19744  unsigned DstSizeInBits = DstVT.getSizeInBits();
19745  unsigned SrcSizeInBits = SrcVT.getSizeInBits();
19746  if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
19747    return SDValue();
19748
19749  unsigned NumElems = SrcVT.getVectorNumElements();
19750  if (!isPowerOf2_32(NumElems))
19751    return SDValue();
19752
19753  LLVMContext &Ctx = *DAG.getContext();
19754  assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
19755  assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");
19756
19757  EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
19758
19759  // Pack to the largest type possible:
19760  // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
19761  EVT InVT = MVT::i16, OutVT = MVT::i8;
19762  if (SrcVT.getScalarSizeInBits() > 16 &&
19763      (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
19764    InVT = MVT::i32;
19765    OutVT = MVT::i16;
19766  }
19767
19768  // 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
19769  if (SrcVT.is128BitVector()) {
19770    InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
19771    OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
19772    In = DAG.getBitcast(InVT, In);
19773    SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, In);
19774    Res = extractSubVector(Res, 0, DAG, DL, 64);
19775    return DAG.getBitcast(DstVT, Res);
19776  }
19777
19778  // Extract lower/upper subvectors.
19779  unsigned NumSubElts = NumElems / 2;
19780  SDValue Lo = extractSubVector(In, 0 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
19781  SDValue Hi = extractSubVector(In, 1 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
19782
19783  unsigned SubSizeInBits = SrcSizeInBits / 2;
19784  InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
19785  OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
19786
19787  // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
19788  if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
19789    Lo = DAG.getBitcast(InVT, Lo);
19790    Hi = DAG.getBitcast(InVT, Hi);
19791    SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
19792    return DAG.getBitcast(DstVT, Res);
19793  }
19794
19795  // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
19796  // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
19797  if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
19798    Lo = DAG.getBitcast(InVT, Lo);
19799    Hi = DAG.getBitcast(InVT, Hi);
19800    SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
19801
19802    // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
19803    // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
19804    // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
19805    SmallVector<int, 64> Mask;
19806    int Scale = 64 / OutVT.getScalarSizeInBits();
19807    scaleShuffleMask<int>(Scale, ArrayRef<int>({ 0, 2, 1, 3 }), Mask);
19808    Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);
19809
19810    if (DstVT.is256BitVector())
19811      return DAG.getBitcast(DstVT, Res);
19812
19813    // If 512bit -> 128bit truncate another stage.
19814    EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
19815    Res = DAG.getBitcast(PackedVT, Res);
19816    return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
19817  }
19818
19819  // Recursively pack lower/upper subvectors, concat result and pack again.
19820  assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
19821  EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumSubElts);
19822  Lo = truncateVectorWithPACK(Opcode, PackedVT, Lo, DL, DAG, Subtarget);
19823  Hi = truncateVectorWithPACK(Opcode, PackedVT, Hi, DL, DAG, Subtarget);
19824
19825  PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
19826  SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
19827  return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
19828}
19829
19830static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
19831                                  const X86Subtarget &Subtarget) {
19832
19833  SDLoc DL(Op);
19834  MVT VT = Op.getSimpleValueType();
19835  SDValue In = Op.getOperand(0);
19836  MVT InVT = In.getSimpleValueType();
19837
19838  assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
19839
19840  // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
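  // Truncation to vXi1 keeps only the LSB of each element, but VPMOV*2M and
  // the compares below read the sign bit, so shift the LSB up into the MSB
  // first unless the bits are already known to be sign-bit copies.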
19841  unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
19842  if (InVT.getScalarSizeInBits() <= 16) {
19843    if (Subtarget.hasBWI()) {
19844      // legal, will go to VPMOVB2M, VPMOVW2M
19845      if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
19846        // We need to shift to get the lsb into sign position.
19847        // Shift packed bytes not supported natively, bitcast to word
19848        MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
19849        In = DAG.getNode(ISD::SHL, DL, ExtVT,
19850                         DAG.getBitcast(ExtVT, In),
19851                         DAG.getConstant(ShiftInx, DL, ExtVT));
19852        In = DAG.getBitcast(InVT, In);
19853      }
19854      return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
19855                          In, ISD::SETGT);
19856    }
19857    // Use TESTD/Q, extended vector to packed dword/qword.
19858    assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
19859           "Unexpected vector type.");
19860    unsigned NumElts = InVT.getVectorNumElements();
19861    assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
19862    // We need to change to a wider element type that we have support for.
19863    // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
19864    // For 16 element vectors we extend to v16i32 unless we are explicitly
19865    // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
19866    // we need to split into two 8 element vectors which we can extend to v8i32,
19867    // truncate and concat the results. There's an additional complication if
19868    // the original type is v16i8. In that case we can't split the v16i8 so
19869    // first we pre-extend it to v16i16 which we can split to v8i16, then extend
19870    // to v8i32, truncate that to v8i1 and concat the two halves.
19871    if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
19872      if (InVT == MVT::v16i8) {
19873        // First we need to sign extend up to 256-bits so we can split that.
19874        InVT = MVT::v16i16;
19875        In = DAG.getNode(ISD::SIGN_EXTEND, DL, InVT, In);
19876      }
19877      SDValue Lo = extract128BitVector(In, 0, DAG, DL);
19878      SDValue Hi = extract128BitVector(In, 8, DAG, DL);
19879      // We're split now, just emit two truncates and a concat. The two
19880      // truncates will trigger legalization to come back to this function.
19881      Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
19882      Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
19883      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
19884    }
19885    // We either have 8 elements or we're allowed to use 512-bit vectors.
19886    // If we have VLX, we want to use the narrowest vector that can get the
19887    // job done so we use vXi32.
19888    MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
19889    MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
19890    In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
19891    InVT = ExtVT;
19892    ShiftInx = InVT.getScalarSizeInBits() - 1;
19893  }
19894
19895  if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
19896    // We need to shift to get the lsb into sign position.
19897    In = DAG.getNode(ISD::SHL, DL, InVT, In,
19898                     DAG.getConstant(ShiftInx, DL, InVT));
19899  }
19900  // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
19901  if (Subtarget.hasDQI())
19902    return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
19903  return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
19904}
19905
19906SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
19907  SDLoc DL(Op);
19908  MVT VT = Op.getSimpleValueType();
19909  SDValue In = Op.getOperand(0);
19910  MVT InVT = In.getSimpleValueType();
19911  unsigned InNumEltBits = InVT.getScalarSizeInBits();
19912
19913  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
19914         "Invalid TRUNCATE operation");
19915
19916  // If we're called by the type legalizer, handle a few cases.
19917  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19918  if (!TLI.isTypeLegal(InVT)) {
19919    if ((InVT == MVT::v8i64 || InVT == MVT::v16i32 || InVT == MVT::v16i64) &&
19920        VT.is128BitVector()) {
19921      assert(Subtarget.hasVLX() && "Unexpected subtarget!");
19922      // The default behavior is to truncate one step, concatenate, and then
19923      // truncate the remainder. We'd rather produce two 64-bit results and
19924      // concatenate those.
19925      SDValue Lo, Hi;
19926      std::tie(Lo, Hi) = DAG.SplitVector(In, DL);
19927
19928      EVT LoVT, HiVT;
19929      std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
19930
19931      Lo = DAG.getNode(ISD::TRUNCATE, DL, LoVT, Lo);
19932      Hi = DAG.getNode(ISD::TRUNCATE, DL, HiVT, Hi);
19933      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
19934    }
19935
19936    // Otherwise let default legalization handle it.
19937    return SDValue();
19938  }
19939
19940  if (VT.getVectorElementType() == MVT::i1)
19941    return LowerTruncateVecI1(Op, DAG, Subtarget);
19942
19943  // vpmovqb/w/d, vpmovdb/w, vpmovwb
19944  if (Subtarget.hasAVX512()) {
19945    // Word to byte only under BWI. Otherwise we have to promote to v16i32
19946    // and then truncate that. But we should only do that if we haven't been
19947    // asked to avoid 512-bit vectors. The actual promotion to v16i32 will be
19948    // handled by isel patterns.
19949    if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
19950        Subtarget.canExtendTo512DQ())
19951      return Op;
19952  }
19953
19954  unsigned NumPackedSignBits = std::min<unsigned>(VT.getScalarSizeInBits(), 16);
19955  unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
19956
19957  // Truncate with PACKUS if we are truncating a vector with leading zero bits
19958  // that extend all the way to the packed/truncated value.
19959  // Pre-SSE41 we can only use PACKUSWB.
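  // For example, v8i32 -> v8i16 can use PACKUSDW (SSE4.1) when the upper 16
  // bits of every input element are known to be zero.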
19960  KnownBits Known = DAG.computeKnownBits(In);
19961  if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
19962    if (SDValue V =
19963            truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
19964      return V;
19965
19966  // Truncate with PACKSS if we are truncating a vector with sign-bits that
19967  // extend all the way to the packed/truncated value.
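  // For example, an input that was sign-extended from vXi16 (or narrower) can
  // be truncated back to vXi16 with PACKSSDW.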
19968  if ((InNumEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
19969    if (SDValue V =
19970            truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
19971      return V;
19972
19973  // Handle truncation of V256 to V128 using shuffles.
19974  assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");
19975
19976  if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
19977    // On AVX2, v4i64 -> v4i32 becomes VPERMD.
19978    if (Subtarget.hasInt256()) {
19979      static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
19980      In = DAG.getBitcast(MVT::v8i32, In);
19981      In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
19982      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
19983                         DAG.getIntPtrConstant(0, DL));
19984    }
19985
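    // Without AVX2: extract the two 128-bit halves, bitcast them to v4i32, and
    // shuffle the even dwords (the low half of each i64) together.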
19986    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
19987                               DAG.getIntPtrConstant(0, DL));
19988    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
19989                               DAG.getIntPtrConstant(2, DL));
19990    OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
19991    OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
19992    static const int ShufMask[] = {0, 2, 4, 6};
19993    return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
19994  }
19995
19996  if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
19997    // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
19998    if (Subtarget.hasInt256()) {
19999      In = DAG.getBitcast(MVT::v32i8, In);
20000
20001      // The PSHUFB mask:
20002      static const int ShufMask1[] = { 0,  1,  4,  5,  8,  9, 12, 13,
20003                                      -1, -1, -1, -1, -1, -1, -1, -1,
20004                                      16, 17, 20, 21, 24, 25, 28, 29,
20005                                      -1, -1, -1, -1, -1, -1, -1, -1 };
20006      In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
20007      In = DAG.getBitcast(MVT::v4i64, In);
20008
20009      static const int ShufMask2[] = {0,  2,  -1,  -1};
20010      In = DAG.getVectorShuffle(MVT::v4i64, DL,  In, In, ShufMask2);
20011      In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20012                       DAG.getIntPtrConstant(0, DL));
20013      return DAG.getBitcast(VT, In);
20014    }
20015
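    // Without AVX2: do the same per 128-bit half with PSHUFB and then merge the
    // two halves with a single shuffle.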
20016    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
20017                               DAG.getIntPtrConstant(0, DL));
20018
20019    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
20020                               DAG.getIntPtrConstant(4, DL));
20021
20022    OpLo = DAG.getBitcast(MVT::v16i8, OpLo);
20023    OpHi = DAG.getBitcast(MVT::v16i8, OpHi);
20024
20025    // The PSHUFB mask:
20026    static const int ShufMask1[] = {0,  1,  4,  5,  8,  9, 12, 13,
20027                                   -1, -1, -1, -1, -1, -1, -1, -1};
20028
20029    OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, OpLo, ShufMask1);
20030    OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, OpHi, ShufMask1);
20031
20032    OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
20033    OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
20034
20035    // The MOVLHPS Mask:
20036    static const int ShufMask2[] = {0, 1, 4, 5};
20037    SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
20038    return DAG.getBitcast(MVT::v8i16, res);
20039  }
20040
20041  if (VT == MVT::v16i8 && InVT == MVT::v16i16) {
20042    // Use an AND to zero the upper bits for PACKUS.
20043    In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(255, DL, InVT));
20044
20045    SDValue InLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
20046                               DAG.getIntPtrConstant(0, DL));
20047    SDValue InHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
20048                               DAG.getIntPtrConstant(8, DL));
20049    return DAG.getNode(X86ISD::PACKUS, DL, VT, InLo, InHi);
20050  }
20051
20052  llvm_unreachable("All 256->128 cases should have been handled above!");
20053}
20054
20055SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
20056  bool IsStrict = Op->isStrictFPOpcode();
20057  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
20058                  Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
20059  MVT VT = Op->getSimpleValueType(0);
20060  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
20061  MVT SrcVT = Src.getSimpleValueType();
20062  SDLoc dl(Op);
20063
20064  if (VT.isVector()) {
20065    if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
20066      MVT ResVT = MVT::v4i32;
20067      MVT TruncVT = MVT::v4i1;
20068      unsigned Opc;
20069      if (IsStrict)
20070        Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
20071      else
20072        Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
20073
20074      if (!IsSigned && !Subtarget.hasVLX()) {
20075        assert(Subtarget.useAVX512Regs() && "Unexpected features!");
20076        // Widen to 512-bits.
20077        ResVT = MVT::v8i32;
20078        TruncVT = MVT::v8i1;
20079        Opc = Op.getOpcode();
20080        // Need to concat with zero vector for strict fp to avoid spurious
20081        // exceptions.
20082        // TODO: Should we just do this for non-strict as well?
20083        SDValue Tmp = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v8f64)
20084                               : DAG.getUNDEF(MVT::v8f64);
20085        Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64, Tmp, Src,
20086                          DAG.getIntPtrConstant(0, dl));
20087      }
20088      SDValue Res, Chain;
20089      if (IsStrict) {
20090        Res =
20091            DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {Op->getOperand(0), Src});
20092        Chain = Res.getValue(1);
20093      } else {
20094        Res = DAG.getNode(Opc, dl, ResVT, Src);
20095      }
20096
20097      Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
20098      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
20099                        DAG.getIntPtrConstant(0, dl));
20100      if (IsStrict)
20101        return DAG.getMergeValues({Res, Chain}, dl);
20102      return Res;
20103    }
20104
20105    // v8f64->v8i32 is legal, but we need v8i32 to be custom for v8f32.
20106    if (VT == MVT::v8i32 && SrcVT == MVT::v8f64) {
20107      assert(!IsSigned && "Expected unsigned conversion!");
20108      assert(Subtarget.useAVX512Regs() && "Requires avx512f");
20109      return Op;
20110    }
20111
20112    // Widen vXi32 fp_to_uint with avx512f to 512-bit source.
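    // For example, v4f32 -> v4i32 fp_to_uint is performed as a v16f32 -> v16i32
    // conversion, after which the low 4 elements are extracted.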
20113    if ((VT == MVT::v4i32 || VT == MVT::v8i32) &&
20114        (SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v8f32)) {
20115      assert(!IsSigned && "Expected unsigned conversion!");
20116      assert(Subtarget.useAVX512Regs() && !Subtarget.hasVLX() &&
20117             "Unexpected features!");
20118      MVT WideVT = SrcVT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
20119      MVT ResVT = SrcVT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
20120      // Need to concat with zero vector for strict fp to avoid spurious
20121      // exceptions.
20122      // TODO: Should we just do this for non-strict as well?
20123      SDValue Tmp =
20124          IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
20125      Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
20126                        DAG.getIntPtrConstant(0, dl));
20127
20128      SDValue Res, Chain;
20129      if (IsStrict) {
20130        Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, dl, {ResVT, MVT::Other},
20131                          {Op->getOperand(0), Src});
20132        Chain = Res.getValue(1);
20133      } else {
20134        Res = DAG.getNode(ISD::FP_TO_UINT, dl, ResVT, Src);
20135      }
20136
20137      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20138                        DAG.getIntPtrConstant(0, dl));
20139
20140      if (IsStrict)
20141        return DAG.getMergeValues({Res, Chain}, dl);
20142      return Res;
20143    }
20144
20145    // Widen vXi64 fp_to_uint/fp_to_sint with avx512dq to 512-bit source.
20146    if ((VT == MVT::v2i64 || VT == MVT::v4i64) &&
20147        (SrcVT == MVT::v2f64 || SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32)) {
20148      assert(Subtarget.useAVX512Regs() && Subtarget.hasDQI() &&
20149             !Subtarget.hasVLX() && "Unexpected features!");
20150      MVT WideVT = SrcVT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
20151      // Need to concat with zero vector for strict fp to avoid spurious
20152      // exceptions.
20153      // TODO: Should we just do this for non-strict as well?
20154      SDValue Tmp =
20155          IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
20156      Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
20157                        DAG.getIntPtrConstant(0, dl));
20158
20159      SDValue Res, Chain;
20160      if (IsStrict) {
20161        Res = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
20162                          {Op->getOperand(0), Src});
20163        Chain = Res.getValue(1);
20164      } else {
20165        Res = DAG.getNode(Op.getOpcode(), dl, MVT::v8i64, Src);
20166      }
20167
20168      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20169                        DAG.getIntPtrConstant(0, dl));
20170
20171      if (IsStrict)
20172        return DAG.getMergeValues({Res, Chain}, dl);
20173      return Res;
20174    }
20175
20176    if (VT == MVT::v2i64 && SrcVT  == MVT::v2f32) {
20177      assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL");
20178      SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
20179                                DAG.getUNDEF(MVT::v2f32));
20180      if (IsStrict) {
20181        unsigned Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI
20182                                : X86ISD::STRICT_CVTTP2UI;
20183        return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op->getOperand(0), Tmp});
20184      }
20185      unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
20186      return DAG.getNode(Opc, dl, VT, Tmp);
20187    }
20188
20189    return SDValue();
20190  }
20191
20192  assert(!VT.isVector());
20193
20194  bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);
20195
20196  if (!IsSigned && UseSSEReg) {
20197    // Conversions from f32/f64 with AVX512 should be legal.
20198    if (Subtarget.hasAVX512())
20199      return Op;
20200
20201    // Use default expansion for i64.
20202    if (VT == MVT::i64)
20203      return SDValue();
20204
20205    assert(VT == MVT::i32 && "Unexpected VT!");
20206
20207    // Promote i32 to i64 and use a signed operation on 64-bit targets.
20208    // FIXME: This does not generate an invalid exception if the input does not
20209    // fit in i32. PR44019
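    // That is, the i32 fp_to_uint is computed as an i64 fp_to_sint followed by
    // a truncate.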
20210    if (Subtarget.is64Bit()) {
20211      SDValue Res, Chain;
20212      if (IsStrict) {
20213        Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { MVT::i64, MVT::Other},
20214                          { Op.getOperand(0), Src });
20215        Chain = Res.getValue(1);
20216      } else
20217        Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
20218
20219      Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20220      if (IsStrict)
20221        return DAG.getMergeValues({ Res, Chain }, dl);
20222      return Res;
20223    }
20224
20225    // Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
20226    // use fisttp which will be handled later.
20227    if (!Subtarget.hasSSE3())
20228      return SDValue();
20229  }
20230
20231  // Promote i16 to i32 if we can use a SSE operation or the type is f128.
20232  // FIXME: This does not generate an invalid exception if the input does not
20233  // fit in i16. PR44019
20234  if (VT == MVT::i16 && (UseSSEReg || SrcVT == MVT::f128)) {
20235    assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
20236    SDValue Res, Chain;
20237    if (IsStrict) {
20238      Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { MVT::i32, MVT::Other},
20239                        { Op.getOperand(0), Src });
20240      Chain = Res.getValue(1);
20241    } else
20242      Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
20243
20244    Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20245    if (IsStrict)
20246      return DAG.getMergeValues({ Res, Chain }, dl);
20247    return Res;
20248  }
20249
20250  // If this is a FP_TO_SINT using SSEReg we're done.
20251  if (UseSSEReg && IsSigned)
20252    return Op;
20253
20254  // fp128 needs to use a libcall.
20255  if (SrcVT == MVT::f128) {
20256    RTLIB::Libcall LC;
20257    if (IsSigned)
20258      LC = RTLIB::getFPTOSINT(SrcVT, VT);
20259    else
20260      LC = RTLIB::getFPTOUINT(SrcVT, VT);
20261
20262    SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
20263    MakeLibCallOptions CallOptions;
20264    std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, Src, CallOptions,
20265                                                  SDLoc(Op), Chain);
20266
20267    if (IsStrict)
20268      return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
20269
20270    return Tmp.first;
20271  }
20272
20273  // Fall back to X87.
20274  SDValue Chain;
20275  if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned, Chain)) {
20276    if (IsStrict)
20277      return DAG.getMergeValues({V, Chain}, dl);
20278    return V;
20279  }
20280
20281  llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
20282}
20283
20284SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
20285  bool IsStrict = Op->isStrictFPOpcode();
20286
20287  SDLoc DL(Op);
20288  MVT VT = Op.getSimpleValueType();
20289  SDValue In = Op.getOperand(IsStrict ? 1 : 0);
20290  MVT SVT = In.getSimpleValueType();
20291
20292  if (VT == MVT::f128) {
20293    RTLIB::Libcall LC = RTLIB::getFPEXT(SVT, VT);
20294    return LowerF128Call(Op, DAG, LC);
20295  }
20296
20297  assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
20298
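  // Widen v2f32 to v4f32 with an undef upper half; the (STRICT_)VFPEXT node
  // only reads the low two elements when producing v2f64.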
20299  SDValue Res =
20300      DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, In, DAG.getUNDEF(SVT));
20301  if (IsStrict)
20302    return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
20303                       {Op->getOperand(0), Res});
20304  return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
20305}
20306
20307SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
20308  bool IsStrict = Op->isStrictFPOpcode();
20309
20310  MVT VT = Op.getSimpleValueType();
20311  SDValue In = Op.getOperand(IsStrict ? 1 : 0);
20312  MVT SVT = In.getSimpleValueType();
20313
20314  // It's legal except when f128 is involved
20315  if (SVT != MVT::f128)
20316    return Op;
20317
20318  RTLIB::Libcall LC = RTLIB::getFPROUND(SVT, VT);
20319
20320  // FP_ROUND node has a second operand indicating whether it is known to be
20321  // precise. That doesn't take part in the LibCall so we can't directly use
20322  // LowerF128Call.
20323
20324  SDLoc dl(Op);
20325  SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
20326  MakeLibCallOptions CallOptions;
20327  std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, In, CallOptions,
20328                                                dl, Chain);
20329
20330  if (IsStrict)
20331    return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
20332
20333  return Tmp.first;
20334}
20335
20336/// Depending on uarch and/or optimizing for size, we might prefer to use a
20337/// vector operation in place of the typical scalar operation.
20338static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
20339                                         const X86Subtarget &Subtarget) {
20340  // If both operands have other uses, this is probably not profitable.
20341  SDValue LHS = Op.getOperand(0);
20342  SDValue RHS = Op.getOperand(1);
20343  if (!LHS.hasOneUse() && !RHS.hasOneUse())
20344    return Op;
20345
20346  // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
20347  bool IsFP = Op.getSimpleValueType().isFloatingPoint();
20348  if (IsFP && !Subtarget.hasSSE3())
20349    return Op;
20350  if (!IsFP && !Subtarget.hasSSSE3())
20351    return Op;
20352
20353  // Extract from a common vector.
20354  if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
20355      RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
20356      LHS.getOperand(0) != RHS.getOperand(0) ||
20357      !isa<ConstantSDNode>(LHS.getOperand(1)) ||
20358      !isa<ConstantSDNode>(RHS.getOperand(1)) ||
20359      !shouldUseHorizontalOp(true, DAG, Subtarget))
20360    return Op;
20361
20362  // Allow commuted 'hadd' ops.
20363  // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
20364  unsigned HOpcode;
20365  switch (Op.getOpcode()) {
20366    case ISD::ADD: HOpcode = X86ISD::HADD; break;
20367    case ISD::SUB: HOpcode = X86ISD::HSUB; break;
20368    case ISD::FADD: HOpcode = X86ISD::FHADD; break;
20369    case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
20370    default:
20371      llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
20372  }
20373  unsigned LExtIndex = LHS.getConstantOperandVal(1);
20374  unsigned RExtIndex = RHS.getConstantOperandVal(1);
20375  if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
20376      (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
20377    std::swap(LExtIndex, RExtIndex);
20378
20379  if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
20380    return Op;
20381
20382  SDValue X = LHS.getOperand(0);
20383  EVT VecVT = X.getValueType();
20384  unsigned BitWidth = VecVT.getSizeInBits();
20385  unsigned NumLanes = BitWidth / 128;
20386  unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
20387  assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
20388         "Not expecting illegal vector widths here");
20389
20390  // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
20391  // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
20392  SDLoc DL(Op);
20393  if (BitWidth == 256 || BitWidth == 512) {
20394    unsigned LaneIdx = LExtIndex / NumEltsPerLane;
20395    X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
20396    LExtIndex %= NumEltsPerLane;
20397  }
20398
20399  // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
20400  // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
20401  // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
20402  // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
20403  SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
20404  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
20405                     DAG.getIntPtrConstant(LExtIndex / 2, DL));
20406}
20407
20408/// Depending on uarch and/or optimizing for size, we might prefer to use a
20409/// vector operation in place of the typical scalar operation.
20410SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
20411  assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
20412         "Only expecting float/double");
20413  return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
20414}
20415
20416/// The only differences between FABS and FNEG are the mask and the logic op.
20417/// FNEG also has a folding opportunity for FNEG(FABS(x)).
20418static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
20419  assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
20420         "Wrong opcode for lowering FABS or FNEG.");
20421
20422  bool IsFABS = (Op.getOpcode() == ISD::FABS);
20423
20424  // If this is a FABS and it has an FNEG user, bail out to fold the combination
20425  // into an FNABS. We'll lower the FABS after that if it is still in use.
20426  if (IsFABS)
20427    for (SDNode *User : Op->uses())
20428      if (User->getOpcode() == ISD::FNEG)
20429        return Op;
20430
20431  SDLoc dl(Op);
20432  MVT VT = Op.getSimpleValueType();
20433
20434  bool IsF128 = (VT == MVT::f128);
20435  assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
20436          VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
20437          VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
20438         "Unexpected type in LowerFABSorFNEG");
20439
20440  // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
20441  // decide if we should generate a 16-byte constant mask when we only need 4 or
20442  // 8 bytes for the scalar case.
20443
20444  // There are no scalar bitwise logical SSE/AVX instructions, so we
20445  // generate a 16-byte vector constant and logic op even for the scalar case.
20446  // Using a 16-byte mask allows folding the load of the mask with
20447  // the logic op, which can save ~4 bytes of code size.
20448  bool IsFakeVector = !VT.isVector() && !IsF128;
20449  MVT LogicVT = VT;
20450  if (IsFakeVector)
20451    LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
20452
20453  unsigned EltBits = VT.getScalarSizeInBits();
20454  // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
20455  APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
20456                           APInt::getSignMask(EltBits);
20457  const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
20458  SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
20459
20460  SDValue Op0 = Op.getOperand(0);
20461  bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
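  // The resulting node is one of:
  //   FABS(x)       -> FAND(x, 0x7f..f)
  //   FNEG(x)       -> FXOR(x, 0x80..0)
  //   FNEG(FABS(x)) -> FOR(x, 0x80..0)   (FNABS)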
20462  unsigned LogicOp = IsFABS  ? X86ISD::FAND :
20463                     IsFNABS ? X86ISD::FOR  :
20464                               X86ISD::FXOR;
20465  SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
20466
20467  if (VT.isVector() || IsF128)
20468    return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
20469
20470  // For the scalar case extend to a 128-bit vector, perform the logic op,
20471  // and extract the scalar result back out.
20472  Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
20473  SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
20474  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
20475                     DAG.getIntPtrConstant(0, dl));
20476}
20477
20478static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
20479  SDValue Mag = Op.getOperand(0);
20480  SDValue Sign = Op.getOperand(1);
20481  SDLoc dl(Op);
20482
20483  // If the sign operand is smaller, extend it first.
20484  MVT VT = Op.getSimpleValueType();
20485  if (Sign.getSimpleValueType().bitsLT(VT))
20486    Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
20487
20488  // And if it is bigger, shrink it first.
20489  if (Sign.getSimpleValueType().bitsGT(VT))
20490    Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign, DAG.getIntPtrConstant(1, dl));
20491
20492  // At this point the operands and the result should have the same
20493  // type, and that won't be f80 since that is not custom lowered.
20494  bool IsF128 = (VT == MVT::f128);
20495  assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
20496          VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
20497          VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
20498         "Unexpected type in LowerFCOPYSIGN");
20499
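  // copysign(Mag, Sign) = (Mag & ~SignBitMask) | (Sign & SignBitMask)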
20500  const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
20501
20502  // Perform all scalar logic operations as 16-byte vectors because there are no
20503  // scalar FP logic instructions in SSE.
20504  // TODO: This isn't necessary. If we used scalar types, we might avoid some
20505  // unnecessary splats, but we might miss load folding opportunities. Should
20506  // this decision be based on OptimizeForSize?
20507  bool IsFakeVector = !VT.isVector() && !IsF128;
20508  MVT LogicVT = VT;
20509  if (IsFakeVector)
20510    LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
20511
20512  // The mask constants are automatically splatted for vector types.
20513  unsigned EltSizeInBits = VT.getScalarSizeInBits();
20514  SDValue SignMask = DAG.getConstantFP(
20515      APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
20516  SDValue MagMask = DAG.getConstantFP(
20517      APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
20518
20519  // First, clear all bits but the sign bit from the second operand (sign).
20520  if (IsFakeVector)
20521    Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
20522  SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
20523
20524  // Next, clear the sign bit from the first operand (magnitude).
20525  // TODO: If we had general constant folding for FP logic ops, this check
20526  // wouldn't be necessary.
20527  SDValue MagBits;
20528  if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
20529    APFloat APF = Op0CN->getValueAPF();
20530    APF.clearSign();
20531    MagBits = DAG.getConstantFP(APF, dl, LogicVT);
20532  } else {
20533    // If the magnitude operand wasn't a constant, we need to AND out the sign.
20534    if (IsFakeVector)
20535      Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
20536    MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
20537  }
20538
20539  // OR the magnitude value with the sign bit.
20540  SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
20541  return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
20542                                          DAG.getIntPtrConstant(0, dl));
20543}
20544
20545static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
20546  SDValue N0 = Op.getOperand(0);
20547  SDLoc dl(Op);
20548  MVT VT = Op.getSimpleValueType();
20549
20550  MVT OpVT = N0.getSimpleValueType();
20551  assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
20552         "Unexpected type for FGETSIGN");
20553
20554  // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
20555  MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
20556  SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
20557  Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
20558  Res = DAG.getZExtOrTrunc(Res, dl, VT);
20559  Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
20560  return Res;
20561}
20562
20563/// Helper for creating a X86ISD::SETCC node.
20564static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
20565                        SelectionDAG &DAG) {
20566  return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
20567                     DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
20568}
20569
20570/// Helper for matching OR(EXTRACTELT(X,0),OR(EXTRACTELT(X,1),...))
20571/// style scalarized (associative) reduction patterns.
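/// For example, OR(EXTRACTELT(X,0), OR(EXTRACTELT(X,1), EXTRACTELT(X,2))) is
/// matched when every element of each source vector is extracted exactly once;
/// the distinct source vectors are returned in \p SrcOps.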
20572static bool matchScalarReduction(SDValue Op, ISD::NodeType BinOp,
20573                                 SmallVectorImpl<SDValue> &SrcOps) {
20574  SmallVector<SDValue, 8> Opnds;
20575  DenseMap<SDValue, APInt> SrcOpMap;
20576  EVT VT = MVT::Other;
20577
20578  // Recognize a special case where a vector is cast into a wide integer to
20579  // test all 0s.
20580  assert(Op.getOpcode() == unsigned(BinOp) &&
20581         "Unexpected bit reduction opcode");
20582  Opnds.push_back(Op.getOperand(0));
20583  Opnds.push_back(Op.getOperand(1));
20584
20585  for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
20586    SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
20587    // BFS traverse all BinOp operands.
20588    if (I->getOpcode() == unsigned(BinOp)) {
20589      Opnds.push_back(I->getOperand(0));
20590      Opnds.push_back(I->getOperand(1));
20591      // Re-evaluate the number of nodes to be traversed.
20592      e += 2; // 2 more nodes (LHS and RHS) are pushed.
20593      continue;
20594    }
20595
20596    // Quit if this is not an EXTRACT_VECTOR_ELT.
20597    if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
20598      return false;
20599
20600    // Quit if the index is not a constant.
20601    SDValue Idx = I->getOperand(1);
20602    if (!isa<ConstantSDNode>(Idx))
20603      return false;
20604
20605    SDValue Src = I->getOperand(0);
20606    DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
20607    if (M == SrcOpMap.end()) {
20608      VT = Src.getValueType();
20609      // Quit if not the same type.
20610      if (SrcOpMap.begin() != SrcOpMap.end() &&
20611          VT != SrcOpMap.begin()->first.getValueType())
20612        return false;
20613      unsigned NumElts = VT.getVectorNumElements();
20614      APInt EltCount = APInt::getNullValue(NumElts);
20615      M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
20616      SrcOps.push_back(Src);
20617    }
20618    // Quit if element already used.
20619    unsigned CIdx = cast<ConstantSDNode>(Idx)->getZExtValue();
20620    if (M->second[CIdx])
20621      return false;
20622    M->second.setBit(CIdx);
20623  }
20624
20625  // Quit if not all elements are used.
20626  for (DenseMap<SDValue, APInt>::const_iterator I = SrcOpMap.begin(),
20627                                                E = SrcOpMap.end();
20628       I != E; ++I) {
20629    if (!I->second.isAllOnesValue())
20630      return false;
20631  }
20632
20633  return true;
20634}
20635
20636// Check whether an OR'd tree is PTEST-able.
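// That is, a scalarized OR-reduction over whole vectors can be lowered to a
// PTEST of the OR of those vectors, which sets ZF when the result is all zero.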
20637static SDValue LowerVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
20638                                      const X86Subtarget &Subtarget,
20639                                      SelectionDAG &DAG, SDValue &X86CC) {
20640  assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
20641
20642  if (!Subtarget.hasSSE41() || !Op->hasOneUse())
20643    return SDValue();
20644
20645  SmallVector<SDValue, 8> VecIns;
20646  if (!matchScalarReduction(Op, ISD::OR, VecIns))
20647    return SDValue();
20648
20649  // Quit if not 128/256-bit vector.
20650  EVT VT = VecIns[0].getValueType();
20651  if (!VT.is128BitVector() && !VT.is256BitVector())
20652    return SDValue();
20653
20654  SDLoc DL(Op);
20655  MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
20656
20657  // Cast all vectors into TestVT for PTEST.
20658  for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
20659    VecIns[i] = DAG.getBitcast(TestVT, VecIns[i]);
20660
20661  // If more than one full vector is evaluated, OR them first before PTEST.
20662  for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
20663    // Each iteration will OR 2 nodes and append the result until there is only
20664    // 1 node left, i.e. the final OR'd value of all vectors.
20665    SDValue LHS = VecIns[Slot];
20666    SDValue RHS = VecIns[Slot + 1];
20667    VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
20668  }
20669
20670  X86CC = DAG.getTargetConstant(CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE,
20671                                DL, MVT::i8);
20672  return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, VecIns.back(), VecIns.back());
20673}
20674
20675/// Return true if \c Op has a use that doesn't just read flags.
20676static bool hasNonFlagsUse(SDValue Op) {
20677  for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
20678       ++UI) {
20679    SDNode *User = *UI;
20680    unsigned UOpNo = UI.getOperandNo();
20681    if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
20682      // Look past the truncate.
20683      UOpNo = User->use_begin().getOperandNo();
20684      User = *User->use_begin();
20685    }
20686
20687    if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
20688        !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
20689      return true;
20690  }
20691  return false;
20692}
20693
20694// Transform to an x86-specific ALU node with flags if there is a chance of
20695// using an RMW op or only the flags are used. Otherwise, leave
20696// the node alone and emit a 'cmp' or 'test' instruction.
20697static bool isProfitableToUseFlagOp(SDValue Op) {
20698  for (SDNode *U : Op->uses())
20699    if (U->getOpcode() != ISD::CopyToReg &&
20700        U->getOpcode() != ISD::SETCC &&
20701        U->getOpcode() != ISD::STORE)
20702      return false;
20703
20704  return true;
20705}
20706
20707/// Emit nodes that will be selected as "test Op0,Op0", or something
20708/// equivalent.
20709static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
20710                        SelectionDAG &DAG, const X86Subtarget &Subtarget) {
20711  // CF and OF aren't always set the way we want. Determine which
20712  // of these we need.
20713  bool NeedCF = false;
20714  bool NeedOF = false;
20715  switch (X86CC) {
20716  default: break;
20717  case X86::COND_A: case X86::COND_AE:
20718  case X86::COND_B: case X86::COND_BE:
20719    NeedCF = true;
20720    break;
20721  case X86::COND_G: case X86::COND_GE:
20722  case X86::COND_L: case X86::COND_LE:
20723  case X86::COND_O: case X86::COND_NO: {
20724    // Check if we really need to set the Overflow flag. If NoSignedWrap is
20725    // present, that is not actually needed.
20727    switch (Op->getOpcode()) {
20728    case ISD::ADD:
20729    case ISD::SUB:
20730    case ISD::MUL:
20731    case ISD::SHL:
20732      if (Op.getNode()->getFlags().hasNoSignedWrap())
20733        break;
20734      LLVM_FALLTHROUGH;
20735    default:
20736      NeedOF = true;
20737      break;
20738    }
20739    break;
20740  }
20741  }
20742  // See if we can use the EFLAGS value from the operand instead of
20743  // doing a separate TEST. TEST always sets OF and CF to 0, so unless
20744  // we prove that the arithmetic won't overflow, we can't use OF or CF.
20745  if (Op.getResNo() != 0 || NeedOF || NeedCF) {
20746    // Emit a CMP with 0, which is the TEST pattern.
20747    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
20748                       DAG.getConstant(0, dl, Op.getValueType()));
20749  }
20750  unsigned Opcode = 0;
20751  unsigned NumOperands = 0;
20752
20753  SDValue ArithOp = Op;
20754
20755  // NOTICE: In the code below we use ArithOp to hold the arithmetic operation,
20756  // which may be the result of a CAST.  We use the variable 'Op', which is the
20757  // non-cast value, when we check for possible users.
20758  switch (ArithOp.getOpcode()) {
20759  case ISD::AND:
20760    // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
20761    // because a TEST instruction will be better.
20762    if (!hasNonFlagsUse(Op))
20763      break;
20764
20765    LLVM_FALLTHROUGH;
20766  case ISD::ADD:
20767  case ISD::SUB:
20768  case ISD::OR:
20769  case ISD::XOR:
20770    if (!isProfitableToUseFlagOp(Op))
20771      break;
20772
20773    // Otherwise use a regular EFLAGS-setting instruction.
20774    switch (ArithOp.getOpcode()) {
20775    default: llvm_unreachable("unexpected operator!");
20776    case ISD::ADD: Opcode = X86ISD::ADD; break;
20777    case ISD::SUB: Opcode = X86ISD::SUB; break;
20778    case ISD::XOR: Opcode = X86ISD::XOR; break;
20779    case ISD::AND: Opcode = X86ISD::AND; break;
20780    case ISD::OR:  Opcode = X86ISD::OR;  break;
20781    }
20782
20783    NumOperands = 2;
20784    break;
20785  case X86ISD::ADD:
20786  case X86ISD::SUB:
20787  case X86ISD::OR:
20788  case X86ISD::XOR:
20789  case X86ISD::AND:
20790    return SDValue(Op.getNode(), 1);
20791  case ISD::SSUBO:
20792  case ISD::USUBO: {
20793    // USUBO/SSUBO will become an X86ISD::SUB and we can use its Z flag.
20794    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
20795    return DAG.getNode(X86ISD::SUB, dl, VTs, Op->getOperand(0),
20796                       Op->getOperand(1)).getValue(1);
20797  }
20798  default:
20799    break;
20800  }
20801
20802  if (Opcode == 0) {
20803    // Emit a CMP with 0, which is the TEST pattern.
20804    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
20805                       DAG.getConstant(0, dl, Op.getValueType()));
20806  }
20807  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
20808  SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
20809
20810  SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
20811  DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
20812  return SDValue(New.getNode(), 1);
20813}
20814
20815/// Emit nodes that will be selected as "cmp Op0,Op1", or something
20816/// equivalent.
20817static std::pair<SDValue, SDValue> EmitCmp(SDValue Op0, SDValue Op1,
20818                                           unsigned X86CC, const SDLoc &dl,
20819                                           SelectionDAG &DAG,
20820                                           const X86Subtarget &Subtarget,
20821                                           SDValue Chain, bool IsSignaling) {
20822  if (isNullConstant(Op1))
20823    return std::make_pair(EmitTest(Op0, X86CC, dl, DAG, Subtarget), Chain);
20824
20825  EVT CmpVT = Op0.getValueType();
20826
20827  if (CmpVT.isFloatingPoint()) {
20828    if (Chain) {
20829      SDValue Res =
20830          DAG.getNode(IsSignaling ? X86ISD::STRICT_FCMPS : X86ISD::STRICT_FCMP,
20831                      dl, {MVT::i32, MVT::Other}, {Chain, Op0, Op1});
20832      return std::make_pair(Res, Res.getValue(1));
20833    }
20834    return std::make_pair(DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1),
20835                          SDValue());
20836  }
20837
20838  assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
20839          CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");
20840
20841  // Only promote the compare up to i32 if it is a 16-bit operation
20842  // with an immediate.  16-bit immediates are to be avoided.
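  // (A 16-bit immediate needs an operand-size prefix that changes the
  // instruction length, which can stall the decoders on some CPUs.)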
20843  if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
20844      !DAG.getMachineFunction().getFunction().hasMinSize()) {
20845    ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
20846    ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
20847    // Don't do this if the immediate can fit in 8-bits.
20848    if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
20849        (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
20850      unsigned ExtendOp =
20851          isX86CCSigned(X86CC) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
20852      if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
20853        // For equality comparisons, try to use SIGN_EXTEND if the input was
20854        // truncated from something with enough sign bits.
20855        if (Op0.getOpcode() == ISD::TRUNCATE) {
20856          SDValue In = Op0.getOperand(0);
20857          unsigned EffBits =
20858              In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
20859          if (EffBits <= 16)
20860            ExtendOp = ISD::SIGN_EXTEND;
20861        } else if (Op1.getOpcode() == ISD::TRUNCATE) {
20862          SDValue In = Op1.getOperand(0);
20863          unsigned EffBits =
20864              In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
20865          if (EffBits <= 16)
20866            ExtendOp = ISD::SIGN_EXTEND;
20867        }
20868      }
20869
20870      CmpVT = MVT::i32;
20871      Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
20872      Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
20873    }
20874  }
20875
20876  // Try to shrink i64 compares if the input has enough zero bits.
20877  // FIXME: Do this for non-constant compares or for a constant on the LHS?
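  // For example, (cmp i64 X, 42) becomes (cmp i32 (trunc X), 42) when the top
  // 32 bits of X are known to be zero.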
20878  if (CmpVT == MVT::i64 && isa<ConstantSDNode>(Op1) && !isX86CCSigned(X86CC) &&
20879      Op0.hasOneUse() && // Hacky way to not break CSE opportunities with sub.
20880      cast<ConstantSDNode>(Op1)->getAPIntValue().getActiveBits() <= 32 &&
20881      DAG.MaskedValueIsZero(Op0, APInt::getHighBitsSet(64, 32))) {
20882    CmpVT = MVT::i32;
20883    Op0 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op0);
20884    Op1 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op1);
20885  }
20886
20887  // Use SUB instead of CMP to enable CSE between SUB and CMP.
20888  SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
20889  SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
20890  return std::make_pair(Sub.getValue(1), SDValue());
20891}
20892
20893/// Convert a comparison if required by the subtarget.
20894SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
20895                                                 SelectionDAG &DAG) const {
20896  // If the subtarget does not support the FUCOMI instruction, floating-point
20897  // comparisons have to be converted.
20898  bool IsCmp = Cmp.getOpcode() == X86ISD::CMP;
20899  bool IsStrictCmp = Cmp.getOpcode() == X86ISD::STRICT_FCMP ||
20900                     Cmp.getOpcode() == X86ISD::STRICT_FCMPS;
20901
20902  if (Subtarget.hasCMov() || (!IsCmp && !IsStrictCmp) ||
20903      !Cmp.getOperand(IsStrictCmp ? 1 : 0).getValueType().isFloatingPoint() ||
20904      !Cmp.getOperand(IsStrictCmp ? 2 : 1).getValueType().isFloatingPoint())
20905    return Cmp;
20906
20907  // The instruction selector will select an FUCOM instruction instead of
20908  // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
20909  // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
20910  // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86any_fcmp ...)), 8))))
20911  SDLoc dl(Cmp);
20912  SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
20913  SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
20914  SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
20915                            DAG.getConstant(8, dl, MVT::i8));
20916  SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
20917
20918  // Some 64-bit targets lack SAHF support, but they do support FCOMI.
20919  assert(Subtarget.hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
20920  return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
20921}
20922
20923/// Check if replacement of SQRT with RSQRT should be disabled.
20924bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
20925  EVT VT = Op.getValueType();
20926
20927  // We never want to use both SQRT and RSQRT instructions for the same input.
20928  if (DAG.getNodeIfExists(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
20929    return false;
20930
20931  if (VT.isVector())
20932    return Subtarget.hasFastVectorFSQRT();
20933  return Subtarget.hasFastScalarFSQRT();
20934}
20935
20936/// The minimum architected relative accuracy is 2^-12. We need one
20937/// Newton-Raphson step to have a good float result (24 bits of precision).
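/// One Newton-Raphson step refines an estimate E of 1/sqrt(X) as
/// E' = E * (1.5 - 0.5 * X * E * E).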
20938SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
20939                                           SelectionDAG &DAG, int Enabled,
20940                                           int &RefinementSteps,
20941                                           bool &UseOneConstNR,
20942                                           bool Reciprocal) const {
20943  EVT VT = Op.getValueType();
20944
20945  // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
20946  // It is likely not profitable to do this for f64 because a double-precision
20947  // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
20948  // instructions: convert to single, rsqrtss, convert back to double, refine
20949  // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
20950  // along with FMA, this could be a throughput win.
20951  // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
20952  // after legalize types.
20953  if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
20954      (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
20955      (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
20956      (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
20957      (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
20958    if (RefinementSteps == ReciprocalEstimate::Unspecified)
20959      RefinementSteps = 1;
20960
20961    UseOneConstNR = false;
20962    // There is no FSQRT for 512-bits, but there is RSQRT14.
20963    unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
20964    return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
20965  }
20966  return SDValue();
20967}
20968
20969/// The minimum architected relative accuracy is 2^-12. We need one
20970/// Newton-Raphson step to have a good float result (24 bits of precision).
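/// One Newton-Raphson step refines an estimate E of 1/X as E' = E * (2 - X * E).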
20971SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
20972                                            int Enabled,
20973                                            int &RefinementSteps) const {
20974  EVT VT = Op.getValueType();
20975
20976  // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
20977  // It is likely not profitable to do this for f64 because a double-precision
20978  // reciprocal estimate with refinement on x86 prior to FMA requires
20979  // 15 instructions: convert to single, rcpss, convert back to double, refine
20980  // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
20981  // along with FMA, this could be a throughput win.
20982
20983  if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
20984      (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
20985      (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
20986      (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
20987    // Enable estimate codegen with 1 refinement step for vector division.
20988    // Scalar division estimates are disabled because they break too much
20989    // real-world code. These defaults are intended to match GCC behavior.
20990    if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
20991      return SDValue();
20992
20993    if (RefinementSteps == ReciprocalEstimate::Unspecified)
20994      RefinementSteps = 1;
20995
20996    // There is no FRCP for 512-bits, but there is RCP14.
20997    unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
20998    return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
20999  }
21000  return SDValue();
21001}
21002
21003/// If we have at least two divisions that use the same divisor, convert to
21004/// multiplication by a reciprocal. This may need to be adjusted for a given
21005/// CPU if a division's cost is not at least twice the cost of a multiplication.
21006/// This is because we still need one division to calculate the reciprocal and
21007/// then we need two multiplies by that reciprocal as replacements for the
21008/// original divisions.
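/// For example, A/D + B/D becomes R = 1/D; A*R + B*R, i.e. one divide and two
/// multiplies instead of two divides.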
21009unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
21010  return 2;
21011}
21012
21013SDValue
21014X86TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
21015                                 SelectionDAG &DAG,
21016                                 SmallVectorImpl<SDNode *> &Created) const {
21017  AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
21018  if (isIntDivCheap(N->getValueType(0), Attr))
21019    return SDValue(N,0); // Lower SDIV as SDIV
21020
21021  assert((Divisor.isPowerOf2() || (-Divisor).isPowerOf2()) &&
21022         "Unexpected divisor!");
21023
21024  // Only perform this transform if CMOV is supported; otherwise the select
21025  // below will become a branch.
21026  if (!Subtarget.hasCMov())
21027    return SDValue();
21028
21029  // fold (sdiv X, pow2)
21030  EVT VT = N->getValueType(0);
21031  // FIXME: Support i8.
21032  if (VT != MVT::i16 && VT != MVT::i32 &&
21033      !(Subtarget.is64Bit() && VT == MVT::i64))
21034    return SDValue();
21035
21036  unsigned Lg2 = Divisor.countTrailingZeros();
21037
21038  // If the divisor is 2 or -2, the default expansion is better.
21039  if (Lg2 == 1)
21040    return SDValue();
21041
21042  SDLoc DL(N);
21043  SDValue N0 = N->getOperand(0);
21044  SDValue Zero = DAG.getConstant(0, DL, VT);
21045  APInt Lg2Mask = APInt::getLowBitsSet(VT.getSizeInBits(), Lg2);
21046  SDValue Pow2MinusOne = DAG.getConstant(Lg2Mask, DL, VT);
21047
21048  // If N0 is negative, we need to add (Pow2 - 1) to it before shifting right.
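  // i.e. Tmp = (X < 0) ? X + (2^Lg2 - 1) : X, then an arithmetic shift right by
  // Lg2, negating the result afterwards if the divisor was negative.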
21049  SDValue Cmp = DAG.getSetCC(DL, MVT::i8, N0, Zero, ISD::SETLT);
21050  SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
21051  SDValue CMov = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
21052
21053  Created.push_back(Cmp.getNode());
21054  Created.push_back(Add.getNode());
21055  Created.push_back(CMov.getNode());
21056
21057  // Divide by pow2.
21058  SDValue SRA =
21059      DAG.getNode(ISD::SRA, DL, VT, CMov, DAG.getConstant(Lg2, DL, MVT::i8));
21060
21061  // If we're dividing by a positive value, we're done.  Otherwise, we must
21062  // negate the result.
21063  if (Divisor.isNonNegative())
21064    return SRA;
21065
21066  Created.push_back(SRA.getNode());
21067  return DAG.getNode(ISD::SUB, DL, VT, Zero, SRA);
21068}
21069
21070/// Result of 'and' is compared against zero. Change to a BT node if possible.
21071/// Returns the BT node and the condition code needed to use it.
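/// For example, both (and (srl X, N), 1) and (and X, (shl 1, N)) compared
/// against zero become (BT X, N) with an AE/B condition code.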
21072static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
21073                            const SDLoc &dl, SelectionDAG &DAG,
21074                            SDValue &X86CC) {
21075  assert(And.getOpcode() == ISD::AND && "Expected AND node!");
21076  SDValue Op0 = And.getOperand(0);
21077  SDValue Op1 = And.getOperand(1);
21078  if (Op0.getOpcode() == ISD::TRUNCATE)
21079    Op0 = Op0.getOperand(0);
21080  if (Op1.getOpcode() == ISD::TRUNCATE)
21081    Op1 = Op1.getOperand(0);
21082
21083  SDValue Src, BitNo;
21084  if (Op1.getOpcode() == ISD::SHL)
21085    std::swap(Op0, Op1);
21086  if (Op0.getOpcode() == ISD::SHL) {
21087    if (isOneConstant(Op0.getOperand(0))) {
21088      // If we looked past a truncate, check that it's only truncating away
21089      // known zeros.
21090      unsigned BitWidth = Op0.getValueSizeInBits();
21091      unsigned AndBitWidth = And.getValueSizeInBits();
21092      if (BitWidth > AndBitWidth) {
21093        KnownBits Known = DAG.computeKnownBits(Op0);
21094        if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
21095          return SDValue();
21096      }
21097      Src = Op1;
21098      BitNo = Op0.getOperand(1);
21099    }
21100  } else if (Op1.getOpcode() == ISD::Constant) {
21101    ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
21102    uint64_t AndRHSVal = AndRHS->getZExtValue();
21103    SDValue AndLHS = Op0;
21104
21105    if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
21106      Src = AndLHS.getOperand(0);
21107      BitNo = AndLHS.getOperand(1);
21108    } else {
21109      // Use BT if the immediate can't be encoded in a TEST instruction or we
21110      // are optimizing for size and the immediate won't fit in a byte.
21111      bool OptForSize = DAG.shouldOptForSize();
21112      if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
21113          isPowerOf2_64(AndRHSVal)) {
21114        Src = AndLHS;
21115        BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
21116                                Src.getValueType());
21117      }
21118    }
21119  }
21120
21121  // No patterns found, give up.
21122  if (!Src.getNode())
21123    return SDValue();
21124
21125  // If Src is i8, promote it to i32 with any_extend.  There is no i8 BT
21126  // instruction.  Since the shift amount is in-range-or-undefined, we know
21127  // that doing a bittest on the i32 value is ok.  We extend to i32 because
21128  // the encoding for the i16 version is larger than the i32 version.
21129  // Also promote i16 to i32 for performance / code size reasons.
21130  if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
21131    Src = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Src);
21132
21133  // See if we can use the 32-bit instruction instead of the 64-bit one for a
21134  // shorter encoding. Since the former takes the modulo 32 of BitNo and the
21135  // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
21136  // known to be zero.
21137  if (Src.getValueType() == MVT::i64 &&
21138      DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
21139    Src = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
21140
21141  // If the operand types disagree, extend the shift amount to match.  Since
21142  // BT ignores high bits (like shifts) we can use anyextend.
21143  if (Src.getValueType() != BitNo.getValueType())
21144    BitNo = DAG.getNode(ISD::ANY_EXTEND, dl, Src.getValueType(), BitNo);
21145
21146  X86CC = DAG.getTargetConstant(CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B,
21147                                dl, MVT::i8);
21148  return DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
21149}
21150
21151/// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
21152/// CMPs.
21153static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
21154                                   SDValue &Op1, bool &IsAlwaysSignaling) {
21155  unsigned SSECC;
21156  bool Swap = false;
21157
21158  // SSE Condition code mapping:
21159  //  0 - EQ
21160  //  1 - LT
21161  //  2 - LE
21162  //  3 - UNORD
21163  //  4 - NEQ
21164  //  5 - NLT
21165  //  6 - NLE
21166  //  7 - ORD
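  //  8 - EQ_UQ  (requires the VEX-encoded AVX compares)
  // 12 - NEQ_OQ (requires the VEX-encoded AVX compares)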
21167  switch (SetCCOpcode) {
21168  default: llvm_unreachable("Unexpected SETCC condition");
21169  case ISD::SETOEQ:
21170  case ISD::SETEQ:  SSECC = 0; break;
21171  case ISD::SETOGT:
21172  case ISD::SETGT:  Swap = true; LLVM_FALLTHROUGH;
21173  case ISD::SETLT:
21174  case ISD::SETOLT: SSECC = 1; break;
21175  case ISD::SETOGE:
21176  case ISD::SETGE:  Swap = true; LLVM_FALLTHROUGH;
21177  case ISD::SETLE:
21178  case ISD::SETOLE: SSECC = 2; break;
21179  case ISD::SETUO:  SSECC = 3; break;
21180  case ISD::SETUNE:
21181  case ISD::SETNE:  SSECC = 4; break;
21182  case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
21183  case ISD::SETUGE: SSECC = 5; break;
21184  case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
21185  case ISD::SETUGT: SSECC = 6; break;
21186  case ISD::SETO:   SSECC = 7; break;
21187  case ISD::SETUEQ: SSECC = 8; break;
21188  case ISD::SETONE: SSECC = 12; break;
21189  }
21190  if (Swap)
21191    std::swap(Op0, Op1);
21192
21193  switch (SetCCOpcode) {
21194  default:
21195    IsAlwaysSignaling = true;
21196    break;
21197  case ISD::SETEQ:
21198  case ISD::SETOEQ:
21199  case ISD::SETUEQ:
21200  case ISD::SETNE:
21201  case ISD::SETONE:
21202  case ISD::SETUNE:
21203  case ISD::SETO:
21204  case ISD::SETUO:
21205    IsAlwaysSignaling = false;
21206    break;
21207  }
21208
21209  return SSECC;
21210}
21211
21212/// Break a 256-bit integer VSETCC into two new 128-bit ones and then
21213/// concatenate the result back.
21214static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
21215  MVT VT = Op.getSimpleValueType();
21216
21217  assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
21218         "Unsupported value type for operation");
21219
21220  unsigned NumElems = VT.getVectorNumElements();
21221  SDLoc dl(Op);
21222  SDValue CC = Op.getOperand(2);
21223
21224  // Extract the LHS vectors
21225  SDValue LHS = Op.getOperand(0);
21226  SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
21227  SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
21228
21229  // Extract the RHS vectors
21230  SDValue RHS = Op.getOperand(1);
21231  SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
21232  SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
21233
21234  // Issue the operation on the smaller types and concatenate the result back
21235  MVT EltVT = VT.getVectorElementType();
21236  MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
21237  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
21238                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
21239                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
21240}
21241
21242static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
21243
21244  SDValue Op0 = Op.getOperand(0);
21245  SDValue Op1 = Op.getOperand(1);
21246  SDValue CC = Op.getOperand(2);
21247  MVT VT = Op.getSimpleValueType();
21248  SDLoc dl(Op);
21249
21250  assert(VT.getVectorElementType() == MVT::i1 &&
21251         "Cannot set masked compare for this operation");
21252
21253  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
21254
21255  // Prefer SETGT over SETLT.
21256  if (SetCCOpcode == ISD::SETLT) {
21257    SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
21258    std::swap(Op0, Op1);
21259  }
21260
21261  return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
21262}
21263
21264/// Given a buildvector constant, return a new vector constant with each element
21265/// incremented or decremented. If incrementing or decrementing would result in
21266/// unsigned overflow or underflow or this is not a simple vector constant,
21267/// return an empty value.
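/// For example, incrementing <4 x i32> <1, 2, 3, 4> yields <2, 3, 4, 5>, while
/// decrementing <0, 1, 2, 3> returns an empty value because the first element
/// would wrap.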
21268static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc) {
21269  auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
21270  if (!BV)
21271    return SDValue();
21272
21273  MVT VT = V.getSimpleValueType();
21274  MVT EltVT = VT.getVectorElementType();
21275  unsigned NumElts = VT.getVectorNumElements();
21276  SmallVector<SDValue, 8> NewVecC;
21277  SDLoc DL(V);
21278  for (unsigned i = 0; i < NumElts; ++i) {
21279    auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
21280    if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
21281      return SDValue();
21282
21283    // Avoid overflow/underflow.
21284    const APInt &EltC = Elt->getAPIntValue();
21285    if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isNullValue()))
21286      return SDValue();
21287
21288    NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
21289  }
21290
21291  return DAG.getBuildVector(VT, DL, NewVecC);
21292}
21293
21294/// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
21295/// Op0 u<= Op1:
21296///   t = psubus Op0, Op1
21297///   pcmpeq t, <0..0>
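/// e.g. for v16i8:  x u<= 42  -->  pcmpeqb (psubusb x, splat(42)), splat(0),
/// since the saturating subtract is zero exactly when x <= 42 in each lane.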
21298static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
21299                                    ISD::CondCode Cond, const SDLoc &dl,
21300                                    const X86Subtarget &Subtarget,
21301                                    SelectionDAG &DAG) {
21302  if (!Subtarget.hasSSE2())
21303    return SDValue();
21304
21305  MVT VET = VT.getVectorElementType();
21306  if (VET != MVT::i8 && VET != MVT::i16)
21307    return SDValue();
21308
21309  switch (Cond) {
21310  default:
21311    return SDValue();
21312  case ISD::SETULT: {
21313    // If the comparison is against a constant we can turn this into a
21314    // setule.  With psubus, setule does not require a swap.  This is
21315    // beneficial because the constant in the register is no longer
21316    // clobbered as the destination, so it can be hoisted out of a loop.
21317    // Only do this pre-AVX since vpcmp* is no longer destructive.
21318    if (Subtarget.hasAVX())
21319      return SDValue();
21320    SDValue ULEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false);
21321    if (!ULEOp1)
21322      return SDValue();
21323    Op1 = ULEOp1;
21324    break;
21325  }
21326  case ISD::SETUGT: {
21327    // If the comparison is against a constant, we can turn this into a setuge.
21328    // This is beneficial because materializing a constant 0 for the PCMPEQ is
21329    // probably cheaper than XOR+PCMPGT using 2 different vector constants:
21330    // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
21331    SDValue UGEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true);
21332    if (!UGEOp1)
21333      return SDValue();
21334    Op1 = Op0;
21335    Op0 = UGEOp1;
21336    break;
21337  }
21338  // Psubus is better than flip-sign because it requires no inversion.
21339  case ISD::SETUGE:
21340    std::swap(Op0, Op1);
21341    break;
21342  case ISD::SETULE:
21343    break;
21344  }
21345
21346  SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
21347  return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
21348                     DAG.getConstant(0, dl, VT));
21349}
21350
21351static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
21352                           SelectionDAG &DAG) {
21353  bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
21354                  Op.getOpcode() == ISD::STRICT_FSETCCS;
21355  SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
21356  SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
21357  SDValue CC = Op.getOperand(IsStrict ? 3 : 2);
21358  MVT VT = Op->getSimpleValueType(0);
21359  ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
21360  bool isFP = Op1.getSimpleValueType().isFloatingPoint();
21361  SDLoc dl(Op);
21362
21363  if (isFP) {
21364#ifndef NDEBUG
21365    MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
21366    assert(EltVT == MVT::f32 || EltVT == MVT::f64);
21367#endif
21368
21369    bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
21370    SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
21371
21372    unsigned Opc;
21373    if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1) {
21374      assert(VT.getVectorNumElements() <= 16);
21375      Opc = IsStrict ? X86ISD::STRICT_CMPM : X86ISD::CMPM;
21376    } else {
21377      Opc = IsStrict ? X86ISD::STRICT_CMPP : X86ISD::CMPP;
21378      // The SSE/AVX packed FP comparison nodes are defined with a
21379      // floating-point vector result that matches the operand type. This allows
21380      // them to work with an SSE1 target (integer vector types are not legal).
21381      VT = Op0.getSimpleValueType();
21382    }
21383
21384    SDValue Cmp;
21385    bool IsAlwaysSignaling;
21386    unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1, IsAlwaysSignaling);
21387    if (!Subtarget.hasAVX()) {
21388      // TODO: We could use following steps to handle a quiet compare with
21389      // signaling encodings.
21390      // 1. Get ordered masks from a quiet ISD::SETO
21391      // 2. Use the masks to mask potential unordered elements in operand A, B
21392      // 3. Get the compare results of masked A, B
21393      // 4. Calculate the final result using the mask and the result from 3
21394      // But currently, we just fall back to scalar operations.
21395      if (IsStrict && IsAlwaysSignaling && !IsSignaling)
21396        return SDValue();
21397
21398      // Insert an extra signaling instruction to raise exception.
21399      if (IsStrict && !IsAlwaysSignaling && IsSignaling) {
21400        SDValue SignalCmp = DAG.getNode(
21401            Opc, dl, {VT, MVT::Other},
21402            {Chain, Op0, Op1, DAG.getTargetConstant(1, dl, MVT::i8)}); // LT_OS
21403        // FIXME: It seems we need to update the flags of all new strict nodes.
21404        // Otherwise, mayRaiseFPException in MI will return false due to
21405        // NoFPExcept = false by default. However, I didn't find it in other
21406        // patches.
21407        SignalCmp->setFlags(Op->getFlags());
21408        Chain = SignalCmp.getValue(1);
21409      }
21410
21411      // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
21412      // emit two comparisons and a logic op to tie them together.
21413      if (SSECC >= 8) {
21414        // LLVM predicate is SETUEQ or SETONE.
21415        unsigned CC0, CC1;
21416        unsigned CombineOpc;
21417        if (Cond == ISD::SETUEQ) {
21418          CC0 = 3; // UNORD
21419          CC1 = 0; // EQ
21420          CombineOpc = X86ISD::FOR;
21421        } else {
21422          assert(Cond == ISD::SETONE);
21423          CC0 = 7; // ORD
21424          CC1 = 4; // NEQ
21425          CombineOpc = X86ISD::FAND;
21426        }
21427
21428        SDValue Cmp0, Cmp1;
21429        if (IsStrict) {
21430          Cmp0 = DAG.getNode(
21431              Opc, dl, {VT, MVT::Other},
21432              {Chain, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8)});
21433          Cmp1 = DAG.getNode(
21434              Opc, dl, {VT, MVT::Other},
21435              {Chain, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8)});
21436          Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Cmp0.getValue(1),
21437                              Cmp1.getValue(1));
21438        } else {
21439          Cmp0 = DAG.getNode(
21440              Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8));
21441          Cmp1 = DAG.getNode(
21442              Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8));
21443        }
21444        Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
21445      } else {
21446        if (IsStrict) {
21447          Cmp = DAG.getNode(
21448              Opc, dl, {VT, MVT::Other},
21449              {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
21450          Chain = Cmp.getValue(1);
21451        } else
21452          Cmp = DAG.getNode(
21453              Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
21454      }
21455    } else {
21456      // Handle all other FP comparisons here.
21457      if (IsStrict) {
21458        // Set bit 4 of the AVX CC to toggle between the quiet and signaling
        // forms when the base predicate does not match the requested behavior.
21459        SSECC |= (IsAlwaysSignaling ^ IsSignaling) << 4;
21460        Cmp = DAG.getNode(
21461            Opc, dl, {VT, MVT::Other},
21462            {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
21463        Chain = Cmp.getValue(1);
21464      } else
21465        Cmp = DAG.getNode(
21466            Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
21467    }
21468
21469    // If this is SSE/AVX CMPP, bitcast the result back to integer to match the
21470    // result type of SETCC. The bitcast is expected to be optimized away
21471    // during combining/isel.
21472    Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
21473
21474    if (IsStrict)
21475      return DAG.getMergeValues({Cmp, Chain}, dl);
21476
21477    return Cmp;
21478  }
21479
21480  assert(!IsStrict && "Strict SETCC only handles FP operands.");
21481
21482  MVT VTOp0 = Op0.getSimpleValueType();
21483  (void)VTOp0;
21484  assert(VTOp0 == Op1.getSimpleValueType() &&
21485         "Expected operands with same type!");
21486  assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
21487         "Invalid number of packed elements for source and destination!");
21488
21489  // The non-AVX512 code below works under the assumption that source and
21490  // destination types are the same.
21491  assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
21492         "Value types for source and destination must be the same!");
21493
21494  // The result is boolean, but operands are int/float
21495  if (VT.getVectorElementType() == MVT::i1) {
21496    // In the AVX-512 architecture, setcc returns a mask with i1 elements,
21497    // but there is no compare instruction for i8 and i16 elements in KNL.
21498    assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
21499           "Unexpected operand type");
21500    return LowerIntVSETCC_AVX512(Op, DAG);
21501  }
21502
21503  // Lower using XOP integer comparisons.
21504  if (VT.is128BitVector() && Subtarget.hasXOP()) {
21505    // Translate compare code to XOP PCOM compare mode.
21506    unsigned CmpMode = 0;
21507    switch (Cond) {
21508    default: llvm_unreachable("Unexpected SETCC condition");
21509    case ISD::SETULT:
21510    case ISD::SETLT: CmpMode = 0x00; break;
21511    case ISD::SETULE:
21512    case ISD::SETLE: CmpMode = 0x01; break;
21513    case ISD::SETUGT:
21514    case ISD::SETGT: CmpMode = 0x02; break;
21515    case ISD::SETUGE:
21516    case ISD::SETGE: CmpMode = 0x03; break;
21517    case ISD::SETEQ: CmpMode = 0x04; break;
21518    case ISD::SETNE: CmpMode = 0x05; break;
21519    }
21520
21521    // Are we comparing unsigned or signed integers?
21522    unsigned Opc =
21523        ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
21524
21525    return DAG.getNode(Opc, dl, VT, Op0, Op1,
21526                       DAG.getTargetConstant(CmpMode, dl, MVT::i8));
21527  }
21528
21529  // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
21530  // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
21531  if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
21532    SDValue BC0 = peekThroughBitcasts(Op0);
21533    if (BC0.getOpcode() == ISD::AND) {
21534      APInt UndefElts;
21535      SmallVector<APInt, 64> EltBits;
21536      if (getTargetConstantBitsFromNode(BC0.getOperand(1),
21537                                        VT.getScalarSizeInBits(), UndefElts,
21538                                        EltBits, false, false)) {
21539        if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
21540          Cond = ISD::SETEQ;
21541          Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
21542        }
21543      }
21544    }
21545  }
21546
21547  // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
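  // For example, with C == 16 in i32 lanes, shifting left by 27 moves bit 4
  // into the sign bit and the arithmetic shift right by 31 broadcasts it,
  // giving all-ones exactly when (x & 16) == 16.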
21548  if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
21549      Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
21550    ConstantSDNode *C1 = isConstOrConstSplat(Op1);
21551    if (C1 && C1->getAPIntValue().isPowerOf2()) {
21552      unsigned BitWidth = VT.getScalarSizeInBits();
21553      unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;
21554
21555      SDValue Result = Op0.getOperand(0);
21556      Result = DAG.getNode(ISD::SHL, dl, VT, Result,
21557                           DAG.getConstant(ShiftAmt, dl, VT));
21558      Result = DAG.getNode(ISD::SRA, dl, VT, Result,
21559                           DAG.getConstant(BitWidth - 1, dl, VT));
21560      return Result;
21561    }
21562  }
21563
21564  // Break 256-bit integer vector compare into smaller ones.
21565  if (VT.is256BitVector() && !Subtarget.hasInt256())
21566    return Lower256IntVSETCC(Op, DAG);
21567
21568  // If this is a SETNE against the signed minimum value, change it to SETGT.
21569  // If this is a SETNE against the signed maximum value, change it to SETLT,
21570  // which will be swapped to SETGT.
21571  // Otherwise we use PCMPEQ+invert.
21572  APInt ConstValue;
21573  if (Cond == ISD::SETNE &&
21574      ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
21575    if (ConstValue.isMinSignedValue())
21576      Cond = ISD::SETGT;
21577    else if (ConstValue.isMaxSignedValue())
21578      Cond = ISD::SETLT;
21579  }
21580
21581  // If both operands are known non-negative, then an unsigned compare is the
21582  // same as a signed compare and there's no need to flip signbits.
21583  // TODO: We could check for more general simplifications here since we're
21584  // computing known bits.
21585  bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
21586                   !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
21587
21588  // Special case: Use min/max operations for unsigned compares.
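  // This relies on a u<= b <=> umin(a, b) == a and a u>= b <=> umax(a, b) == a,
  // with the equality checked by the PCMPEQ below.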
21589  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
21590  if (ISD::isUnsignedIntSetCC(Cond) &&
21591      (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
21592      TLI.isOperationLegal(ISD::UMIN, VT)) {
21593    // If we have a constant operand, increment/decrement it and change the
21594    // condition to avoid an invert.
21595    if (Cond == ISD::SETUGT) {
21596      // X > C --> X >= (C+1) --> X == umax(X, C+1)
21597      if (SDValue UGTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true)) {
21598        Op1 = UGTOp1;
21599        Cond = ISD::SETUGE;
21600      }
21601    }
21602    if (Cond == ISD::SETULT) {
21603      // X < C --> X <= (C-1) --> X == umin(X, C-1)
21604      if (SDValue ULTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false)) {
21605        Op1 = ULTOp1;
21606        Cond = ISD::SETULE;
21607      }
21608    }
21609    bool Invert = false;
21610    unsigned Opc;
21611    switch (Cond) {
21612    default: llvm_unreachable("Unexpected condition code");
21613    case ISD::SETUGT: Invert = true; LLVM_FALLTHROUGH;
21614    case ISD::SETULE: Opc = ISD::UMIN; break;
21615    case ISD::SETULT: Invert = true; LLVM_FALLTHROUGH;
21616    case ISD::SETUGE: Opc = ISD::UMAX; break;
21617    }
21618
21619    SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
21620    Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
21621
21622    // If the logical-not of the result is required, perform that now.
21623    if (Invert)
21624      Result = DAG.getNOT(dl, Result, VT);
21625
21626    return Result;
21627  }
21628
21629  // Try to use SUBUS and PCMPEQ.
21630  if (SDValue V = LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
21631    return V;
21632
21633  // We are handling one of the integer comparisons here. Since SSE only has
21634  // GT and EQ comparisons for integer, swapping operands and multiple
21635  // operations may be required for some comparisons.
21636  unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
21637                                                            : X86ISD::PCMPGT;
21638  bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
21639              Cond == ISD::SETGE || Cond == ISD::SETUGE;
21640  bool Invert = Cond == ISD::SETNE ||
21641                (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
21642
21643  if (Swap)
21644    std::swap(Op0, Op1);
21645
21646  // Check that the operation in question is available (most are plain SSE2,
21647  // but PCMPGTQ and PCMPEQQ have different requirements).
21648  if (VT == MVT::v2i64) {
21649    if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
21650      assert(Subtarget.hasSSE2() && "Don't know how to lower!");
21651
21652      // Special case for sign bit test. We can use a v4i32 PCMPGT and shuffle
21653      // the odd elements over the even elements.
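      // The sign bit of a v2i64 element lives in its high i32 half, so a
      // v4i32 compare of the halves computes it and the shuffle broadcasts
      // each high (odd) result over its i64 pair.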
21654      if (!FlipSigns && !Invert && ISD::isBuildVectorAllZeros(Op0.getNode())) {
21655        Op0 = DAG.getConstant(0, dl, MVT::v4i32);
21656        Op1 = DAG.getBitcast(MVT::v4i32, Op1);
21657
21658        SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
21659        static const int MaskHi[] = { 1, 1, 3, 3 };
21660        SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
21661
21662        return DAG.getBitcast(VT, Result);
21663      }
21664
21665      if (!FlipSigns && !Invert && ISD::isBuildVectorAllOnes(Op1.getNode())) {
21666        Op0 = DAG.getBitcast(MVT::v4i32, Op0);
21667        Op1 = DAG.getConstant(-1, dl, MVT::v4i32);
21668
21669        SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
21670        static const int MaskHi[] = { 1, 1, 3, 3 };
21671        SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
21672
21673        return DAG.getBitcast(VT, Result);
21674      }
21675
21676      // Since SSE has no unsigned integer comparisons, we need to flip the sign
21677      // bits of the inputs before performing those operations. The lower
21678      // compare is always unsigned.
21679      SDValue SB;
21680      if (FlipSigns) {
21681        SB = DAG.getConstant(0x8000000080000000ULL, dl, MVT::v2i64);
21682      } else {
21683        SB = DAG.getConstant(0x0000000080000000ULL, dl, MVT::v2i64);
21684      }
21685      Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
21686      Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);
21687
21688      // Cast everything to the right type.
21689      Op0 = DAG.getBitcast(MVT::v4i32, Op0);
21690      Op1 = DAG.getBitcast(MVT::v4i32, Op1);
21691
21692      // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
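      // e.g. for 0x00000001FFFFFFFF > 0x0000000100000000 the high halves tie,
      // so the low halves decide it and must compare as unsigned; the XOR of
      // bit 31 above is what makes the signed PCMPGTD behave that way.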
21693      SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
21694      SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
21695
21696      // Create masks for only the low parts/high parts of the 64 bit integers.
21697      static const int MaskHi[] = { 1, 1, 3, 3 };
21698      static const int MaskLo[] = { 0, 0, 2, 2 };
21699      SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
21700      SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
21701      SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
21702
21703      SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
21704      Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
21705
21706      if (Invert)
21707        Result = DAG.getNOT(dl, Result, MVT::v4i32);
21708
21709      return DAG.getBitcast(VT, Result);
21710    }
21711
21712    if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
21713      // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
21714      // pcmpeqd + pshufd + pand.
21715      assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
21716
21717      // First cast everything to the right type.
21718      Op0 = DAG.getBitcast(MVT::v4i32, Op0);
21719      Op1 = DAG.getBitcast(MVT::v4i32, Op1);
21720
21721      // Do the compare.
21722      SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
21723
21724      // Make sure the lower and upper halves are both all-ones.
21725      static const int Mask[] = { 1, 0, 3, 2 };
21726      SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
21727      Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
21728
21729      if (Invert)
21730        Result = DAG.getNOT(dl, Result, MVT::v4i32);
21731
21732      return DAG.getBitcast(VT, Result);
21733    }
21734  }
21735
21736  // Since SSE has no unsigned integer comparisons, we need to flip the sign
21737  // bits of the inputs before performing those operations.
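  // This uses the identity a u< b  <=>  (a ^ SignMask) s< (b ^ SignMask).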
21738  if (FlipSigns) {
21739    MVT EltVT = VT.getVectorElementType();
21740    SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
21741                                 VT);
21742    Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
21743    Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
21744  }
21745
21746  SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
21747
21748  // If the logical-not of the result is required, perform that now.
21749  if (Invert)
21750    Result = DAG.getNOT(dl, Result, VT);
21751
21752  return Result;
21753}
21754
21755// Try to select this as a KORTEST+SETCC or KTEST+SETCC if possible.
21756static SDValue EmitAVX512Test(SDValue Op0, SDValue Op1, ISD::CondCode CC,
21757                              const SDLoc &dl, SelectionDAG &DAG,
21758                              const X86Subtarget &Subtarget,
21759                              SDValue &X86CC) {
21760  // Only support equality comparisons.
21761  if (CC != ISD::SETEQ && CC != ISD::SETNE)
21762    return SDValue();
21763
21764  // Must be a bitcast from vXi1.
21765  if (Op0.getOpcode() != ISD::BITCAST)
21766    return SDValue();
21767
21768  Op0 = Op0.getOperand(0);
21769  MVT VT = Op0.getSimpleValueType();
21770  if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
21771      !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
21772      !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
21773    return SDValue();
21774
21775  X86::CondCode X86Cond;
21776  if (isNullConstant(Op1)) {
21777    X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
21778  } else if (isAllOnesConstant(Op1)) {
21779    // C flag is set for all ones.
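    // (KORTEST sets CF when the OR of its operands is all ones and ZF when it
    //  is all zeroes, so either test needs just one instruction.)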
21780    X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
21781  } else
21782    return SDValue();
21783
21784  // If the input is an AND, we can combine its operands into the KTEST.
21785  bool KTestable = false;
21786  if (Subtarget.hasDQI() && (VT == MVT::v8i1 || VT == MVT::v16i1))
21787    KTestable = true;
21788  if (Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1))
21789    KTestable = true;
21790  if (!isNullConstant(Op1))
21791    KTestable = false;
21792  if (KTestable && Op0.getOpcode() == ISD::AND && Op0.hasOneUse()) {
21793    SDValue LHS = Op0.getOperand(0);
21794    SDValue RHS = Op0.getOperand(1);
21795    X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
21796    return DAG.getNode(X86ISD::KTEST, dl, MVT::i32, LHS, RHS);
21797  }
21798
21799  // If the input is an OR, we can combine its operands into the KORTEST.
21800  SDValue LHS = Op0;
21801  SDValue RHS = Op0;
21802  if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
21803    LHS = Op0.getOperand(0);
21804    RHS = Op0.getOperand(1);
21805  }
21806
21807  X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
21808  return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
21809}
21810
21811/// Emit flags for the given setcc condition and operands. Also returns the
21812/// corresponding X86 condition code constant in X86CC.
21813SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
21814                                             ISD::CondCode CC, const SDLoc &dl,
21815                                             SelectionDAG &DAG, SDValue &X86CC,
21816                                             SDValue &Chain,
21817                                             bool IsSignaling) const {
21818  // Optimize to BT if possible.
21819  // Lower (X & (1 << N)) == 0 to BT(X, N).
21820  // Lower ((X >>u N) & 1) != 0 to BT(X, N).
21821  // Lower ((X >>s N) & 1) != 0 to BT(X, N).
21822  if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
21823      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
21824    if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CC))
21825      return BT;
21826  }
21827
21828  // Try to use PTEST for a tree of ORs equality compared with 0.
21829  // TODO: We could do AND tree with all 1s as well by using the C flag.
21830  if (Op0.getOpcode() == ISD::OR && isNullConstant(Op1) &&
21831      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
21832    if (SDValue PTEST = LowerVectorAllZeroTest(Op0, CC, Subtarget, DAG, X86CC))
21833      return PTEST;
21834  }
21835
21836  // Try to lower using KORTEST or KTEST.
21837  if (SDValue Test = EmitAVX512Test(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
21838    return Test;
21839
21840  // Look for X == 0, X == 1, X != 0, or X != 1.  We can simplify some forms of
21841  // these.
21842  if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
21843      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
21844    // If the input is a setcc, then reuse the input setcc or use a new one with
21845    // the inverted condition.
21846    if (Op0.getOpcode() == X86ISD::SETCC) {
21847      bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
21848
21849      X86CC = Op0.getOperand(0);
21850      if (Invert) {
21851        X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
21852        CCode = X86::GetOppositeBranchCondition(CCode);
21853        X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
21854      }
21855
21856      return Op0.getOperand(1);
21857    }
21858  }
21859
21860  // Try to use the carry flag from the add in place of a separate CMP for:
21861  // (seteq (add X, -1), -1). Similar for setne.
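  // X + (-1) produces a carry exactly when X is nonzero, and the sum equals -1
  // only when X is zero, so CF alone answers the seteq/setne against -1.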
21862  if (isAllOnesConstant(Op1) && Op0.getOpcode() == ISD::ADD &&
21863      Op0.getOperand(1) == Op1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
21864    if (isProfitableToUseFlagOp(Op0)) {
21865      SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
21866
21867      SDValue New = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(0),
21868                                Op0.getOperand(1));
21869      DAG.ReplaceAllUsesOfValueWith(SDValue(Op0.getNode(), 0), New);
21870      X86::CondCode CCode = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
21871      X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
21872      return SDValue(New.getNode(), 1);
21873    }
21874  }
21875
21876  bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
21877  X86::CondCode CondCode = TranslateX86CC(CC, dl, IsFP, Op0, Op1, DAG);
21878  if (CondCode == X86::COND_INVALID)
21879    return SDValue();
21880
21881  std::pair<SDValue, SDValue> Tmp =
21882      EmitCmp(Op0, Op1, CondCode, dl, DAG, Subtarget, Chain, IsSignaling);
21883  SDValue EFLAGS = Tmp.first;
21884  if (Chain)
21885    Chain = Tmp.second;
21886  EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
21887  X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
21888  return EFLAGS;
21889}
21890
21891SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
21892
21893  bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
21894                  Op.getOpcode() == ISD::STRICT_FSETCCS;
21895  MVT VT = Op->getSimpleValueType(0);
21896
21897  if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
21898
21899  assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
21900  SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
21901  SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
21902  SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
21903  SDLoc dl(Op);
21904  ISD::CondCode CC =
21905      cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
21906
21907  // Handle f128 first, since one possible outcome is a normal integer
21908  // comparison which gets handled by emitFlagsForSetcc.
21909  if (Op0.getValueType() == MVT::f128) {
21910    softenSetCCOperands(DAG, MVT::f128, Op0, Op1, CC, dl, Op0, Op1, Chain,
21911                        Op.getOpcode() == ISD::STRICT_FSETCCS);
21912
21913    // If softenSetCCOperands returned a scalar, use it.
21914    if (!Op1.getNode()) {
21915      assert(Op0.getValueType() == Op.getValueType() &&
21916             "Unexpected setcc expansion!");
21917      if (IsStrict)
21918        return DAG.getMergeValues({Op0, Chain}, dl);
21919      return Op0;
21920    }
21921  }
21922
21923  SDValue X86CC;
21924  SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC, Chain,
21925                                     Op.getOpcode() == ISD::STRICT_FSETCCS);
21926  if (!EFLAGS)
21927    return SDValue();
21928
21929  SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
21930
21931  if (IsStrict)
21932    return DAG.getMergeValues({Res, Chain}, dl);
21933
21934  return Res;
21935}
21936
21937SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
21938  SDValue LHS = Op.getOperand(0);
21939  SDValue RHS = Op.getOperand(1);
21940  SDValue Carry = Op.getOperand(2);
21941  SDValue Cond = Op.getOperand(3);
21942  SDLoc DL(Op);
21943
21944  assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
21945  X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
21946
21947  // Recreate the carry if needed.
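  // Adding all-ones to a nonzero carry value wraps and sets CF, while adding
  // it to zero does not, so the flag result of this ADD reproduces the
  // incoming carry for the SBB below.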
21948  EVT CarryVT = Carry.getValueType();
21949  APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
21950  Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
21951                      Carry, DAG.getConstant(NegOne, DL, CarryVT));
21952
21953  SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
21954  SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
21955  return getSETCC(CC, Cmp.getValue(1), DL, DAG);
21956}
21957
21958// This function returns three things: the arithmetic computation itself
21959// (Value), an EFLAGS result (Overflow), and a condition code (Cond).  The
21960// flag and the condition code define the case in which the arithmetic
21961// computation overflows.
21962static std::pair<SDValue, SDValue>
21963getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
21964  assert(Op.getResNo() == 0 && "Unexpected result number!");
21965  SDValue Value, Overflow;
21966  SDValue LHS = Op.getOperand(0);
21967  SDValue RHS = Op.getOperand(1);
21968  unsigned BaseOp = 0;
21969  SDLoc DL(Op);
21970  switch (Op.getOpcode()) {
21971  default: llvm_unreachable("Unknown ovf instruction!");
21972  case ISD::SADDO:
21973    BaseOp = X86ISD::ADD;
21974    Cond = X86::COND_O;
21975    break;
21976  case ISD::UADDO:
21977    BaseOp = X86ISD::ADD;
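    // X + 1 overflows (unsigned) exactly when the result wraps to zero, so ZF
    // can stand in for CF here (ZF, unlike CF, is also updated by INC).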
21978    Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
21979    break;
21980  case ISD::SSUBO:
21981    BaseOp = X86ISD::SUB;
21982    Cond = X86::COND_O;
21983    break;
21984  case ISD::USUBO:
21985    BaseOp = X86ISD::SUB;
21986    Cond = X86::COND_B;
21987    break;
21988  case ISD::SMULO:
21989    BaseOp = X86ISD::SMUL;
21990    Cond = X86::COND_O;
21991    break;
21992  case ISD::UMULO:
21993    BaseOp = X86ISD::UMUL;
21994    Cond = X86::COND_O;
21995    break;
21996  }
21997
21998  if (BaseOp) {
21999    // Also sets EFLAGS.
22000    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
22001    Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
22002    Overflow = Value.getValue(1);
22003  }
22004
22005  return std::make_pair(Value, Overflow);
22006}
22007
22008static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
22009  // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
22010  // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
22011  // looks for this combo and may remove the "setcc" instruction if the "setcc"
22012  // has only one use.
22013  SDLoc DL(Op);
22014  X86::CondCode Cond;
22015  SDValue Value, Overflow;
22016  std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);
22017
22018  SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
22019  assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
22020  return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
22021}
22022
22023/// Return true if opcode is a X86 logical comparison.
22024static bool isX86LogicalCmp(SDValue Op) {
22025  unsigned Opc = Op.getOpcode();
22026  if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
22027      Opc == X86ISD::SAHF)
22028    return true;
22029  if (Op.getResNo() == 1 &&
22030      (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
22031       Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
22032       Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
22033    return true;
22034
22035  return false;
22036}
22037
22038static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
22039  if (V.getOpcode() != ISD::TRUNCATE)
22040    return false;
22041
22042  SDValue VOp0 = V.getOperand(0);
22043  unsigned InBits = VOp0.getValueSizeInBits();
22044  unsigned Bits = V.getValueSizeInBits();
22045  return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
22046}
22047
22048SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
22049  bool AddTest = true;
22050  SDValue Cond  = Op.getOperand(0);
22051  SDValue Op1 = Op.getOperand(1);
22052  SDValue Op2 = Op.getOperand(2);
22053  SDLoc DL(Op);
22054  MVT VT = Op1.getSimpleValueType();
22055  SDValue CC;
22056
22057  // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
22058  // are available or VBLENDV if AVX is available.
22059  // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
22060  if (Cond.getOpcode() == ISD::SETCC &&
22061      ((Subtarget.hasSSE2() && VT == MVT::f64) ||
22062       (Subtarget.hasSSE1() && VT == MVT::f32)) &&
22063      VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
22064    SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
22065    bool IsAlwaysSignaling;
22066    unsigned SSECC =
22067        translateX86FSETCC(cast<CondCodeSDNode>(Cond.getOperand(2))->get(),
22068                           CondOp0, CondOp1, IsAlwaysSignaling);
22069
22070    if (Subtarget.hasAVX512()) {
22071      SDValue Cmp =
22072          DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0, CondOp1,
22073                      DAG.getTargetConstant(SSECC, DL, MVT::i8));
22074      assert(!VT.isVector() && "Not a scalar type?");
22075      return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
22076    }
22077
22078    if (SSECC < 8 || Subtarget.hasAVX()) {
22079      SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
22080                                DAG.getTargetConstant(SSECC, DL, MVT::i8));
22081
22082      // If we have AVX, we can use a variable vector select (VBLENDV) instead
22083      // of 3 logic instructions for size savings and potentially speed.
22084      // Unfortunately, there is no scalar form of VBLENDV.
22085
22086      // If either operand is a +0.0 constant, don't try this. We can expect to
22087      // optimize away at least one of the logic instructions later in that
22088      // case, so that sequence would be faster than a variable blend.
22089
22090      // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
22091      // uses XMM0 as the selection register. That may need just as many
22092      // instructions as the AND/ANDN/OR sequence due to register moves, so
22093      // don't bother.
22094      if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
22095          !isNullFPConstant(Op2)) {
22096        // Convert to vectors, do a VSELECT, and convert back to scalar.
22097        // All of the conversions should be optimized away.
22098        MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
22099        SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
22100        SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
22101        SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
22102
22103        MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
22104        VCmp = DAG.getBitcast(VCmpVT, VCmp);
22105
22106        SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
22107
22108        return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
22109                           VSel, DAG.getIntPtrConstant(0, DL));
22110      }
22111      SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
22112      SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
22113      return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
22114    }
22115  }
22116
22117  // AVX512 fallback is to lower selects of scalar floats to masked moves.
22118  if ((VT == MVT::f64 || VT == MVT::f32) && Subtarget.hasAVX512()) {
22119    SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
22120    return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
22121  }
22122
22123  // For v64i1 without 64-bit support we need to split and rejoin.
22124  if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
22125    assert(Subtarget.hasBWI() && "Expected BWI to be legal");
22126    SDValue Op1Lo = extractSubVector(Op1, 0, DAG, DL, 32);
22127    SDValue Op2Lo = extractSubVector(Op2, 0, DAG, DL, 32);
22128    SDValue Op1Hi = extractSubVector(Op1, 32, DAG, DL, 32);
22129    SDValue Op2Hi = extractSubVector(Op2, 32, DAG, DL, 32);
22130    SDValue Lo = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Lo, Op2Lo);
22131    SDValue Hi = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Hi, Op2Hi);
22132    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
22133  }
22134
22135  if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
22136    SDValue Op1Scalar;
22137    if (ISD::isBuildVectorOfConstantSDNodes(Op1.getNode()))
22138      Op1Scalar = ConvertI1VectorToInteger(Op1, DAG);
22139    else if (Op1.getOpcode() == ISD::BITCAST && Op1.getOperand(0))
22140      Op1Scalar = Op1.getOperand(0);
22141    SDValue Op2Scalar;
22142    if (ISD::isBuildVectorOfConstantSDNodes(Op2.getNode()))
22143      Op2Scalar = ConvertI1VectorToInteger(Op2, DAG);
22144    else if (Op2.getOpcode() == ISD::BITCAST && Op2.getOperand(0))
22145      Op2Scalar = Op2.getOperand(0);
22146    if (Op1Scalar.getNode() && Op2Scalar.getNode()) {
22147      SDValue newSelect = DAG.getSelect(DL, Op1Scalar.getValueType(), Cond,
22148                                        Op1Scalar, Op2Scalar);
22149      if (newSelect.getValueSizeInBits() == VT.getSizeInBits())
22150        return DAG.getBitcast(VT, newSelect);
22151      SDValue ExtVec = DAG.getBitcast(MVT::v8i1, newSelect);
22152      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ExtVec,
22153                         DAG.getIntPtrConstant(0, DL));
22154    }
22155  }
22156
22157  if (Cond.getOpcode() == ISD::SETCC) {
22158    if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
22159      Cond = NewCond;
22160      // If the condition was updated, it's possible that the operands of the
22161      // select were also updated (for example, EmitTest has a RAUW). Refresh
22162      // the local references to the select operands in case they got stale.
22163      Op1 = Op.getOperand(1);
22164      Op2 = Op.getOperand(2);
22165    }
22166  }
22167
22168  // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
22169  // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
22170  // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
22171  // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
22172  // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
22173  // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
22174  if (Cond.getOpcode() == X86ISD::SETCC &&
22175      Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
22176      isNullConstant(Cond.getOperand(1).getOperand(1))) {
22177    SDValue Cmp = Cond.getOperand(1);
22178    unsigned CondCode = Cond.getConstantOperandVal(0);
22179
22180    if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
22181        (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
22182      SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
22183      SDValue CmpOp0 = Cmp.getOperand(0);
22184
22185      // Apply further optimizations for special cases
22186      // (select (x != 0), -1, 0) -> neg & sbb
22187      // (select (x == 0), 0, -1) -> neg & sbb
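      // Comparing 0 with x borrows (sets CF) exactly when x is nonzero, and
      // the SBB of zero, zero and that CF then yields all-ones iff CF is set.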
22188      if (isNullConstant(Y) &&
22189          (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
22190        SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
22191        SDValue CmpZero = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Zero, CmpOp0);
22192        SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
22193        Zero = DAG.getConstant(0, DL, Op.getValueType());
22194        return DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, CmpZero);
22195      }
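      // x - 1 borrows (sets CF) exactly when x == 0, so the SBB below
      // materializes all-ones for x == 0 and zero otherwise, ready to be
      // inverted and/or OR'ed with Y per the patterns above.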
22196
22197      Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
22198                        CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
22199      Cmp = ConvertCmpIfNecessary(Cmp, DAG);
22200
22201      SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
22202      SDValue Zero = DAG.getConstant(0, DL, Op.getValueType());
22203      SDValue Res =   // Res = 0 or -1.
22204        DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Cmp);
22205
22206      if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
22207        Res = DAG.getNOT(DL, Res, Res.getValueType());
22208
22209      if (!isNullConstant(Op2))
22210        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
22211      return Res;
22212    } else if (!Subtarget.hasCMov() && CondCode == X86::COND_E &&
22213               Cmp.getOperand(0).getOpcode() == ISD::AND &&
22214               isOneConstant(Cmp.getOperand(0).getOperand(1))) {
22215      SDValue CmpOp0 = Cmp.getOperand(0);
22216      SDValue Src1, Src2;
22217      // Returns true if Op2 is an XOR or OR operator and one of its
22218      // operands is equal to Op1, i.e. the select matches
22219      // (a, a op b) or (b, a op b).
22220      auto isOrXorPattern = [&]() {
22221        if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
22222            (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
22223          Src1 =
22224              Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
22225          Src2 = Op1;
22226          return true;
22227        }
22228        return false;
22229      };
22230
22231      if (isOrXorPattern()) {
22232        SDValue Neg;
22233        unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
22234        // We need a mask of all zeros or all ones with the same size as
22235        // the other operands.
22236        if (CmpSz > VT.getSizeInBits())
22237          Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
22238        else if (CmpSz < VT.getSizeInBits())
22239          Neg = DAG.getNode(ISD::AND, DL, VT,
22240              DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
22241              DAG.getConstant(1, DL, VT));
22242        else
22243          Neg = CmpOp0;
22244        SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
22245                                   Neg); // -(and (x, 0x1))
22246        SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
22247        return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2);  // And Op y
22248      }
22249    }
22250  }
22251
22252  // Look past (and (setcc_carry (cmp ...)), 1).
22253  if (Cond.getOpcode() == ISD::AND &&
22254      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
22255      isOneConstant(Cond.getOperand(1)))
22256    Cond = Cond.getOperand(0);
22257
22258  // If condition flag is set by a X86ISD::CMP, then use it as the condition
22259  // setting operand in place of the X86ISD::SETCC.
22260  unsigned CondOpcode = Cond.getOpcode();
22261  if (CondOpcode == X86ISD::SETCC ||
22262      CondOpcode == X86ISD::SETCC_CARRY) {
22263    CC = Cond.getOperand(0);
22264
22265    SDValue Cmp = Cond.getOperand(1);
22266    bool IllegalFPCMov = false;
22267    if (VT.isFloatingPoint() && !VT.isVector() &&
22268        !isScalarFPTypeInSSEReg(VT))  // FPStack?
22269      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
22270
22271    if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
22272        Cmp.getOpcode() == X86ISD::BT) { // FIXME
22273      Cond = Cmp;
22274      AddTest = false;
22275    }
22276  } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
22277             CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
22278             CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
22279    SDValue Value;
22280    X86::CondCode X86Cond;
22281    std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
22282
22283    CC = DAG.getTargetConstant(X86Cond, DL, MVT::i8);
22284    AddTest = false;
22285  }
22286
22287  if (AddTest) {
22288    // Look past the truncate if the high bits are known zero.
22289    if (isTruncWithZeroHighBitsInput(Cond, DAG))
22290      Cond = Cond.getOperand(0);
22291
22292    // We know the result of AND is compared against zero. Try to match
22293    // it to BT.
22294    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
22295      SDValue BTCC;
22296      if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, BTCC)) {
22297        CC = BTCC;
22298        Cond = BT;
22299        AddTest = false;
22300      }
22301    }
22302  }
22303
22304  if (AddTest) {
22305    CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
22306    Cond = EmitTest(Cond, X86::COND_NE, DL, DAG, Subtarget);
22307  }
22308
22309  // a <  b ? -1 :  0 -> RES = ~setcc_carry
22310  // a <  b ?  0 : -1 -> RES = setcc_carry
22311  // a >= b ? -1 :  0 -> RES = setcc_carry
22312  // a >= b ?  0 : -1 -> RES = ~setcc_carry
22313  if (Cond.getOpcode() == X86ISD::SUB) {
22314    Cond = ConvertCmpIfNecessary(Cond, DAG);
22315    unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
22316
22317    if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
22318        (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
22319        (isNullConstant(Op1) || isNullConstant(Op2))) {
22320      SDValue Res =
22321          DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
22322                      DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
22323      if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
22324        return DAG.getNOT(DL, Res, Res.getValueType());
22325      return Res;
22326    }
22327  }
22328
22329  // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
22330  // widen the cmov and push the truncate through. This avoids introducing a new
22331  // branch during isel and doesn't add any extensions.
22332  if (Op.getValueType() == MVT::i8 &&
22333      Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
22334    SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
22335    if (T1.getValueType() == T2.getValueType() &&
22336        // Blacklist CopyFromReg to avoid partial register stalls.
22337        T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
22338      SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
22339                                 CC, Cond);
22340      return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
22341    }
22342  }
22343
22344  // Or finally, promote i8 cmovs if we have CMOV,
22345  //                 or i16 cmovs if it won't prevent folding a load.
22346  // FIXME: we should not limit promotion of i8 case to only when the CMOV is
22347  //        legal, but EmitLoweredSelect() can not deal with these extensions
22348  //        being inserted between two CMOV's. (in i16 case too TBN)
22349  //        https://bugs.llvm.org/show_bug.cgi?id=40974
22350  if ((Op.getValueType() == MVT::i8 && Subtarget.hasCMov()) ||
22351      (Op.getValueType() == MVT::i16 && !MayFoldLoad(Op1) &&
22352       !MayFoldLoad(Op2))) {
22353    Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
22354    Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
22355    SDValue Ops[] = { Op2, Op1, CC, Cond };
22356    SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
22357    return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
22358  }
22359
22360  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
22361  // condition is true.
22362  SDValue Ops[] = { Op2, Op1, CC, Cond };
22363  return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops);
22364}
22365
22366static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
22367                                     const X86Subtarget &Subtarget,
22368                                     SelectionDAG &DAG) {
22369  MVT VT = Op->getSimpleValueType(0);
22370  SDValue In = Op->getOperand(0);
22371  MVT InVT = In.getSimpleValueType();
22372  assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
22373  MVT VTElt = VT.getVectorElementType();
22374  SDLoc dl(Op);
22375
22376  unsigned NumElts = VT.getVectorNumElements();
22377
22378  // Extend VT if the scalar type is i8/i16 and BWI is not supported.
22379  MVT ExtVT = VT;
22380  if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
22381    // If v16i32 is to be avoided, we'll need to split and concatenate.
22382    if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
22383      return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
22384
22385    ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
22386  }
22387
22388  // Widen to 512-bits if VLX is not supported.
22389  MVT WideVT = ExtVT;
22390  if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
22391    NumElts *= 512 / ExtVT.getSizeInBits();
22392    InVT = MVT::getVectorVT(MVT::i1, NumElts);
22393    In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
22394                     In, DAG.getIntPtrConstant(0, dl));
22395    WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
22396  }
22397
22398  SDValue V;
22399  MVT WideEltVT = WideVT.getVectorElementType();
22400  if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
22401      (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
22402    V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
22403  } else {
22404    SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
22405    SDValue Zero = DAG.getConstant(0, dl, WideVT);
22406    V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
22407  }
22408
22409  // Truncate if we had to extend i16/i8 above.
22410  if (VT != ExtVT) {
22411    WideVT = MVT::getVectorVT(VTElt, NumElts);
22412    V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
22413  }
22414
22415  // Extract back to 128/256-bit if we widened.
22416  if (WideVT != VT)
22417    V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
22418                    DAG.getIntPtrConstant(0, dl));
22419
22420  return V;
22421}
22422
22423static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
22424                               SelectionDAG &DAG) {
22425  SDValue In = Op->getOperand(0);
22426  MVT InVT = In.getSimpleValueType();
22427
22428  if (InVT.getVectorElementType() == MVT::i1)
22429    return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
22430
22431  assert(Subtarget.hasAVX() && "Expected AVX support");
22432  return LowerAVXExtend(Op, DAG, Subtarget);
22433}
22434
22435// Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
22436// For sign extend this needs to handle all vector sizes and SSE4.1 and
22437// non-SSE4.1 targets. For zero extend this should only handle inputs of
22438// MVT::v64i8 when BWI is not supported, but AVX512 is.
22439static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
22440                                        const X86Subtarget &Subtarget,
22441                                        SelectionDAG &DAG) {
22442  SDValue In = Op->getOperand(0);
22443  MVT VT = Op->getSimpleValueType(0);
22444  MVT InVT = In.getSimpleValueType();
22445
22446  MVT SVT = VT.getVectorElementType();
22447  MVT InSVT = InVT.getVectorElementType();
22448  assert(SVT.getSizeInBits() > InSVT.getSizeInBits());
22449
22450  if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
22451    return SDValue();
22452  if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
22453    return SDValue();
22454  if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
22455      !(VT.is256BitVector() && Subtarget.hasAVX()) &&
22456      !(VT.is512BitVector() && Subtarget.hasAVX512()))
22457    return SDValue();
22458
22459  SDLoc dl(Op);
22460  unsigned Opc = Op.getOpcode();
22461  unsigned NumElts = VT.getVectorNumElements();
22462
22463  // For 256-bit vectors, we only need the lower (128-bit) half of the input.
22464  // For 512-bit vectors, we need 128-bits or 256-bits.
22465  if (InVT.getSizeInBits() > 128) {
22466    // The input needs to have at least as many elements as the output, and
22467    // be at least 128 bits wide.
22468    int InSize = InSVT.getSizeInBits() * NumElts;
22469    In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
22470    InVT = In.getSimpleValueType();
22471  }
22472
22473  // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit
22474  // results, so they are legal and shouldn't occur here. AVX2/AVX512 pmovsx*
22475  // instructions still need to be handled here for 256/512-bit results.
22476  if (Subtarget.hasInt256()) {
22477    assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
22478
22479    if (InVT.getVectorNumElements() != NumElts)
22480      return DAG.getNode(Op.getOpcode(), dl, VT, In);
22481
22482    // FIXME: Apparently we create inreg operations that could be regular
22483    // extends.
22484    unsigned ExtOpc =
22485        Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
22486                                             : ISD::ZERO_EXTEND;
22487    return DAG.getNode(ExtOpc, dl, VT, In);
22488  }
22489
22490  // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
22491  if (Subtarget.hasAVX()) {
22492    assert(VT.is256BitVector() && "256-bit vector expected");
22493    MVT HalfVT = VT.getHalfNumVectorElementsVT();
22494    int HalfNumElts = HalfVT.getVectorNumElements();
22495
22496    unsigned NumSrcElts = InVT.getVectorNumElements();
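         // Shuffle the upper half of the source down into the lower lanes so it can
         // be extended by a second half-width extend node.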
22497    SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
22498    for (int i = 0; i != HalfNumElts; ++i)
22499      HiMask[i] = HalfNumElts + i;
22500
22501    SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
22502    SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
22503    Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
22504    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
22505  }
22506
22507  // We should only get here for sign extend.
22508  assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
22509  assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
22510
22511  // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
22512  SDValue Curr = In;
22513  SDValue SignExt = Curr;
22514
22515  // As SRAI is only available on i16/i32 types, we expand only up to i32
22516  // and handle i64 separately.
22517  if (InVT != MVT::v4i32) {
22518    MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;
22519
22520    unsigned DestWidth = DestVT.getScalarSizeInBits();
22521    unsigned Scale = DestWidth / InSVT.getSizeInBits();
22522
22523    unsigned InNumElts = InVT.getVectorNumElements();
22524    unsigned DestElts = DestVT.getVectorNumElements();
22525
22526    // Build a shuffle mask that takes each input element and places it in the
22527    // MSBs of the new element size.
22528    SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
22529    for (unsigned i = 0; i != DestElts; ++i)
22530      Mask[i * Scale + (Scale - 1)] = i;
22531
22532    Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
22533    Curr = DAG.getBitcast(DestVT, Curr);
22534
22535    unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
22536    SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
22537                          DAG.getTargetConstant(SignExtShift, dl, MVT::i8));
22538  }
22539
22540  if (VT == MVT::v2i64) {
22541    assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
22542    SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
22543    SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
22544    SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
22545    SignExt = DAG.getBitcast(VT, SignExt);
22546  }
22547
22548  return SignExt;
22549}
22550
22551static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
22552                                SelectionDAG &DAG) {
22553  MVT VT = Op->getSimpleValueType(0);
22554  SDValue In = Op->getOperand(0);
22555  MVT InVT = In.getSimpleValueType();
22556  SDLoc dl(Op);
22557
22558  if (InVT.getVectorElementType() == MVT::i1)
22559    return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
22560
22561  assert(VT.isVector() && InVT.isVector() && "Expected vector type");
22562  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
22563         "Expected same number of elements");
22564  assert((VT.getVectorElementType() == MVT::i16 ||
22565          VT.getVectorElementType() == MVT::i32 ||
22566          VT.getVectorElementType() == MVT::i64) &&
22567         "Unexpected element type");
22568  assert((InVT.getVectorElementType() == MVT::i8 ||
22569          InVT.getVectorElementType() == MVT::i16 ||
22570          InVT.getVectorElementType() == MVT::i32) &&
22571         "Unexpected element type");
22572
22573  // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
22574  if (InVT == MVT::v8i8) {
22575    if (VT != MVT::v8i64)
22576      return SDValue();
22577
22578    In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
22579                     MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
22580    return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, VT, In);
22581  }
22582
22583  if (Subtarget.hasInt256())
22584    return Op;
22585
22586  // Optimize vectors in AVX mode:
22587  // sign extend v8i16 to v8i32 and
22588  //             v4i32 to v4i64.
22589  //
22590  // Divide the input vector into two parts:
22591  // for v4i32 the high shuffle mask will be {2, 3, -1, -1}.
22592  // Use a vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
22593  // then concat the vectors back to the original VT.
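       // For example, v8i16 -> v8i32 is lowered roughly as:
       //   Lo = sign_extend_vector_inreg(In)                          ; low 4 x i32
       //   Hi = sign_extend_vector_inreg(shuffle<4,5,6,7,u,u,u,u>(In)); high 4 x i32
       //   Result = concat_vectors(Lo, Hi)                            ; 8 x i32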
22594  MVT HalfVT = VT.getHalfNumVectorElementsVT();
22595  SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);
22596
22597  unsigned NumElems = InVT.getVectorNumElements();
22598  SmallVector<int,8> ShufMask(NumElems, -1);
22599  for (unsigned i = 0; i != NumElems/2; ++i)
22600    ShufMask[i] = i + NumElems/2;
22601
22602  SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
22603  OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);
22604
22605  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
22606}
22607
22608/// Change a vector store into a pair of half-size vector stores.
22609static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
22610  SDValue StoredVal = Store->getValue();
22611  assert((StoredVal.getValueType().is256BitVector() ||
22612          StoredVal.getValueType().is512BitVector()) &&
22613         "Expecting 256/512-bit op");
22614
22615  // Splitting volatile memory ops is not allowed unless the operation was not
22616  // legal to begin with. Assume the input store is legal (this transform is
22617  // only used for targets with AVX). Note: It is possible that we have an
22618  // illegal type like v2i128, and so we could allow splitting a volatile store
22619  // in that case if that is important.
22620  if (!Store->isSimple())
22621    return SDValue();
22622
22623  EVT StoreVT = StoredVal.getValueType();
22624  unsigned NumElems = StoreVT.getVectorNumElements();
22625  unsigned HalfSize = StoredVal.getValueSizeInBits() / 2;
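       // Byte offset of the upper half from the base pointer; also used to compute
       // the alignment of the second store.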
22626  unsigned HalfAlign = (128 == HalfSize ? 16 : 32);
22627
22628  SDLoc DL(Store);
22629  SDValue Value0 = extractSubVector(StoredVal, 0, DAG, DL, HalfSize);
22630  SDValue Value1 = extractSubVector(StoredVal, NumElems / 2, DAG, DL, HalfSize);
22631  SDValue Ptr0 = Store->getBasePtr();
22632  SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, HalfAlign, DL);
22633  unsigned Alignment = Store->getAlignment();
22634  SDValue Ch0 =
22635      DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
22636                   Alignment, Store->getMemOperand()->getFlags());
22637  SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
22638                             Store->getPointerInfo().getWithOffset(HalfAlign),
22639                             MinAlign(Alignment, HalfAlign),
22640                             Store->getMemOperand()->getFlags());
22641  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
22642}
22643
22644/// Scalarize a vector store, bitcasting to TargetVT to determine the scalar
22645/// type.
22646static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
22647                                    SelectionDAG &DAG) {
22648  SDValue StoredVal = Store->getValue();
22649  assert(StoreVT.is128BitVector() &&
22650         StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
22651  StoredVal = DAG.getBitcast(StoreVT, StoredVal);
22652
22653  // Splitting volatile memory ops is not allowed unless the operation was not
22654  // legal to begin with. We are assuming the input op is legal (this transform
22655  // is only used for targets with AVX).
22656  if (!Store->isSimple())
22657    return SDValue();
22658
22659  MVT StoreSVT = StoreVT.getScalarType();
22660  unsigned NumElems = StoreVT.getVectorNumElements();
22661  unsigned ScalarSize = StoreSVT.getStoreSize();
22662  unsigned Alignment = Store->getAlignment();
22663
22664  SDLoc DL(Store);
22665  SmallVector<SDValue, 4> Stores;
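       // Emit one scalar store per element at increasing byte offsets, then merge
       // the store chains with a TokenFactor.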
22666  for (unsigned i = 0; i != NumElems; ++i) {
22667    unsigned Offset = i * ScalarSize;
22668    SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(), Offset, DL);
22669    SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
22670                              DAG.getIntPtrConstant(i, DL));
22671    SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
22672                              Store->getPointerInfo().getWithOffset(Offset),
22673                              MinAlign(Alignment, Offset),
22674                              Store->getMemOperand()->getFlags());
22675    Stores.push_back(Ch);
22676  }
22677  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
22678}
22679
22680static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
22681                          SelectionDAG &DAG) {
22682  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
22683  SDLoc dl(St);
22684  SDValue StoredVal = St->getValue();
22685
22686  // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
22687  if (StoredVal.getValueType().isVector() &&
22688      StoredVal.getValueType().getVectorElementType() == MVT::i1) {
22689    assert(StoredVal.getValueType().getVectorNumElements() <= 8 &&
22690           "Unexpected VT");
22691    assert(!St->isTruncatingStore() && "Expected non-truncating store");
22692    assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
22693           "Expected AVX512F without AVX512DQI");
22694
22695    StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
22696                            DAG.getUNDEF(MVT::v16i1), StoredVal,
22697                            DAG.getIntPtrConstant(0, dl));
22698    StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
22699    StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
22700
22701    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
22702                        St->getPointerInfo(), St->getAlignment(),
22703                        St->getMemOperand()->getFlags());
22704  }
22705
22706  if (St->isTruncatingStore())
22707    return SDValue();
22708
22709  // If this is a 256-bit store of concatenated ops, we are better off splitting
22710  // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
22711  // and lets each half execute independently. Some cores would split the op into
22712  // halves anyway, so the concat (vinsertf128) is purely an extra op.
22713  MVT StoreVT = StoredVal.getSimpleValueType();
22714  if (StoreVT.is256BitVector()) {
22715    SmallVector<SDValue, 4> CatOps;
22716    if (StoredVal.hasOneUse() && collectConcatOps(StoredVal.getNode(), CatOps))
22717      return splitVectorStore(St, DAG);
22718    return SDValue();
22719  }
22720
22721  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22722  assert(StoreVT.isVector() && StoreVT.getSizeInBits() == 64 &&
22723         "Unexpected VT");
22724  assert(TLI.getTypeAction(*DAG.getContext(), StoreVT) ==
22725             TargetLowering::TypeWidenVector && "Unexpected type action!");
22726
22727  EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StoreVT);
22728  StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
22729                          DAG.getUNDEF(StoreVT));
22730
22731  if (Subtarget.hasSSE2()) {
22732    // Widen the vector, cast to a v2x64 type, extract the single 64-bit element
22733    // and store it.
22734    MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
22735    MVT CastVT = MVT::getVectorVT(StVT, 2);
22736    StoredVal = DAG.getBitcast(CastVT, StoredVal);
22737    StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
22738                            DAG.getIntPtrConstant(0, dl));
22739
22740    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
22741                        St->getPointerInfo(), St->getAlignment(),
22742                        St->getMemOperand()->getFlags());
22743  }
22744  assert(Subtarget.hasSSE1() && "Expected SSE");
22745  SDVTList Tys = DAG.getVTList(MVT::Other);
22746  SDValue Ops[] = {St->getChain(), StoredVal, St->getBasePtr()};
22747  return DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops, MVT::i64,
22748                                 St->getMemOperand());
22749}
22750
22751// Lower vector extended loads using a shuffle. If SSSE3 is not available we
22752// may emit an illegal shuffle but the expansion is still better than scalar
22753// code. We generate sext/sext_invec for SEXTLOADs when it is legal; otherwise
22754// we emit a shuffle and an arithmetic shift.
22755// FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
22756// TODO: It is possible to support ZExt by zeroing the undef values during
22757// the shuffle phase or after the shuffle.
22758static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
22759                                 SelectionDAG &DAG) {
22760  MVT RegVT = Op.getSimpleValueType();
22761  assert(RegVT.isVector() && "We only custom lower vector loads.");
22762  assert(RegVT.isInteger() &&
22763         "We only custom lower integer vector loads.");
22764
22765  LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
22766  SDLoc dl(Ld);
22767
22768  // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
22769  if (RegVT.getVectorElementType() == MVT::i1) {
22770    assert(EVT(RegVT) == Ld->getMemoryVT() && "Expected non-extending load");
22771    assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
22772    assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
22773           "Expected AVX512F without AVX512DQI");
22774
22775    SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
22776                                Ld->getPointerInfo(), Ld->getAlignment(),
22777                                Ld->getMemOperand()->getFlags());
22778
22779    // Replace chain users with the new chain.
22780    assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
22781
22782    SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
22783    Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
22784                      DAG.getBitcast(MVT::v16i1, Val),
22785                      DAG.getIntPtrConstant(0, dl));
22786    return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
22787  }
22788
22789  return SDValue();
22790}
22791
22792/// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
22793/// each of which has no other use apart from the AND / OR.
22794static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
22795  Opc = Op.getOpcode();
22796  if (Opc != ISD::OR && Opc != ISD::AND)
22797    return false;
22798  return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
22799          Op.getOperand(0).hasOneUse() &&
22800          Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
22801          Op.getOperand(1).hasOneUse());
22802}
22803
22804/// Return true if node is an ISD::XOR of a X86ISD::SETCC and 1 and that the
22805/// SETCC node has a single use.
22806static bool isXor1OfSetCC(SDValue Op) {
22807  if (Op.getOpcode() != ISD::XOR)
22808    return false;
22809  if (isOneConstant(Op.getOperand(1)))
22810    return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
22811           Op.getOperand(0).hasOneUse();
22812  return false;
22813}
22814
22815SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
22816  bool addTest = true;
22817  SDValue Chain = Op.getOperand(0);
22818  SDValue Cond  = Op.getOperand(1);
22819  SDValue Dest  = Op.getOperand(2);
22820  SDLoc dl(Op);
22821  SDValue CC;
22822  bool Inverted = false;
22823
22824  if (Cond.getOpcode() == ISD::SETCC) {
22825    // Check for setcc([su]{add,sub,mul}o == 0).
22826    if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
22827        isNullConstant(Cond.getOperand(1)) &&
22828        Cond.getOperand(0).getResNo() == 1 &&
22829        (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
22830         Cond.getOperand(0).getOpcode() == ISD::UADDO ||
22831         Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
22832         Cond.getOperand(0).getOpcode() == ISD::USUBO ||
22833         Cond.getOperand(0).getOpcode() == ISD::SMULO ||
22834         Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
22835      Inverted = true;
22836      Cond = Cond.getOperand(0);
22837    } else {
22838      if (SDValue NewCond = LowerSETCC(Cond, DAG))
22839        Cond = NewCond;
22840    }
22841  }
22842#if 0
22843  // FIXME: LowerXALUO doesn't handle these!!
22844  else if (Cond.getOpcode() == X86ISD::ADD  ||
22845           Cond.getOpcode() == X86ISD::SUB  ||
22846           Cond.getOpcode() == X86ISD::SMUL ||
22847           Cond.getOpcode() == X86ISD::UMUL)
22848    Cond = LowerXALUO(Cond, DAG);
22849#endif
22850
22851  // Look past (and (setcc_carry (cmp ...)), 1).
22852  if (Cond.getOpcode() == ISD::AND &&
22853      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
22854      isOneConstant(Cond.getOperand(1)))
22855    Cond = Cond.getOperand(0);
22856
22857  // If the condition flag is set by an X86ISD::CMP, then use it as the condition
22858  // setting operand in place of the X86ISD::SETCC.
22859  unsigned CondOpcode = Cond.getOpcode();
22860  if (CondOpcode == X86ISD::SETCC ||
22861      CondOpcode == X86ISD::SETCC_CARRY) {
22862    CC = Cond.getOperand(0);
22863
22864    SDValue Cmp = Cond.getOperand(1);
22865    unsigned Opc = Cmp.getOpcode();
22866    // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
22867    if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
22868      Cond = Cmp;
22869      addTest = false;
22870    } else {
22871      switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
22872      default: break;
22873      case X86::COND_O:
22874      case X86::COND_B:
22875        // These can only come from an arithmetic instruction with overflow,
22876        // e.g. SADDO, UADDO.
22877        Cond = Cond.getOperand(1);
22878        addTest = false;
22879        break;
22880      }
22881    }
22882  }
22883  CondOpcode = Cond.getOpcode();
22884  if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
22885      CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
22886      CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
22887    SDValue Value;
22888    X86::CondCode X86Cond;
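         // Lower the overflow arithmetic to an X86 node that sets EFLAGS and get the
         // condition code to branch on.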
22889    std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
22890
22891    if (Inverted)
22892      X86Cond = X86::GetOppositeBranchCondition(X86Cond);
22893
22894    CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
22895    addTest = false;
22896  } else {
22897    unsigned CondOpc;
22898    if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
22899      SDValue Cmp = Cond.getOperand(0).getOperand(1);
22900      if (CondOpc == ISD::OR) {
22901        // Also, recognize the pattern generated by an FCMP_UNE. We can emit
22902        // two branches instead of an explicit OR instruction with a
22903        // separate test.
22904        if (Cmp == Cond.getOperand(1).getOperand(1) &&
22905            isX86LogicalCmp(Cmp)) {
22906          CC = Cond.getOperand(0).getOperand(0);
22907          Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
22908                              Chain, Dest, CC, Cmp);
22909          CC = Cond.getOperand(1).getOperand(0);
22910          Cond = Cmp;
22911          addTest = false;
22912        }
22913      } else { // ISD::AND
22914        // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
22915        // two branches instead of an explicit AND instruction with a
22916        // separate test. However, we only do this if this block doesn't
22917        // have a fall-through edge, because this requires an explicit
22918        // jmp when the condition is false.
22919        if (Cmp == Cond.getOperand(1).getOperand(1) &&
22920            isX86LogicalCmp(Cmp) &&
22921            Op.getNode()->hasOneUse()) {
22922          X86::CondCode CCode0 =
22923              (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
22924          CCode0 = X86::GetOppositeBranchCondition(CCode0);
22925          CC = DAG.getTargetConstant(CCode0, dl, MVT::i8);
22926          SDNode *User = *Op.getNode()->use_begin();
22927          // Look for an unconditional branch following this conditional branch.
22928          // We need this because we need to reverse the successors in order
22929          // to implement FCMP_OEQ.
22930          if (User->getOpcode() == ISD::BR) {
22931            SDValue FalseBB = User->getOperand(1);
22932            SDNode *NewBR =
22933              DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
22934            assert(NewBR == User);
22935            (void)NewBR;
22936            Dest = FalseBB;
22937
22938            Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), Chain,
22939                                Dest, CC, Cmp);
22940            X86::CondCode CCode1 =
22941                (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
22942            CCode1 = X86::GetOppositeBranchCondition(CCode1);
22943            CC = DAG.getTargetConstant(CCode1, dl, MVT::i8);
22944            Cond = Cmp;
22945            addTest = false;
22946          }
22947        }
22948      }
22949    } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
22950      // Recognize "xorb (setcc), 1" patterns. The xor inverts the condition.
22951      // It should be transformed by the DAG combiner except when the condition
22952      // is set by an arithmetic-with-overflow node.
22953      X86::CondCode CCode =
22954        (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
22955      CCode = X86::GetOppositeBranchCondition(CCode);
22956      CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
22957      Cond = Cond.getOperand(0).getOperand(1);
22958      addTest = false;
22959    } else if (Cond.getOpcode() == ISD::SETCC &&
22960               cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
22961      // For FCMP_OEQ, we can emit
22962      // two branches instead of an explicit AND instruction with a
22963      // separate test. However, we only do this if this block doesn't
22964      // have a fall-through edge, because this requires an explicit
22965      // jmp when the condition is false.
22966      if (Op.getNode()->hasOneUse()) {
22967        SDNode *User = *Op.getNode()->use_begin();
22968        // Look for an unconditional branch following this conditional branch.
22969        // We need this because we need to reverse the successors in order
22970        // to implement FCMP_OEQ.
22971        if (User->getOpcode() == ISD::BR) {
22972          SDValue FalseBB = User->getOperand(1);
22973          SDNode *NewBR =
22974            DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
22975          assert(NewBR == User);
22976          (void)NewBR;
22977          Dest = FalseBB;
22978
22979          SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
22980                                    Cond.getOperand(0), Cond.getOperand(1));
22981          Cmp = ConvertCmpIfNecessary(Cmp, DAG);
22982          CC = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
22983          Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
22984                              Chain, Dest, CC, Cmp);
22985          CC = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
22986          Cond = Cmp;
22987          addTest = false;
22988        }
22989      }
22990    } else if (Cond.getOpcode() == ISD::SETCC &&
22991               cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
22992      // For FCMP_UNE, we can emit
22993      // two branches instead of an explicit OR instruction with a
22994      // separate test.
22995      SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
22996                                Cond.getOperand(0), Cond.getOperand(1));
22997      Cmp = ConvertCmpIfNecessary(Cmp, DAG);
22998      CC = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
22999      Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
23000                          Chain, Dest, CC, Cmp);
23001      CC = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
23002      Cond = Cmp;
23003      addTest = false;
23004    }
23005  }
23006
23007  if (addTest) {
23008    // Look past the truncate if the high bits are known zero.
23009    if (isTruncWithZeroHighBitsInput(Cond, DAG))
23010        Cond = Cond.getOperand(0);
23011
23012    // We know the result of AND is compared against zero. Try to match
23013    // it to BT.
23014    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
23015      SDValue BTCC;
23016      if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, dl, DAG, BTCC)) {
23017        CC = BTCC;
23018        Cond = BT;
23019        addTest = false;
23020      }
23021    }
23022  }
23023
23024  if (addTest) {
23025    X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
23026    CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
23027    Cond = EmitTest(Cond, X86Cond, dl, DAG, Subtarget);
23028  }
23029  Cond = ConvertCmpIfNecessary(Cond, DAG);
23030  return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
23031                     Chain, Dest, CC, Cond);
23032}
23033
23034// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
23035// Calls to _alloca are needed to probe the stack when allocating more than 4k
23036// bytes in one go. Touching the stack at 4K increments is necessary to ensure
23037// that the guard pages used by the OS virtual memory manager are allocated in
23038// the correct sequence.
23039SDValue
23040X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
23041                                           SelectionDAG &DAG) const {
23042  MachineFunction &MF = DAG.getMachineFunction();
23043  bool SplitStack = MF.shouldSplitStack();
23044  bool EmitStackProbe = !getStackProbeSymbolName(MF).empty();
23045  bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
23046               SplitStack || EmitStackProbe;
23047  SDLoc dl(Op);
23048
23049  // Get the inputs.
23050  SDNode *Node = Op.getNode();
23051  SDValue Chain = Op.getOperand(0);
23052  SDValue Size  = Op.getOperand(1);
23053  MaybeAlign Alignment(Op.getConstantOperandVal(2));
23054  EVT VT = Node->getValueType(0);
23055
23056  // Chain the dynamic stack allocation so that it doesn't modify the stack
23057  // pointer when other instructions are using the stack.
23058  Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
23059
23060  bool Is64Bit = Subtarget.is64Bit();
23061  MVT SPTy = getPointerTy(DAG.getDataLayout());
23062
23063  SDValue Result;
23064  if (!Lower) {
23065    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23066    unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
23067    assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
23068                    " not tell us which reg is the stack pointer!");
23069
23070    SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
23071    Chain = SP.getValue(1);
23072    const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
23073    const Align StackAlign(TFI.getStackAlignment());
23074    Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
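         // If the requested alignment exceeds the default stack alignment, round the
         // new stack pointer down to the requested alignment.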
23075    if (Alignment && Alignment > StackAlign)
23076      Result =
23077          DAG.getNode(ISD::AND, dl, VT, Result,
23078                      DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
23079    Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
23080  } else if (SplitStack) {
23081    MachineRegisterInfo &MRI = MF.getRegInfo();
23082
23083    if (Is64Bit) {
23084      // The 64-bit implementation of segmented stacks needs to clobber both r10
23085      // and r11. This makes it impossible to use it along with nested parameters.
23086      const Function &F = MF.getFunction();
23087      for (const auto &A : F.args()) {
23088        if (A.hasNestAttr())
23089          report_fatal_error("Cannot use segmented stacks with functions that "
23090                             "have nested arguments.");
23091      }
23092    }
23093
23094    const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
23095    Register Vreg = MRI.createVirtualRegister(AddrRegClass);
23096    Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
23097    Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
23098                                DAG.getRegister(Vreg, SPTy));
23099  } else {
23100    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
23101    Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
23102    MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);
23103
23104    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23105    Register SPReg = RegInfo->getStackRegister();
23106    SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
23107    Chain = SP.getValue(1);
23108
23109    if (Alignment) {
23110      SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
23111                       DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
23112      Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
23113    }
23114
23115    Result = SP;
23116  }
23117
23118  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
23119                             DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
23120
23121  SDValue Ops[2] = {Result, Chain};
23122  return DAG.getMergeValues(Ops, dl);
23123}
23124
23125SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
23126  MachineFunction &MF = DAG.getMachineFunction();
23127  auto PtrVT = getPointerTy(MF.getDataLayout());
23128  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
23129
23130  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
23131  SDLoc DL(Op);
23132
23133  if (!Subtarget.is64Bit() ||
23134      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
23135    // vastart just stores the address of the VarArgsFrameIndex slot into the
23136    // memory location argument.
23137    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
23138    return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
23139                        MachinePointerInfo(SV));
23140  }
23141
23142  // __va_list_tag:
23143  //   gp_offset         (0 - 6 * 8)
23144  //   fp_offset         (48 - 48 + 8 * 16)
23145  //   overflow_arg_area (points to parameters passed in memory).
23146  //   reg_save_area
23147  SmallVector<SDValue, 8> MemOps;
23148  SDValue FIN = Op.getOperand(1);
23149  // Store gp_offset
23150  SDValue Store = DAG.getStore(
23151      Op.getOperand(0), DL,
23152      DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
23153      MachinePointerInfo(SV));
23154  MemOps.push_back(Store);
23155
23156  // Store fp_offset
23157  FIN = DAG.getMemBasePlusOffset(FIN, 4, DL);
23158  Store = DAG.getStore(
23159      Op.getOperand(0), DL,
23160      DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
23161      MachinePointerInfo(SV, 4));
23162  MemOps.push_back(Store);
23163
23164  // Store ptr to overflow_arg_area
23165  FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
23166  SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
23167  Store =
23168      DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
23169  MemOps.push_back(Store);
23170
23171  // Store ptr to reg_save_area.
23172  FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
23173      Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
23174  SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
23175  Store = DAG.getStore(
23176      Op.getOperand(0), DL, RSFIN, FIN,
23177      MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
23178  MemOps.push_back(Store);
23179  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
23180}
23181
23182SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
23183  assert(Subtarget.is64Bit() &&
23184         "LowerVAARG only handles 64-bit va_arg!");
23185  assert(Op.getNumOperands() == 4);
23186
23187  MachineFunction &MF = DAG.getMachineFunction();
23188  if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
23189    // The Win64 ABI uses char* instead of a structure.
23190    return DAG.expandVAArg(Op.getNode());
23191
23192  SDValue Chain = Op.getOperand(0);
23193  SDValue SrcPtr = Op.getOperand(1);
23194  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
23195  unsigned Align = Op.getConstantOperandVal(3);
23196  SDLoc dl(Op);
23197
23198  EVT ArgVT = Op.getNode()->getValueType(0);
23199  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
23200  uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
23201  uint8_t ArgMode;
23202
23203  // Decide which area this value should be read from.
23204  // TODO: Implement the AMD64 ABI in its entirety. This simple
23205  // selection mechanism works only for the basic types.
23206  if (ArgVT == MVT::f80) {
23207    llvm_unreachable("va_arg for f80 not yet implemented");
23208  } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
23209    ArgMode = 2;  // Argument passed in XMM register. Use fp_offset.
23210  } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
23211    ArgMode = 1;  // Argument passed in GPR64 register(s). Use gp_offset.
23212  } else {
23213    llvm_unreachable("Unhandled argument type in LowerVAARG");
23214  }
23215
23216  if (ArgMode == 2) {
23217    // Sanity Check: Make sure using fp_offset makes sense.
23218    assert(!Subtarget.useSoftFloat() &&
23219           !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
23220           Subtarget.hasSSE1());
23221  }
23222
23223  // Insert a VAARG_64 node into the DAG.
23224  // VAARG_64 returns two values: the variable argument address and the chain.
23225  SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32),
23226                       DAG.getConstant(ArgMode, dl, MVT::i8),
23227                       DAG.getConstant(Align, dl, MVT::i32)};
23228  SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
23229  SDValue VAARG = DAG.getMemIntrinsicNode(
23230    X86ISD::VAARG_64, dl,
23231    VTs, InstOps, MVT::i64,
23232    MachinePointerInfo(SV),
23233    /*Align=*/0,
23234    MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
23235  Chain = VAARG.getValue(1);
23236
23237  // Load the next argument and return it
23238  return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
23239}
23240
23241static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
23242                           SelectionDAG &DAG) {
23243  // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
23244  // where a va_list is still an i8*.
23245  assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
23246  if (Subtarget.isCallingConvWin64(
23247        DAG.getMachineFunction().getFunction().getCallingConv()))
23248    // Probably a Win64 va_copy.
23249    return DAG.expandVACopy(Op.getNode());
23250
23251  SDValue Chain = Op.getOperand(0);
23252  SDValue DstPtr = Op.getOperand(1);
23253  SDValue SrcPtr = Op.getOperand(2);
23254  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
23255  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
23256  SDLoc DL(Op);
23257
23258  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
23259                       DAG.getIntPtrConstant(24, DL), 8, /*isVolatile*/false,
23260                       false, false,
23261                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
23262}
23263
23264// Helper to get immediate/variable SSE shift opcode from other shift opcodes.
23265static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
23266  switch (Opc) {
23267  case ISD::SHL:
23268  case X86ISD::VSHL:
23269  case X86ISD::VSHLI:
23270    return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
23271  case ISD::SRL:
23272  case X86ISD::VSRL:
23273  case X86ISD::VSRLI:
23274    return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
23275  case ISD::SRA:
23276  case X86ISD::VSRA:
23277  case X86ISD::VSRAI:
23278    return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
23279  }
23280  llvm_unreachable("Unknown target vector shift node");
23281}
23282
23283/// Handle vector element shifts where the shift amount is a constant.
23284/// Takes immediate version of shift as input.
23285static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
23286                                          SDValue SrcOp, uint64_t ShiftAmt,
23287                                          SelectionDAG &DAG) {
23288  MVT ElementType = VT.getVectorElementType();
23289
23290  // Bitcast the source vector to the output type; this is mainly necessary
23291  // for vXi8/vXi64 shifts.
23292  if (VT != SrcOp.getSimpleValueType())
23293    SrcOp = DAG.getBitcast(VT, SrcOp);
23294
23295  // Fold this packed shift into its first operand if ShiftAmt is 0.
23296  if (ShiftAmt == 0)
23297    return SrcOp;
23298
23299  // Check for ShiftAmt >= element width
23300  if (ShiftAmt >= ElementType.getSizeInBits()) {
23301    if (Opc == X86ISD::VSRAI)
23302      ShiftAmt = ElementType.getSizeInBits() - 1;
23303    else
23304      return DAG.getConstant(0, dl, VT);
23305  }
23306
23307  assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
23308         && "Unknown target vector shift-by-constant node");
23309
23310  // Fold this packed vector shift into a build vector if SrcOp is a
23311  // vector of Constants or UNDEFs.
23312  if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
23313    SmallVector<SDValue, 8> Elts;
23314    unsigned NumElts = SrcOp->getNumOperands();
23315
23316    switch (Opc) {
23317    default: llvm_unreachable("Unknown opcode!");
23318    case X86ISD::VSHLI:
23319      for (unsigned i = 0; i != NumElts; ++i) {
23320        SDValue CurrentOp = SrcOp->getOperand(i);
23321        if (CurrentOp->isUndef()) {
23322          // Must produce 0s in the correct bits.
23323          Elts.push_back(DAG.getConstant(0, dl, ElementType));
23324          continue;
23325        }
23326        auto *ND = cast<ConstantSDNode>(CurrentOp);
23327        const APInt &C = ND->getAPIntValue();
23328        Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
23329      }
23330      break;
23331    case X86ISD::VSRLI:
23332      for (unsigned i = 0; i != NumElts; ++i) {
23333        SDValue CurrentOp = SrcOp->getOperand(i);
23334        if (CurrentOp->isUndef()) {
23335          // Must produce 0s in the correct bits.
23336          Elts.push_back(DAG.getConstant(0, dl, ElementType));
23337          continue;
23338        }
23339        auto *ND = cast<ConstantSDNode>(CurrentOp);
23340        const APInt &C = ND->getAPIntValue();
23341        Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
23342      }
23343      break;
23344    case X86ISD::VSRAI:
23345      for (unsigned i = 0; i != NumElts; ++i) {
23346        SDValue CurrentOp = SrcOp->getOperand(i);
23347        if (CurrentOp->isUndef()) {
23348          // All shifted-in bits must be the same, so use 0.
23349          Elts.push_back(DAG.getConstant(0, dl, ElementType));
23350          continue;
23351        }
23352        auto *ND = cast<ConstantSDNode>(CurrentOp);
23353        const APInt &C = ND->getAPIntValue();
23354        Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
23355      }
23356      break;
23357    }
23358
23359    return DAG.getBuildVector(VT, dl, Elts);
23360  }
23361
23362  return DAG.getNode(Opc, dl, VT, SrcOp,
23363                     DAG.getTargetConstant(ShiftAmt, dl, MVT::i8));
23364}
23365
23366/// Handle vector element shifts where the shift amount may or may not be a
23367/// constant. Takes immediate version of shift as input.
23368static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
23369                                   SDValue SrcOp, SDValue ShAmt,
23370                                   const X86Subtarget &Subtarget,
23371                                   SelectionDAG &DAG) {
23372  MVT SVT = ShAmt.getSimpleValueType();
23373  assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
23374
23375  // Catch shift-by-constant.
23376  if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
23377    return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
23378                                      CShAmt->getZExtValue(), DAG);
23379
23380  // Change opcode to non-immediate version.
23381  Opc = getTargetVShiftUniformOpcode(Opc, true);
23382
23383  // Need to build a vector containing the shift amount.
23384  // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
23385  // +====================+============+=======================================+
23386  // | ShAmt is           | HasSSE4.1? | Construct ShAmt vector as             |
23387  // +====================+============+=======================================+
23388  // | i64                | Yes, No    | Use ShAmt as lowest elt               |
23389  // | i32                | Yes        | zero-extend in-reg                    |
23390  // | (i32 zext(i16/i8)) | Yes        | zero-extend in-reg                    |
23391  // | (i32 zext(i16/i8)) | No         | byte-shift-in-reg                     |
23392  // | i16/i32            | No         | v4i32 build_vector(ShAmt, 0, ud, ud)) |
23393  // +====================+============+=======================================+
23394
23395  if (SVT == MVT::i64)
23396    ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v2i64, ShAmt);
23397  else if (ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
23398           ShAmt.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
23399           (ShAmt.getOperand(0).getSimpleValueType() == MVT::i16 ||
23400            ShAmt.getOperand(0).getSimpleValueType() == MVT::i8)) {
23401    ShAmt = ShAmt.getOperand(0);
23402    MVT AmtTy = ShAmt.getSimpleValueType() == MVT::i8 ? MVT::v16i8 : MVT::v8i16;
23403    ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), AmtTy, ShAmt);
23404    if (Subtarget.hasSSE41())
23405      ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
23406                          MVT::v2i64, ShAmt);
23407    else {
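           // Pre-SSE4.1 zero-extension in-register: shift the i8/i16 amount up to the
           // top of the 128-bit vector and logically shift it back down, clearing the
           // bits above it.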
23408      SDValue ByteShift = DAG.getTargetConstant(
23409          (128 - AmtTy.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
23410      ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
23411      ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
23412                          ByteShift);
23413      ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
23414                          ByteShift);
23415    }
23416  } else if (Subtarget.hasSSE41() &&
23417             ShAmt.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
23418    ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v4i32, ShAmt);
23419    ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
23420                        MVT::v2i64, ShAmt);
23421  } else {
23422    SDValue ShOps[4] = {ShAmt, DAG.getConstant(0, dl, SVT), DAG.getUNDEF(SVT),
23423                        DAG.getUNDEF(SVT)};
23424    ShAmt = DAG.getBuildVector(MVT::v4i32, dl, ShOps);
23425  }
23426
23427  // The return type has to be a 128-bit type with the same element
23428  // type as the input type.
23429  MVT EltVT = VT.getVectorElementType();
23430  MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
23431
23432  ShAmt = DAG.getBitcast(ShVT, ShAmt);
23433  return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
23434}
23435
23436/// Return \p Mask with the necessary casting or extending
23437/// according to \p MaskVT when lowering masking intrinsics.
23438static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
23439                           const X86Subtarget &Subtarget, SelectionDAG &DAG,
23440                           const SDLoc &dl) {
23441
23442  if (isAllOnesConstant(Mask))
23443    return DAG.getConstant(1, dl, MaskVT);
23444  if (X86::isZeroNode(Mask))
23445    return DAG.getConstant(0, dl, MaskVT);
23446
23447  assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
23448
23449  if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
23450    assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
23451    assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
23452    // In 32-bit mode, bitcasting an i64 is illegal; split it into two i32 halves.
23453    SDValue Lo, Hi;
23454    Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
23455                        DAG.getConstant(0, dl, MVT::i32));
23456    Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
23457                        DAG.getConstant(1, dl, MVT::i32));
23458
23459    Lo = DAG.getBitcast(MVT::v32i1, Lo);
23460    Hi = DAG.getBitcast(MVT::v32i1, Hi);
23461
23462    return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
23463  } else {
23464    MVT BitcastVT = MVT::getVectorVT(MVT::i1,
23465                                     Mask.getSimpleValueType().getSizeInBits());
23466    // When MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
23467    // are extracted by EXTRACT_SUBVECTOR.
23468    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
23469                       DAG.getBitcast(BitcastVT, Mask),
23470                       DAG.getIntPtrConstant(0, dl));
23471  }
23472}
23473
23474/// Return (and \p Op, \p Mask) for compare instructions or
23475/// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
23476/// necessary casting or extending for \p Mask when lowering masking intrinsics.
23477static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
23478                  SDValue PreservedSrc,
23479                  const X86Subtarget &Subtarget,
23480                  SelectionDAG &DAG) {
23481  MVT VT = Op.getSimpleValueType();
23482  MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
23483  unsigned OpcodeSelect = ISD::VSELECT;
23484  SDLoc dl(Op);
23485
23486  if (isAllOnesConstant(Mask))
23487    return Op;
23488
23489  SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23490
23491  if (PreservedSrc.isUndef())
23492    PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
23493  return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
23494}
23495
23496/// Creates an SDNode for a predicated scalar operation.
23497/// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
23498/// The mask comes in as MVT::i8 and is transformed
23499/// to MVT::v1i1 while lowering masking intrinsics.
23500/// The main difference between ScalarMaskingNode and VectorMaskingNode is that
23501/// the former uses "X86select" instead of "vselect", since we cannot create the
23502/// "vselect" node for a scalar instruction.
23503static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
23504                                    SDValue PreservedSrc,
23505                                    const X86Subtarget &Subtarget,
23506                                    SelectionDAG &DAG) {
23507
23508  if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
23509    if (MaskConst->getZExtValue() & 0x1)
23510      return Op;
23511
23512  MVT VT = Op.getSimpleValueType();
23513  SDLoc dl(Op);
23514
23515  assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
23516  SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
23517                              DAG.getBitcast(MVT::v8i1, Mask),
23518                              DAG.getIntPtrConstant(0, dl));
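       // Compare/class intrinsics already produce a mask result, so apply the mask
       // with an AND rather than a select.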
23519  if (Op.getOpcode() == X86ISD::FSETCCM ||
23520      Op.getOpcode() == X86ISD::FSETCCM_SAE ||
23521      Op.getOpcode() == X86ISD::VFPCLASSS)
23522    return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
23523
23524  if (PreservedSrc.isUndef())
23525    PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
23526  return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
23527}
23528
23529static int getSEHRegistrationNodeSize(const Function *Fn) {
23530  if (!Fn->hasPersonalityFn())
23531    report_fatal_error(
23532        "querying registration node size for function without personality");
23533  // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
23534  // WinEHStatePass for the full struct definition.
23535  switch (classifyEHPersonality(Fn->getPersonalityFn())) {
23536  case EHPersonality::MSVC_X86SEH: return 24;
23537  case EHPersonality::MSVC_CXX: return 16;
23538  default: break;
23539  }
23540  report_fatal_error(
23541      "can only recover FP for 32-bit MSVC EH personality functions");
23542}
23543
23544/// When the MSVC runtime transfers control to us, either to an outlined
23545/// function or when returning to a parent frame after catching an exception, we
23546/// recover the parent frame pointer by doing arithmetic on the incoming EBP.
23547/// Here's the math:
23548///   RegNodeBase = EntryEBP - RegNodeSize
23549///   ParentFP = RegNodeBase - ParentFrameOffset
23550/// Subtracting RegNodeSize takes us to the offset of the registration node, and
23551/// subtracting the offset (negative on x86) takes us back to the parent FP.
23552static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
23553                                   SDValue EntryEBP) {
23554  MachineFunction &MF = DAG.getMachineFunction();
23555  SDLoc dl;
23556
23557  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23558  MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
23559
23560  // It's possible that the parent function no longer has a personality function
23561  // if the exceptional code was optimized away, in which case we just return
23562  // the incoming EBP.
23563  if (!Fn->hasPersonalityFn())
23564    return EntryEBP;
23565
23566  // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
23567  // registration, or the .set_setframe offset.
23568  MCSymbol *OffsetSym =
23569      MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
23570          GlobalValue::dropLLVMManglingEscape(Fn->getName()));
23571  SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
23572  SDValue ParentFrameOffset =
23573      DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
23574
23575  // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
23576  // prologue to RBP in the parent function.
23577  const X86Subtarget &Subtarget =
23578      static_cast<const X86Subtarget &>(DAG.getSubtarget());
23579  if (Subtarget.is64Bit())
23580    return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
23581
23582  int RegNodeSize = getSEHRegistrationNodeSize(Fn);
23583  // RegNodeBase = EntryEBP - RegNodeSize
23584  // ParentFP = RegNodeBase - ParentFrameOffset
23585  SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
23586                                    DAG.getConstant(RegNodeSize, dl, PtrVT));
23587  return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
23588}
23589
23590SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
23591                                                   SelectionDAG &DAG) const {
23592  // Helper to detect if the operand is CUR_DIRECTION rounding mode.
23593  auto isRoundModeCurDirection = [](SDValue Rnd) {
23594    if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
23595      return C->getAPIntValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;
23596
23597    return false;
23598  };
23599  auto isRoundModeSAE = [](SDValue Rnd) {
23600    if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
23601      unsigned RC = C->getZExtValue();
23602      if (RC & X86::STATIC_ROUNDING::NO_EXC) {
23603        // Clear the NO_EXC bit and check remaining bits.
23604        RC ^= X86::STATIC_ROUNDING::NO_EXC;
23605        // As a convenience we allow either no other bits set, or an explicit
23606        // CUR_DIRECTION.
23607        return RC == 0 || RC == X86::STATIC_ROUNDING::CUR_DIRECTION;
23608      }
23609    }
23610
23611    return false;
23612  };
23613  auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
23614    if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
23615      RC = C->getZExtValue();
23616      if (RC & X86::STATIC_ROUNDING::NO_EXC) {
23617        // Clear the NO_EXC bit and check remaining bits.
23618        RC ^= X86::STATIC_ROUNDING::NO_EXC;
23619        return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
23620               RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
23621               RC == X86::STATIC_ROUNDING::TO_POS_INF ||
23622               RC == X86::STATIC_ROUNDING::TO_ZERO;
23623      }
23624    }
23625
23626    return false;
23627  };
23628
23629  SDLoc dl(Op);
23630  unsigned IntNo = Op.getConstantOperandVal(0);
23631  MVT VT = Op.getSimpleValueType();
23632  const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
23633
23634  if (IntrData) {
23635    switch(IntrData->Type) {
23636    case INTR_TYPE_1OP: {
23637      // We specify 2 possible opcodes for intrinsics with rounding modes.
23638      // First, we check if the intrinsic may have a non-default rounding mode
23639      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
23640      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
23641      if (IntrWithRoundingModeOpcode != 0) {
23642        SDValue Rnd = Op.getOperand(2);
23643        unsigned RC = 0;
23644        if (isRoundModeSAEToX(Rnd, RC))
23645          return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
23646                             Op.getOperand(1),
23647                             DAG.getTargetConstant(RC, dl, MVT::i32));
23648        if (!isRoundModeCurDirection(Rnd))
23649          return SDValue();
23650      }
23651      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
23652                         Op.getOperand(1));
23653    }
23654    case INTR_TYPE_1OP_SAE: {
23655      SDValue Sae = Op.getOperand(2);
23656
23657      unsigned Opc;
23658      if (isRoundModeCurDirection(Sae))
23659        Opc = IntrData->Opc0;
23660      else if (isRoundModeSAE(Sae))
23661        Opc = IntrData->Opc1;
23662      else
23663        return SDValue();
23664
23665      return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
23666    }
23667    case INTR_TYPE_2OP: {
23668      SDValue Src2 = Op.getOperand(2);
23669
23670      // We specify 2 possible opcodes for intrinsics with rounding modes.
23671      // First, we check if the intrinsic may have a non-default rounding mode
23672      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
23673      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
23674      if (IntrWithRoundingModeOpcode != 0) {
23675        SDValue Rnd = Op.getOperand(3);
23676        unsigned RC = 0;
23677        if (isRoundModeSAEToX(Rnd, RC))
23678          return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
23679                             Op.getOperand(1), Src2,
23680                             DAG.getTargetConstant(RC, dl, MVT::i32));
23681        if (!isRoundModeCurDirection(Rnd))
23682          return SDValue();
23683      }
23684
23685      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
23686                         Op.getOperand(1), Src2);
23687    }
23688    case INTR_TYPE_2OP_SAE: {
23689      SDValue Sae = Op.getOperand(3);
23690
23691      unsigned Opc;
23692      if (isRoundModeCurDirection(Sae))
23693        Opc = IntrData->Opc0;
23694      else if (isRoundModeSAE(Sae))
23695        Opc = IntrData->Opc1;
23696      else
23697        return SDValue();
23698
23699      return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
23700                         Op.getOperand(2));
23701    }
23702    case INTR_TYPE_3OP:
23703    case INTR_TYPE_3OP_IMM8: {
23704      SDValue Src1 = Op.getOperand(1);
23705      SDValue Src2 = Op.getOperand(2);
23706      SDValue Src3 = Op.getOperand(3);
23707
23708      // We specify 2 possible opcodes for intrinsics with rounding modes.
23709      // First, we check whether the intrinsic may have a non-default rounding
23710      // mode (IntrData->Opc1 != 0), then we check the rounding mode operand.
23711      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
23712      if (IntrWithRoundingModeOpcode != 0) {
23713        SDValue Rnd = Op.getOperand(4);
23714        unsigned RC = 0;
23715        if (isRoundModeSAEToX(Rnd, RC))
23716          return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
23717                             Src1, Src2, Src3,
23718                             DAG.getTargetConstant(RC, dl, MVT::i32));
23719        if (!isRoundModeCurDirection(Rnd))
23720          return SDValue();
23721      }
23722
23723      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
23724                         {Src1, Src2, Src3});
23725    }
23726    case INTR_TYPE_4OP:
23727      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
23728        Op.getOperand(2), Op.getOperand(3), Op.getOperand(4));
23729    case INTR_TYPE_1OP_MASK: {
23730      SDValue Src = Op.getOperand(1);
23731      SDValue PassThru = Op.getOperand(2);
23732      SDValue Mask = Op.getOperand(3);
23733      // We add the rounding mode to the node when
23734      //   - an RC opcode is specified and
23735      //   - RC is not "current direction".
23736      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
23737      if (IntrWithRoundingModeOpcode != 0) {
23738        SDValue Rnd = Op.getOperand(4);
23739        unsigned RC = 0;
23740        if (isRoundModeSAEToX(Rnd, RC))
23741          return getVectorMaskingNode(
23742              DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
23743                          Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
23744              Mask, PassThru, Subtarget, DAG);
23745        if (!isRoundModeCurDirection(Rnd))
23746          return SDValue();
23747      }
23748      return getVectorMaskingNode(
23749          DAG.getNode(IntrData->Opc0, dl, VT, Src), Mask, PassThru,
23750          Subtarget, DAG);
23751    }
23752    case INTR_TYPE_1OP_MASK_SAE: {
23753      SDValue Src = Op.getOperand(1);
23754      SDValue PassThru = Op.getOperand(2);
23755      SDValue Mask = Op.getOperand(3);
23756      SDValue Rnd = Op.getOperand(4);
23757
23758      unsigned Opc;
23759      if (isRoundModeCurDirection(Rnd))
23760        Opc = IntrData->Opc0;
23761      else if (isRoundModeSAE(Rnd))
23762        Opc = IntrData->Opc1;
23763      else
23764        return SDValue();
23765
23766      return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src), Mask, PassThru,
23767                                  Subtarget, DAG);
23768    }
23769    case INTR_TYPE_SCALAR_MASK: {
23770      SDValue Src1 = Op.getOperand(1);
23771      SDValue Src2 = Op.getOperand(2);
23772      SDValue passThru = Op.getOperand(3);
23773      SDValue Mask = Op.getOperand(4);
23774      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
23775      // There are 2 kinds of intrinsics in this group:
23776      // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
23777      // (2) With rounding mode and sae - 7 operands.
23778      bool HasRounding = IntrWithRoundingModeOpcode != 0;
23779      if (Op.getNumOperands() == (5U + HasRounding)) {
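             // Form (1): (id, Src1, Src2, PassThru, Mask) plus an optional rounding
             // operand when Opc1 is set.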
23780        if (HasRounding) {
23781          SDValue Rnd = Op.getOperand(5);
23782          unsigned RC = 0;
23783          if (isRoundModeSAEToX(Rnd, RC))
23784            return getScalarMaskingNode(
23785                DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
23786                            DAG.getTargetConstant(RC, dl, MVT::i32)),
23787                Mask, passThru, Subtarget, DAG);
23788          if (!isRoundModeCurDirection(Rnd))
23789            return SDValue();
23790        }
23791        return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
23792                                                Src2),
23793                                    Mask, passThru, Subtarget, DAG);
23794      }
23795
23796      assert(Op.getNumOperands() == (6U + HasRounding) &&
23797             "Unexpected intrinsic form");
23798      SDValue RoundingMode = Op.getOperand(5);
23799      unsigned Opc = IntrData->Opc0;
23800      if (HasRounding) {
23801        SDValue Sae = Op.getOperand(6);
23802        if (isRoundModeSAE(Sae))
23803          Opc = IntrWithRoundingModeOpcode;
23804        else if (!isRoundModeCurDirection(Sae))
23805          return SDValue();
23806      }
23807      return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
23808                                              Src2, RoundingMode),
23809                                  Mask, passThru, Subtarget, DAG);
23810    }
23811    case INTR_TYPE_SCALAR_MASK_RND: {
23812      SDValue Src1 = Op.getOperand(1);
23813      SDValue Src2 = Op.getOperand(2);
23814      SDValue passThru = Op.getOperand(3);
23815      SDValue Mask = Op.getOperand(4);
23816      SDValue Rnd = Op.getOperand(5);
23817
23818      SDValue NewOp;
23819      unsigned RC = 0;
23820      if (isRoundModeCurDirection(Rnd))
23821        NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
23822      else if (isRoundModeSAEToX(Rnd, RC))
23823        NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
23824                            DAG.getTargetConstant(RC, dl, MVT::i32));
23825      else
23826        return SDValue();
23827
23828      return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
23829    }
23830    case INTR_TYPE_SCALAR_MASK_SAE: {
23831      SDValue Src1 = Op.getOperand(1);
23832      SDValue Src2 = Op.getOperand(2);
23833      SDValue passThru = Op.getOperand(3);
23834      SDValue Mask = Op.getOperand(4);
23835      SDValue Sae = Op.getOperand(5);
23836      unsigned Opc;
23837      if (isRoundModeCurDirection(Sae))
23838        Opc = IntrData->Opc0;
23839      else if (isRoundModeSAE(Sae))
23840        Opc = IntrData->Opc1;
23841      else
23842        return SDValue();
23843
23844      return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
23845                                  Mask, passThru, Subtarget, DAG);
23846    }
23847    case INTR_TYPE_2OP_MASK: {
23848      SDValue Src1 = Op.getOperand(1);
23849      SDValue Src2 = Op.getOperand(2);
23850      SDValue PassThru = Op.getOperand(3);
23851      SDValue Mask = Op.getOperand(4);
23852      SDValue NewOp;
23853      if (IntrData->Opc1 != 0) {
23854        SDValue Rnd = Op.getOperand(5);
23855        unsigned RC = 0;
23856        if (isRoundModeSAEToX(Rnd, RC))
23857          NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
23858                              DAG.getTargetConstant(RC, dl, MVT::i32));
23859        else if (!isRoundModeCurDirection(Rnd))
23860          return SDValue();
23861      }
23862      if (!NewOp)
23863        NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
23864      return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
23865    }
23866    case INTR_TYPE_2OP_MASK_SAE: {
23867      SDValue Src1 = Op.getOperand(1);
23868      SDValue Src2 = Op.getOperand(2);
23869      SDValue PassThru = Op.getOperand(3);
23870      SDValue Mask = Op.getOperand(4);
23871
23872      unsigned Opc = IntrData->Opc0;
23873      if (IntrData->Opc1 != 0) {
23874        SDValue Sae = Op.getOperand(5);
23875        if (isRoundModeSAE(Sae))
23876          Opc = IntrData->Opc1;
23877        else if (!isRoundModeCurDirection(Sae))
23878          return SDValue();
23879      }
23880
23881      return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
23882                                  Mask, PassThru, Subtarget, DAG);
23883    }
23884    case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
23885      SDValue Src1 = Op.getOperand(1);
23886      SDValue Src2 = Op.getOperand(2);
23887      SDValue Src3 = Op.getOperand(3);
23888      SDValue PassThru = Op.getOperand(4);
23889      SDValue Mask = Op.getOperand(5);
23890      SDValue Sae = Op.getOperand(6);
23891      unsigned Opc;
23892      if (isRoundModeCurDirection(Sae))
23893        Opc = IntrData->Opc0;
23894      else if (isRoundModeSAE(Sae))
23895        Opc = IntrData->Opc1;
23896      else
23897        return SDValue();
23898
23899      return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
23900                                  Mask, PassThru, Subtarget, DAG);
23901    }
23902    case INTR_TYPE_3OP_MASK_SAE: {
23903      SDValue Src1 = Op.getOperand(1);
23904      SDValue Src2 = Op.getOperand(2);
23905      SDValue Src3 = Op.getOperand(3);
23906      SDValue PassThru = Op.getOperand(4);
23907      SDValue Mask = Op.getOperand(5);
23908
23909      unsigned Opc = IntrData->Opc0;
23910      if (IntrData->Opc1 != 0) {
23911        SDValue Sae = Op.getOperand(6);
23912        if (isRoundModeSAE(Sae))
23913          Opc = IntrData->Opc1;
23914        else if (!isRoundModeCurDirection(Sae))
23915          return SDValue();
23916      }
23917      return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
23918                                  Mask, PassThru, Subtarget, DAG);
23919    }
23920    case BLENDV: {
23921      SDValue Src1 = Op.getOperand(1);
23922      SDValue Src2 = Op.getOperand(2);
23923      SDValue Src3 = Op.getOperand(3);
23924
23925      EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
23926      Src3 = DAG.getBitcast(MaskVT, Src3);
23927
23928      // Reverse the operands to match VSELECT order.
23929      return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
23930    }
23931    case VPERM_2OP : {
23932      SDValue Src1 = Op.getOperand(1);
23933      SDValue Src2 = Op.getOperand(2);
23934
23935      // Swap Src1 and Src2 in the node creation.
23936      return DAG.getNode(IntrData->Opc0, dl, VT, Src2, Src1);
23937    }
23938    case IFMA_OP:
23939      // NOTE: We need to swizzle the operands to pass the multiply operands
23940      // first.
23941      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
23942                         Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
23943    case FPCLASSS: {
23944      SDValue Src1 = Op.getOperand(1);
23945      SDValue Imm = Op.getOperand(2);
23946      SDValue Mask = Op.getOperand(3);
23947      SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
23948      SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
23949                                                 Subtarget, DAG);
23950      // Need to fill with zeros to ensure the bitcast will produce zeroes
23951      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
23952      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
23953                                DAG.getConstant(0, dl, MVT::v8i1),
23954                                FPclassMask, DAG.getIntPtrConstant(0, dl));
23955      return DAG.getBitcast(MVT::i8, Ins);
23956    }
23957
23958    case CMP_MASK_CC: {
23959      MVT MaskVT = Op.getSimpleValueType();
23960      SDValue CC = Op.getOperand(3);
23961      // We specify 2 possible opcodes for intrinsics with rounding modes.
23962      // First, we check whether the intrinsic may have a non-default rounding
23963      // mode (IntrData->Opc1 != 0), then we check the rounding mode operand.
23964      if (IntrData->Opc1 != 0) {
23965        SDValue Sae = Op.getOperand(4);
23966        if (isRoundModeSAE(Sae))
23967          return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
23968                             Op.getOperand(2), CC, Sae);
23969        if (!isRoundModeCurDirection(Sae))
23970          return SDValue();
23971      }
23972      // Default rounding mode.
23973      return DAG.getNode(IntrData->Opc0, dl, MaskVT,
23974                         {Op.getOperand(1), Op.getOperand(2), CC});
23975    }
23976    case CMP_MASK_SCALAR_CC: {
23977      SDValue Src1 = Op.getOperand(1);
23978      SDValue Src2 = Op.getOperand(2);
23979      SDValue CC = Op.getOperand(3);
23980      SDValue Mask = Op.getOperand(4);
23981
23982      SDValue Cmp;
23983      if (IntrData->Opc1 != 0) {
23984        SDValue Sae = Op.getOperand(5);
23985        if (isRoundModeSAE(Sae))
23986          Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
23987        else if (!isRoundModeCurDirection(Sae))
23988          return SDValue();
23989      }
23990      // Default rounding mode.
23991      if (!Cmp.getNode())
23992        Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
23993
23994      SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
23995                                             Subtarget, DAG);
23996      // Need to fill with zeros to ensure the bitcast will produce zeroes
23997      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
23998      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
23999                                DAG.getConstant(0, dl, MVT::v8i1),
24000                                CmpMask, DAG.getIntPtrConstant(0, dl));
24001      return DAG.getBitcast(MVT::i8, Ins);
24002    }
24003    case COMI: { // Comparison intrinsics
24004      ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
24005      SDValue LHS = Op.getOperand(1);
24006      SDValue RHS = Op.getOperand(2);
24007      SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
24008      SDValue InvComi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, RHS, LHS);
24009      SDValue SetCC;
24010      switch (CC) {
24011      case ISD::SETEQ: { // (ZF = 0 and PF = 0)
24012        SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
24013        SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
24014        SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
24015        break;
24016      }
24017      case ISD::SETNE: { // (ZF = 1 or PF = 1)
24018        SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
24019        SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
24020        SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
24021        break;
24022      }
24023      case ISD::SETGT: // (CF = 0 and ZF = 0)
24024        SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
24025        break;
24026      case ISD::SETLT: { // The condition is opposite to GT. Swap the operands.
24027        SetCC = getSETCC(X86::COND_A, InvComi, dl, DAG);
24028        break;
24029      }
24030      case ISD::SETGE: // CF = 0
24031        SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
24032        break;
24033      case ISD::SETLE: // The condition is opposite to GE. Swap the operands.
24034        SetCC = getSETCC(X86::COND_AE, InvComi, dl, DAG);
24035        break;
24036      default:
24037        llvm_unreachable("Unexpected illegal condition!");
24038      }
24039      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
24040    }
24041    case COMI_RM: { // Comparison intrinsics with Sae
24042      SDValue LHS = Op.getOperand(1);
24043      SDValue RHS = Op.getOperand(2);
24044      unsigned CondVal = Op.getConstantOperandVal(3);
24045      SDValue Sae = Op.getOperand(4);
24046
24047      SDValue FCmp;
24048      if (isRoundModeCurDirection(Sae))
24049        FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
24050                           DAG.getTargetConstant(CondVal, dl, MVT::i8));
24051      else if (isRoundModeSAE(Sae))
24052        FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
24053                           DAG.getTargetConstant(CondVal, dl, MVT::i8), Sae);
24054      else
24055        return SDValue();
24056      // Need to fill with zeros to ensure the bitcast will produce zeroes
24057      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
24058      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
24059                                DAG.getConstant(0, dl, MVT::v16i1),
24060                                FCmp, DAG.getIntPtrConstant(0, dl));
24061      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
24062                         DAG.getBitcast(MVT::i16, Ins));
24063    }
24064    case VSHIFT:
24065      return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
24066                                 Op.getOperand(1), Op.getOperand(2), Subtarget,
24067                                 DAG);
24068    case COMPRESS_EXPAND_IN_REG: {
24069      SDValue Mask = Op.getOperand(3);
24070      SDValue DataToCompress = Op.getOperand(1);
24071      SDValue PassThru = Op.getOperand(2);
24072      if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
24073        return Op.getOperand(1);
24074
24075      // Avoid false dependency.
24076      if (PassThru.isUndef())
24077        PassThru = DAG.getConstant(0, dl, VT);
24078
24079      return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
24080                         Mask);
24081    }
24082    case FIXUPIMM:
24083    case FIXUPIMM_MASKZ: {
24084      SDValue Src1 = Op.getOperand(1);
24085      SDValue Src2 = Op.getOperand(2);
24086      SDValue Src3 = Op.getOperand(3);
24087      SDValue Imm = Op.getOperand(4);
24088      SDValue Mask = Op.getOperand(5);
24089      SDValue Passthru = (IntrData->Type == FIXUPIMM)
24090                             ? Src1
24091                             : getZeroVector(VT, Subtarget, DAG, dl);
24092
24093      unsigned Opc = IntrData->Opc0;
24094      if (IntrData->Opc1 != 0) {
24095        SDValue Sae = Op.getOperand(6);
24096        if (isRoundModeSAE(Sae))
24097          Opc = IntrData->Opc1;
24098        else if (!isRoundModeCurDirection(Sae))
24099          return SDValue();
24100      }
24101
24102      SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);
24103
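           // The packed opcodes (VFIXUPIMM / VFIXUPIMM_SAE) take a vector mask; the
           // remaining (scalar) opcodes use scalar masking.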
24104      if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
24105        return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
24106
24107      return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
24108    }
24109    case ROUNDP: {
24110      assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
24111      // Clear the upper bits of the rounding immediate so that the legacy
24112      // intrinsic can't trigger the scaling behavior of VRNDSCALE.
24113      auto Round = cast<ConstantSDNode>(Op.getOperand(2));
24114      SDValue RoundingMode =
24115          DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
24116      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24117                         Op.getOperand(1), RoundingMode);
24118    }
24119    case ROUNDS: {
24120      assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
24121      // Clear the upper bits of the rounding immediate so that the legacy
24122      // intrinsic can't trigger the scaling behavior of VRNDSCALE.
24123      auto Round = cast<ConstantSDNode>(Op.getOperand(3));
24124      SDValue RoundingMode =
24125          DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
24126      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24127                         Op.getOperand(1), Op.getOperand(2), RoundingMode);
24128    }
24129    case BEXTRI: {
24130      assert(IntrData->Opc0 == X86ISD::BEXTR && "Unexpected opcode");
24131
24132      // The control is a TargetConstant, but we need to convert it to a
24133      // ConstantSDNode.
24134      uint64_t Imm = Op.getConstantOperandVal(2);
24135      SDValue Control = DAG.getConstant(Imm, dl, Op.getValueType());
24136      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24137                         Op.getOperand(1), Control);
24138    }
24139    // ADC/ADCX/SBB
24140    case ADX: {
24141      SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
24142      SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);
24143
24144      SDValue Res;
24145      // If the carry in is zero, then we should just use ADD/SUB instead of
24146      // ADC/SBB.
24147      if (isNullConstant(Op.getOperand(1))) {
24148        Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
24149                          Op.getOperand(3));
24150      } else {
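             // Materialize CF from the (possibly non-constant) carry-in: adding -1
             // sets CF exactly when the i8 carry value is nonzero.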
24151        SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
24152                                    DAG.getConstant(-1, dl, MVT::i8));
24153        Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
24154                          Op.getOperand(3), GenCF.getValue(1));
24155      }
24156      SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
24157      SDValue Results[] = { SetCC, Res };
24158      return DAG.getMergeValues(Results, dl);
24159    }
24160    case CVTPD2PS_MASK:
24161    case CVTPD2DQ_MASK:
24162    case CVTQQ2PS_MASK:
24163    case TRUNCATE_TO_REG: {
24164      SDValue Src = Op.getOperand(1);
24165      SDValue PassThru = Op.getOperand(2);
24166      SDValue Mask = Op.getOperand(3);
24167
24168      if (isAllOnesConstant(Mask))
24169        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
24170
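           // Otherwise use the masked opcode with a vXi1 mask as wide as the source
           // vector.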
24171      MVT SrcVT = Src.getSimpleValueType();
24172      MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
24173      Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24174      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(),
24175                         {Src, PassThru, Mask});
24176    }
24177    case CVTPS2PH_MASK: {
24178      SDValue Src = Op.getOperand(1);
24179      SDValue Rnd = Op.getOperand(2);
24180      SDValue PassThru = Op.getOperand(3);
24181      SDValue Mask = Op.getOperand(4);
24182
24183      if (isAllOnesConstant(Mask))
24184        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src, Rnd);
24185
24186      MVT SrcVT = Src.getSimpleValueType();
24187      MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
24188      Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24189      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, Rnd,
24190                         PassThru, Mask);
24191
24192    }
24193    case CVTNEPS2BF16_MASK: {
24194      SDValue Src = Op.getOperand(1);
24195      SDValue PassThru = Op.getOperand(2);
24196      SDValue Mask = Op.getOperand(3);
24197
24198      if (ISD::isBuildVectorAllOnes(Mask.getNode()))
24199        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
24200
24201      // Break false dependency.
24202      if (PassThru.isUndef())
24203        PassThru = DAG.getConstant(0, dl, PassThru.getValueType());
24204
24205      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
24206                         Mask);
24207    }
24208    default:
24209      break;
24210    }
24211  }
24212
24213  switch (IntNo) {
24214  default: return SDValue();    // Don't custom lower most intrinsics.
24215
24216  // ptest and testp intrinsics. The intrinsics these come from are designed to
24217  // return an integer value, not just an instruction, so lower them to the ptest
24218  // or testp pattern and a setcc for the result.
24219  case Intrinsic::x86_avx512_ktestc_b:
24220  case Intrinsic::x86_avx512_ktestc_w:
24221  case Intrinsic::x86_avx512_ktestc_d:
24222  case Intrinsic::x86_avx512_ktestc_q:
24223  case Intrinsic::x86_avx512_ktestz_b:
24224  case Intrinsic::x86_avx512_ktestz_w:
24225  case Intrinsic::x86_avx512_ktestz_d:
24226  case Intrinsic::x86_avx512_ktestz_q:
24227  case Intrinsic::x86_sse41_ptestz:
24228  case Intrinsic::x86_sse41_ptestc:
24229  case Intrinsic::x86_sse41_ptestnzc:
24230  case Intrinsic::x86_avx_ptestz_256:
24231  case Intrinsic::x86_avx_ptestc_256:
24232  case Intrinsic::x86_avx_ptestnzc_256:
24233  case Intrinsic::x86_avx_vtestz_ps:
24234  case Intrinsic::x86_avx_vtestc_ps:
24235  case Intrinsic::x86_avx_vtestnzc_ps:
24236  case Intrinsic::x86_avx_vtestz_pd:
24237  case Intrinsic::x86_avx_vtestc_pd:
24238  case Intrinsic::x86_avx_vtestnzc_pd:
24239  case Intrinsic::x86_avx_vtestz_ps_256:
24240  case Intrinsic::x86_avx_vtestc_ps_256:
24241  case Intrinsic::x86_avx_vtestnzc_ps_256:
24242  case Intrinsic::x86_avx_vtestz_pd_256:
24243  case Intrinsic::x86_avx_vtestc_pd_256:
24244  case Intrinsic::x86_avx_vtestnzc_pd_256: {
24245    unsigned TestOpc = X86ISD::PTEST;
24246    X86::CondCode X86CC;
24247    switch (IntNo) {
24248    default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
24249    case Intrinsic::x86_avx512_ktestc_b:
24250    case Intrinsic::x86_avx512_ktestc_w:
24251    case Intrinsic::x86_avx512_ktestc_d:
24252    case Intrinsic::x86_avx512_ktestc_q:
24253      // CF = 1
24254      TestOpc = X86ISD::KTEST;
24255      X86CC = X86::COND_B;
24256      break;
24257    case Intrinsic::x86_avx512_ktestz_b:
24258    case Intrinsic::x86_avx512_ktestz_w:
24259    case Intrinsic::x86_avx512_ktestz_d:
24260    case Intrinsic::x86_avx512_ktestz_q:
24261      TestOpc = X86ISD::KTEST;
24262      X86CC = X86::COND_E;
24263      break;
24264    case Intrinsic::x86_avx_vtestz_ps:
24265    case Intrinsic::x86_avx_vtestz_pd:
24266    case Intrinsic::x86_avx_vtestz_ps_256:
24267    case Intrinsic::x86_avx_vtestz_pd_256:
24268      TestOpc = X86ISD::TESTP;
24269      LLVM_FALLTHROUGH;
24270    case Intrinsic::x86_sse41_ptestz:
24271    case Intrinsic::x86_avx_ptestz_256:
24272      // ZF = 1
24273      X86CC = X86::COND_E;
24274      break;
24275    case Intrinsic::x86_avx_vtestc_ps:
24276    case Intrinsic::x86_avx_vtestc_pd:
24277    case Intrinsic::x86_avx_vtestc_ps_256:
24278    case Intrinsic::x86_avx_vtestc_pd_256:
24279      TestOpc = X86ISD::TESTP;
24280      LLVM_FALLTHROUGH;
24281    case Intrinsic::x86_sse41_ptestc:
24282    case Intrinsic::x86_avx_ptestc_256:
24283      // CF = 1
24284      X86CC = X86::COND_B;
24285      break;
24286    case Intrinsic::x86_avx_vtestnzc_ps:
24287    case Intrinsic::x86_avx_vtestnzc_pd:
24288    case Intrinsic::x86_avx_vtestnzc_ps_256:
24289    case Intrinsic::x86_avx_vtestnzc_pd_256:
24290      TestOpc = X86ISD::TESTP;
24291      LLVM_FALLTHROUGH;
24292    case Intrinsic::x86_sse41_ptestnzc:
24293    case Intrinsic::x86_avx_ptestnzc_256:
24294      // ZF and CF = 0
24295      X86CC = X86::COND_A;
24296      break;
24297    }
24298
24299    SDValue LHS = Op.getOperand(1);
24300    SDValue RHS = Op.getOperand(2);
24301    SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
24302    SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
24303    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
24304  }
24305
24306  case Intrinsic::x86_sse42_pcmpistria128:
24307  case Intrinsic::x86_sse42_pcmpestria128:
24308  case Intrinsic::x86_sse42_pcmpistric128:
24309  case Intrinsic::x86_sse42_pcmpestric128:
24310  case Intrinsic::x86_sse42_pcmpistrio128:
24311  case Intrinsic::x86_sse42_pcmpestrio128:
24312  case Intrinsic::x86_sse42_pcmpistris128:
24313  case Intrinsic::x86_sse42_pcmpestris128:
24314  case Intrinsic::x86_sse42_pcmpistriz128:
24315  case Intrinsic::x86_sse42_pcmpestriz128: {
24316    unsigned Opcode;
24317    X86::CondCode X86CC;
24318    switch (IntNo) {
24319    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
24320    case Intrinsic::x86_sse42_pcmpistria128:
24321      Opcode = X86ISD::PCMPISTR;
24322      X86CC = X86::COND_A;
24323      break;
24324    case Intrinsic::x86_sse42_pcmpestria128:
24325      Opcode = X86ISD::PCMPESTR;
24326      X86CC = X86::COND_A;
24327      break;
24328    case Intrinsic::x86_sse42_pcmpistric128:
24329      Opcode = X86ISD::PCMPISTR;
24330      X86CC = X86::COND_B;
24331      break;
24332    case Intrinsic::x86_sse42_pcmpestric128:
24333      Opcode = X86ISD::PCMPESTR;
24334      X86CC = X86::COND_B;
24335      break;
24336    case Intrinsic::x86_sse42_pcmpistrio128:
24337      Opcode = X86ISD::PCMPISTR;
24338      X86CC = X86::COND_O;
24339      break;
24340    case Intrinsic::x86_sse42_pcmpestrio128:
24341      Opcode = X86ISD::PCMPESTR;
24342      X86CC = X86::COND_O;
24343      break;
24344    case Intrinsic::x86_sse42_pcmpistris128:
24345      Opcode = X86ISD::PCMPISTR;
24346      X86CC = X86::COND_S;
24347      break;
24348    case Intrinsic::x86_sse42_pcmpestris128:
24349      Opcode = X86ISD::PCMPESTR;
24350      X86CC = X86::COND_S;
24351      break;
24352    case Intrinsic::x86_sse42_pcmpistriz128:
24353      Opcode = X86ISD::PCMPISTR;
24354      X86CC = X86::COND_E;
24355      break;
24356    case Intrinsic::x86_sse42_pcmpestriz128:
24357      Opcode = X86ISD::PCMPESTR;
24358      X86CC = X86::COND_E;
24359      break;
24360    }
24361    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
24362    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
24363    SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
24364    SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
24365    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
24366  }
24367
24368  case Intrinsic::x86_sse42_pcmpistri128:
24369  case Intrinsic::x86_sse42_pcmpestri128: {
24370    unsigned Opcode;
24371    if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
24372      Opcode = X86ISD::PCMPISTR;
24373    else
24374      Opcode = X86ISD::PCMPESTR;
24375
24376    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
24377    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
24378    return DAG.getNode(Opcode, dl, VTs, NewOps);
24379  }
24380
24381  case Intrinsic::x86_sse42_pcmpistrm128:
24382  case Intrinsic::x86_sse42_pcmpestrm128: {
24383    unsigned Opcode;
24384    if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
24385      Opcode = X86ISD::PCMPISTR;
24386    else
24387      Opcode = X86ISD::PCMPESTR;
24388
24389    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
24390    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
24391    return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
24392  }
24393
24394  case Intrinsic::eh_sjlj_lsda: {
24395    MachineFunction &MF = DAG.getMachineFunction();
24396    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24397    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
24398    auto &Context = MF.getMMI().getContext();
24399    MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
24400                                            Twine(MF.getFunctionNumber()));
24401    return DAG.getNode(getGlobalWrapperKind(), dl, VT,
24402                       DAG.getMCSymbol(S, PtrVT));
24403  }
24404
24405  case Intrinsic::x86_seh_lsda: {
24406    // Compute the symbol for the LSDA. We know it'll get emitted later.
24407    MachineFunction &MF = DAG.getMachineFunction();
24408    SDValue Op1 = Op.getOperand(1);
24409    auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
24410    MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
24411        GlobalValue::dropLLVMManglingEscape(Fn->getName()));
24412
24413    // Generate a simple absolute symbol reference. This intrinsic is only
24414    // supported on 32-bit Windows, which isn't PIC.
24415    SDValue Result = DAG.getMCSymbol(LSDASym, VT);
24416    return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
24417  }
24418
24419  case Intrinsic::eh_recoverfp: {
24420    SDValue FnOp = Op.getOperand(1);
24421    SDValue IncomingFPOp = Op.getOperand(2);
24422    GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
24423    auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
24424    if (!Fn)
24425      report_fatal_error(
24426          "llvm.eh.recoverfp must take a function as the first argument");
24427    return recoverFramePointer(DAG, Fn, IncomingFPOp);
24428  }
24429
24430  case Intrinsic::localaddress: {
24431    // Returns one of the stack, base, or frame pointer registers, depending on
24432    // which is used to reference local variables.
24433    MachineFunction &MF = DAG.getMachineFunction();
24434    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24435    unsigned Reg;
24436    if (RegInfo->hasBasePointer(MF))
24437      Reg = RegInfo->getBaseRegister();
24438    else { // Handles the SP or FP case.
24439      bool CantUseFP = RegInfo->needsStackRealignment(MF);
24440      if (CantUseFP)
24441        Reg = RegInfo->getPtrSizedStackRegister(MF);
24442      else
24443        Reg = RegInfo->getPtrSizedFrameRegister(MF);
24444    }
24445    return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
24446  }
24447
24448  case Intrinsic::x86_avx512_vp2intersect_q_512:
24449  case Intrinsic::x86_avx512_vp2intersect_q_256:
24450  case Intrinsic::x86_avx512_vp2intersect_q_128:
24451  case Intrinsic::x86_avx512_vp2intersect_d_512:
24452  case Intrinsic::x86_avx512_vp2intersect_d_256:
24453  case Intrinsic::x86_avx512_vp2intersect_d_128: {
24454    MVT MaskVT = Op.getSimpleValueType();
24455
24456    SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
24457    SDLoc DL(Op);
24458
24459    SDValue Operation =
24460        DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
24461                    Op->getOperand(1), Op->getOperand(2));
24462
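         // VP2INTERSECT produces a pair of mask registers; extract each half of the
         // untyped result as a mask subregister.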
24463    SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
24464                                                 MaskVT, Operation);
24465    SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
24466                                                 MaskVT, Operation);
24467    return DAG.getMergeValues({Result0, Result1}, DL);
24468  }
24469  case Intrinsic::x86_mmx_pslli_w:
24470  case Intrinsic::x86_mmx_pslli_d:
24471  case Intrinsic::x86_mmx_pslli_q:
24472  case Intrinsic::x86_mmx_psrli_w:
24473  case Intrinsic::x86_mmx_psrli_d:
24474  case Intrinsic::x86_mmx_psrli_q:
24475  case Intrinsic::x86_mmx_psrai_w:
24476  case Intrinsic::x86_mmx_psrai_d: {
24477    SDLoc DL(Op);
24478    SDValue ShAmt = Op.getOperand(2);
24479    // If the argument is a constant, convert it to a target constant.
24480    if (auto *C = dyn_cast<ConstantSDNode>(ShAmt)) {
24481      // Clamp out of bounds shift amounts since they will otherwise be masked
24482      // to 8-bits which may make it no longer out of bounds.
24483      unsigned ShiftAmount = C->getAPIntValue().getLimitedValue(255);
24484      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
24485                         Op.getOperand(0), Op.getOperand(1),
24486                         DAG.getTargetConstant(ShiftAmount, DL, MVT::i32));
24487    }
24488
24489    unsigned NewIntrinsic;
24490    switch (IntNo) {
24491    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
24492    case Intrinsic::x86_mmx_pslli_w:
24493      NewIntrinsic = Intrinsic::x86_mmx_psll_w;
24494      break;
24495    case Intrinsic::x86_mmx_pslli_d:
24496      NewIntrinsic = Intrinsic::x86_mmx_psll_d;
24497      break;
24498    case Intrinsic::x86_mmx_pslli_q:
24499      NewIntrinsic = Intrinsic::x86_mmx_psll_q;
24500      break;
24501    case Intrinsic::x86_mmx_psrli_w:
24502      NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
24503      break;
24504    case Intrinsic::x86_mmx_psrli_d:
24505      NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
24506      break;
24507    case Intrinsic::x86_mmx_psrli_q:
24508      NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
24509      break;
24510    case Intrinsic::x86_mmx_psrai_w:
24511      NewIntrinsic = Intrinsic::x86_mmx_psra_w;
24512      break;
24513    case Intrinsic::x86_mmx_psrai_d:
24514      NewIntrinsic = Intrinsic::x86_mmx_psra_d;
24515      break;
24516    }
24517
24518    // The vector shift intrinsics with scalars use 32-bit shift amounts, but
24519    // the SSE2/MMX shift instructions read 64 bits. Copy the 32 bits to an
24520    // MMX register.
24521    ShAmt = DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, ShAmt);
24522    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
24523                       DAG.getConstant(NewIntrinsic, DL, MVT::i32),
24524                       Op.getOperand(1), ShAmt);
24525
24526  }
24527  }
24528}
24529
24530static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
24531                                 SDValue Src, SDValue Mask, SDValue Base,
24532                                 SDValue Index, SDValue ScaleOp, SDValue Chain,
24533                                 const X86Subtarget &Subtarget) {
24534  SDLoc dl(Op);
24535  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
24536  // Scale must be constant.
24537  if (!C)
24538    return SDValue();
24539  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24540  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
24541                                        TLI.getPointerTy(DAG.getDataLayout()));
24542  EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
24543  SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
24544  // If source is undef or we know it won't be used, use a zero vector
24545  // to break register dependency.
24546  // TODO: use undef instead and let BreakFalseDeps deal with it?
24547  if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
24548    Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
24549
24550  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
24551
24552  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
24553  SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
24554    VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
24555  return DAG.getMergeValues({ Res, Res.getValue(2) }, dl);
24556}
24557
24558static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
24559                             SDValue Src, SDValue Mask, SDValue Base,
24560                             SDValue Index, SDValue ScaleOp, SDValue Chain,
24561                             const X86Subtarget &Subtarget) {
24562  MVT VT = Op.getSimpleValueType();
24563  SDLoc dl(Op);
24564  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
24565  // Scale must be constant.
24566  if (!C)
24567    return SDValue();
24568  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24569  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
24570                                        TLI.getPointerTy(DAG.getDataLayout()));
24571  unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
24572                              VT.getVectorNumElements());
24573  MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
24574
24575  // We support two versions of the gather intrinsics. One with scalar mask and
24576  // one with vXi1 mask. Convert scalar to vXi1 if necessary.
24577  if (Mask.getValueType() != MaskVT)
24578    Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24579
24580  SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
24581  // If source is undef or we know it won't be used, use a zero vector
24582  // to break register dependency.
24583  // TODO: use undef instead and let BreakFalseDeps deal with it?
24584  if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
24585    Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
24586
24587  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
24588
24589  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
24590  SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
24591    VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
24592  return DAG.getMergeValues({ Res, Res.getValue(2) }, dl);
24593}
24594
24595static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
24596                               SDValue Src, SDValue Mask, SDValue Base,
24597                               SDValue Index, SDValue ScaleOp, SDValue Chain,
24598                               const X86Subtarget &Subtarget) {
24599  SDLoc dl(Op);
24600  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
24601  // Scale must be constant.
24602  if (!C)
24603    return SDValue();
24604  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24605  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
24606                                        TLI.getPointerTy(DAG.getDataLayout()));
24607  unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
24608                              Src.getSimpleValueType().getVectorNumElements());
24609  MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
24610
24611  // We support two versions of the scatter intrinsics. One with scalar mask and
24612  // one with vXi1 mask. Convert scalar to vXi1 if necessary.
24613  if (Mask.getValueType() != MaskVT)
24614    Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24615
24616  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
24617
24618  SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
24619  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
24620  SDValue Res = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
24621      VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
24622  return Res.getValue(1);
24623}
24624
24625static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
24626                               SDValue Mask, SDValue Base, SDValue Index,
24627                               SDValue ScaleOp, SDValue Chain,
24628                               const X86Subtarget &Subtarget) {
24629  SDLoc dl(Op);
24630  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
24631  // Scale must be constant.
24632  if (!C)
24633    return SDValue();
24634  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24635  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
24636                                        TLI.getPointerTy(DAG.getDataLayout()));
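       // The prefetch address is formed with a zero displacement and no segment
       // override.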
24637  SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
24638  SDValue Segment = DAG.getRegister(0, MVT::i32);
24639  MVT MaskVT =
24640    MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
24641  SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24642  SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
24643  SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
24644  return SDValue(Res, 0);
24645}
24646
24647/// Handles the lowering of builtin intrinsics with chain that return their
24648/// value into registers EDX:EAX.
24649/// If operand SrcReg is a valid register identifier, then operand 2 of N is
24650/// copied to SrcReg. The assumption is that SrcReg is an implicit input to
24651/// TargetOpcode.
24652/// Returns a Glue value which can be used to add an extra copy-from-reg if the
24653/// expanded intrinsic implicitly defines extra registers (i.e. not just
24654/// EDX:EAX).
24655static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
24656                                        SelectionDAG &DAG,
24657                                        unsigned TargetOpcode,
24658                                        unsigned SrcReg,
24659                                        const X86Subtarget &Subtarget,
24660                                        SmallVectorImpl<SDValue> &Results) {
24661  SDValue Chain = N->getOperand(0);
24662  SDValue Glue;
24663
24664  if (SrcReg) {
24665    assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
24666    Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
24667    Glue = Chain.getValue(1);
24668  }
24669
24670  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
24671  SDValue N1Ops[] = {Chain, Glue};
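       // Only pass the glue operand to the machine node if the CopyToReg above
       // produced one.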
24672  SDNode *N1 = DAG.getMachineNode(
24673      TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
24674  Chain = SDValue(N1, 0);
24675
24676  // Read the value the expanded instruction left in EDX:EAX (RDX:RAX on 64-bit).
24677  SDValue LO, HI;
24678  if (Subtarget.is64Bit()) {
24679    LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
24680    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
24681                            LO.getValue(2));
24682  } else {
24683    LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
24684    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
24685                            LO.getValue(2));
24686  }
24687  Chain = HI.getValue(1);
24688  Glue = HI.getValue(2);
24689
24690  if (Subtarget.is64Bit()) {
24691    // Merge the two 32-bit values into a 64-bit one.
24692    SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
24693                              DAG.getConstant(32, DL, MVT::i8));
24694    Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
24695    Results.push_back(Chain);
24696    return Glue;
24697  }
24698
24699  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
24700  SDValue Ops[] = { LO, HI };
24701  SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
24702  Results.push_back(Pair);
24703  Results.push_back(Chain);
24704  return Glue;
24705}
24706
24707/// Handles the lowering of builtin intrinsics that read the time stamp counter
24708/// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
24709/// READCYCLECOUNTER nodes.
24710static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
24711                                    SelectionDAG &DAG,
24712                                    const X86Subtarget &Subtarget,
24713                                    SmallVectorImpl<SDValue> &Results) {
24714  // The processor's time-stamp counter (a 64-bit MSR) is stored into the
24715  // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
24716  // and the EAX register is loaded with the low-order 32 bits.
24717  SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
24718                                             /* NoRegister */0, Subtarget,
24719                                             Results);
24720  if (Opcode != X86::RDTSCP)
24721    return;
24722
24723  SDValue Chain = Results[1];
24724  // The RDTSCP instruction loads the IA32_TSC_AUX MSR (address C000_0103H) into
24725  // the ECX register. Add 'ecx' explicitly to the chain.
24726  SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
24727  Results[1] = ecx;
24728  Results.push_back(ecx.getValue(1));
24729}
24730
24731static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
24732                                     SelectionDAG &DAG) {
24733  SmallVector<SDValue, 3> Results;
24734  SDLoc DL(Op);
24735  getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
24736                          Results);
24737  return DAG.getMergeValues(Results, DL);
24738}
24739
24740static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
24741  MachineFunction &MF = DAG.getMachineFunction();
24742  SDValue Chain = Op.getOperand(0);
24743  SDValue RegNode = Op.getOperand(2);
24744  WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
24745  if (!EHInfo)
24746    report_fatal_error("EH registrations only live in functions using WinEH");
24747
24748  // Cast the operand to an alloca, and remember the frame index.
24749  auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
24750  if (!FINode)
24751    report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
24752  EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
24753
24754  // Return the chain operand without making any DAG nodes.
24755  return Chain;
24756}
24757
24758static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
24759  MachineFunction &MF = DAG.getMachineFunction();
24760  SDValue Chain = Op.getOperand(0);
24761  SDValue EHGuard = Op.getOperand(2);
24762  WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
24763  if (!EHInfo)
24764    report_fatal_error("EHGuard only lives in functions using WinEH");
24765
24766  // Cast the operand to an alloca, and remember the frame index.
24767  auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
24768  if (!FINode)
24769    report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
24770  EHInfo->EHGuardFrameIndex = FINode->getIndex();
24771
24772  // Return the chain operand without making any DAG nodes.
24773  return Chain;
24774}
24775
24776/// Emit Truncating Store with signed or unsigned saturation.
24777static SDValue
24778EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val,
24779                SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
24780                SelectionDAG &DAG) {
24781
24782  SDVTList VTs = DAG.getVTList(MVT::Other);
24783  SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
24784  SDValue Ops[] = { Chain, Val, Ptr, Undef };
24785  return SignedSat ?
24786    DAG.getTargetMemSDNode<TruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
24787    DAG.getTargetMemSDNode<TruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
24788}
24789
24790/// Emit Masked Truncating Store with signed or unsigned saturation.
24791static SDValue
24792EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl,
24793                      SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
24794                      MachineMemOperand *MMO, SelectionDAG &DAG) {
24795
24796  SDVTList VTs = DAG.getVTList(MVT::Other);
24797  SDValue Ops[] = { Chain, Val, Ptr, Mask };
24798  return SignedSat ?
24799    DAG.getTargetMemSDNode<MaskedTruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
24800    DAG.getTargetMemSDNode<MaskedTruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
24801}
24802
24803static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
24804                                      SelectionDAG &DAG) {
24805  unsigned IntNo = Op.getConstantOperandVal(1);
24806  const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
24807  if (!IntrData) {
24808    switch (IntNo) {
24809    case llvm::Intrinsic::x86_seh_ehregnode:
24810      return MarkEHRegistrationNode(Op, DAG);
24811    case llvm::Intrinsic::x86_seh_ehguard:
24812      return MarkEHGuard(Op, DAG);
24813    case llvm::Intrinsic::x86_rdpkru: {
24814      SDLoc dl(Op);
24815      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
24816      // Create a RDPKRU node and pass 0 to the ECX parameter.
24817      return DAG.getNode(X86ISD::RDPKRU, dl, VTs, Op.getOperand(0),
24818                         DAG.getConstant(0, dl, MVT::i32));
24819    }
24820    case llvm::Intrinsic::x86_wrpkru: {
24821      SDLoc dl(Op);
24822      // Create a WRPKRU node, pass the input to the EAX parameter, and pass 0
24823      // to the EDX and ECX parameters.
24824      return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
24825                         Op.getOperand(0), Op.getOperand(2),
24826                         DAG.getConstant(0, dl, MVT::i32),
24827                         DAG.getConstant(0, dl, MVT::i32));
24828    }
24829    case llvm::Intrinsic::x86_flags_read_u32:
24830    case llvm::Intrinsic::x86_flags_read_u64:
24831    case llvm::Intrinsic::x86_flags_write_u32:
24832    case llvm::Intrinsic::x86_flags_write_u64: {
24833      // We need a frame pointer because this will get lowered to a PUSH/POP
24834      // sequence.
24835      MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
24836      MFI.setHasCopyImplyingStackAdjustment(true);
24837      // Don't do anything here, we will expand these intrinsics out later
24838      // during FinalizeISel in EmitInstrWithCustomInserter.
24839      return Op;
24840    }
24841    case Intrinsic::x86_lwpins32:
24842    case Intrinsic::x86_lwpins64:
24843    case Intrinsic::x86_umwait:
24844    case Intrinsic::x86_tpause: {
24845      SDLoc dl(Op);
24846      SDValue Chain = Op->getOperand(0);
24847      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
24848      unsigned Opcode;
24849
24850      switch (IntNo) {
24851      default: llvm_unreachable("Impossible intrinsic");
24852      case Intrinsic::x86_umwait:
24853        Opcode = X86ISD::UMWAIT;
24854        break;
24855      case Intrinsic::x86_tpause:
24856        Opcode = X86ISD::TPAUSE;
24857        break;
24858      case Intrinsic::x86_lwpins32:
24859      case Intrinsic::x86_lwpins64:
24860        Opcode = X86ISD::LWPINS;
24861        break;
24862      }
24863
24864      SDValue Operation =
24865          DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
24866                      Op->getOperand(3), Op->getOperand(4));
24867      SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
24868      return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
24869                         Operation.getValue(1));
24870    }
24871    case Intrinsic::x86_enqcmd:
24872    case Intrinsic::x86_enqcmds: {
24873      SDLoc dl(Op);
24874      SDValue Chain = Op.getOperand(0);
24875      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
24876      unsigned Opcode;
24877      switch (IntNo) {
24878      default: llvm_unreachable("Impossible intrinsic!");
24879      case Intrinsic::x86_enqcmd:
24880        Opcode = X86ISD::ENQCMD;
24881        break;
24882      case Intrinsic::x86_enqcmds:
24883        Opcode = X86ISD::ENQCMDS;
24884        break;
24885      }
24886      SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
24887                                      Op.getOperand(3));
24888      SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
24889      return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
24890                         Operation.getValue(1));
24891    }
24892    }
24893    return SDValue();
24894  }
24895
24896  SDLoc dl(Op);
24897  switch(IntrData->Type) {
24898  default: llvm_unreachable("Unknown Intrinsic Type");
24899  case RDSEED:
24900  case RDRAND: {
24901    // Emit the node with the right value type.
24902    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
24903    SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
24904
24905    // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
24906    // Otherwise return the value from Rand, which is always 0, cast to i32.
24907    SDValue Ops[] = {DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
24908                     DAG.getConstant(1, dl, Op->getValueType(1)),
24909                     DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
24910                     SDValue(Result.getNode(), 1)};
24911    SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);
24912
24913    // Return { result, isValid, chain }.
24914    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
24915                       SDValue(Result.getNode(), 2));
24916  }
24917  case GATHER_AVX2: {
24918    SDValue Chain = Op.getOperand(0);
24919    SDValue Src   = Op.getOperand(2);
24920    SDValue Base  = Op.getOperand(3);
24921    SDValue Index = Op.getOperand(4);
24922    SDValue Mask  = Op.getOperand(5);
24923    SDValue Scale = Op.getOperand(6);
24924    return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
24925                             Scale, Chain, Subtarget);
24926  }
24927  case GATHER: {
24928    // gather(v1, mask, index, base, scale)
24929    SDValue Chain = Op.getOperand(0);
24930    SDValue Src   = Op.getOperand(2);
24931    SDValue Base  = Op.getOperand(3);
24932    SDValue Index = Op.getOperand(4);
24933    SDValue Mask  = Op.getOperand(5);
24934    SDValue Scale = Op.getOperand(6);
24935    return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
24936                         Chain, Subtarget);
24937  }
24938  case SCATTER: {
24939    // scatter(base, mask, index, v1, scale)
24940    SDValue Chain = Op.getOperand(0);
24941    SDValue Base  = Op.getOperand(2);
24942    SDValue Mask  = Op.getOperand(3);
24943    SDValue Index = Op.getOperand(4);
24944    SDValue Src   = Op.getOperand(5);
24945    SDValue Scale = Op.getOperand(6);
24946    return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
24947                          Scale, Chain, Subtarget);
24948  }
24949  case PREFETCH: {
24950    const APInt &HintVal = Op.getConstantOperandAPInt(6);
24951    assert((HintVal == 2 || HintVal == 3) &&
24952           "Wrong prefetch hint in intrinsic: should be 2 or 3");
24953    unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
24954    SDValue Chain = Op.getOperand(0);
24955    SDValue Mask  = Op.getOperand(2);
24956    SDValue Index = Op.getOperand(3);
24957    SDValue Base  = Op.getOperand(4);
24958    SDValue Scale = Op.getOperand(5);
24959    return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
24960                           Subtarget);
24961  }
24962  // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
24963  case RDTSC: {
24964    SmallVector<SDValue, 2> Results;
24965    getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
24966                            Results);
24967    return DAG.getMergeValues(Results, dl);
24968  }
24969  // Read Performance Monitoring Counters.
24970  case RDPMC:
24971  // Get Extended Control Register.
24972  case XGETBV: {
24973    SmallVector<SDValue, 2> Results;
24974
24975    // RDPMC uses ECX to select the index of the performance counter to read.
24976    // XGETBV uses ECX to select the index of the XCR register to return.
24977    // The result is stored into registers EDX:EAX.
24978    expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
24979                                Subtarget, Results);
24980    return DAG.getMergeValues(Results, dl);
24981  }
24982  // XTEST intrinsics.
24983  case XTEST: {
24984    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
24985    SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
24986
24987    SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
24988    SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
24989    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
24990                       Ret, SDValue(InTrans.getNode(), 1));
24991  }
24992  case TRUNCATE_TO_MEM_VI8:
24993  case TRUNCATE_TO_MEM_VI16:
24994  case TRUNCATE_TO_MEM_VI32: {
24995    SDValue Mask = Op.getOperand(4);
24996    SDValue DataToTruncate = Op.getOperand(3);
24997    SDValue Addr = Op.getOperand(2);
24998    SDValue Chain = Op.getOperand(0);
24999
25000    MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
25001    assert(MemIntr && "Expected MemIntrinsicSDNode!");
25002
25003    EVT MemVT  = MemIntr->getMemoryVT();
25004
25005    uint16_t TruncationOp = IntrData->Opc0;
25006    switch (TruncationOp) {
25007    case X86ISD::VTRUNC: {
25008      if (isAllOnesConstant(Mask)) // return just a truncate store
25009        return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
25010                                 MemIntr->getMemOperand());
25011
25012      MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
25013      SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25014      SDValue Offset = DAG.getUNDEF(VMask.getValueType());
25015
25016      return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, Offset, VMask,
25017                                MemVT, MemIntr->getMemOperand(), ISD::UNINDEXED,
25018                                true /* truncating */);
25019    }
25020    case X86ISD::VTRUNCUS:
25021    case X86ISD::VTRUNCS: {
25022      bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
25023      if (isAllOnesConstant(Mask))
25024        return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
25025                               MemIntr->getMemOperand(), DAG);
25026
25027      MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
25028      SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25029
25030      return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
25031                                   VMask, MemVT, MemIntr->getMemOperand(), DAG);
25032    }
25033    default:
25034      llvm_unreachable("Unsupported truncstore intrinsic");
25035    }
25036  }
25037  }
25038}
25039
25040SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
25041                                           SelectionDAG &DAG) const {
25042  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
25043  MFI.setReturnAddressIsTaken(true);
25044
25045  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
25046    return SDValue();
25047
25048  unsigned Depth = Op.getConstantOperandVal(0);
25049  SDLoc dl(Op);
25050  EVT PtrVT = getPointerTy(DAG.getDataLayout());
25051
25052  if (Depth > 0) {
25053    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
25054    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25055    SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
25056    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
25057                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
25058                       MachinePointerInfo());
25059  }
25060
25061  // Just load the return address.
25062  SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
25063  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
25064                     MachinePointerInfo());
25065}
25066
25067SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
25068                                                 SelectionDAG &DAG) const {
25069  DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
25070  return getReturnAddressFrameIndex(DAG);
25071}
25072
25073SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
25074  MachineFunction &MF = DAG.getMachineFunction();
25075  MachineFrameInfo &MFI = MF.getFrameInfo();
25076  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
25077  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25078  EVT VT = Op.getValueType();
25079
25080  MFI.setFrameAddressIsTaken(true);
25081
25082  if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
25083    // Depth > 0 makes no sense on targets which use Windows unwind codes.  It
25084    // is not possible to crawl up the stack without looking at the unwind codes
25085    // simultaneously.
25086    int FrameAddrIndex = FuncInfo->getFAIndex();
25087    if (!FrameAddrIndex) {
25088      // Set up a frame object for the return address.
25089      unsigned SlotSize = RegInfo->getSlotSize();
25090      FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
25091          SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
25092      FuncInfo->setFAIndex(FrameAddrIndex);
25093    }
25094    return DAG.getFrameIndex(FrameAddrIndex, VT);
25095  }
25096
25097  unsigned FrameReg =
25098      RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
25099  SDLoc dl(Op);  // FIXME probably not meaningful
25100  unsigned Depth = Op.getConstantOperandVal(0);
25101  assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
25102          (FrameReg == X86::EBP && VT == MVT::i32)) &&
25103         "Invalid Frame Register!");
25104  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
25105  while (Depth--)
25106    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
25107                            MachinePointerInfo());
25108  return FrameAddr;
25109}
25110
25111// FIXME? Maybe this could be a TableGen attribute on some registers and
25112// this table could be generated automatically from RegInfo.
25113Register X86TargetLowering::getRegisterByName(const char* RegName, LLT VT,
25114                                              const MachineFunction &MF) const {
25115  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
25116
25117  Register Reg = StringSwitch<unsigned>(RegName)
25118                       .Case("esp", X86::ESP)
25119                       .Case("rsp", X86::RSP)
25120                       .Case("ebp", X86::EBP)
25121                       .Case("rbp", X86::RBP)
25122                       .Default(0);
25123
25124  if (Reg == X86::EBP || Reg == X86::RBP) {
25125    if (!TFI.hasFP(MF))
25126      report_fatal_error("register " + StringRef(RegName) +
25127                         " is allocatable: function has no frame pointer");
25128#ifndef NDEBUG
25129    else {
25130      const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25131      Register FrameReg = RegInfo->getPtrSizedFrameRegister(MF);
25132      assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
25133             "Invalid Frame Register!");
25134    }
25135#endif
25136  }
25137
25138  if (Reg)
25139    return Reg;
25140
25141  report_fatal_error("Invalid register name global variable");
25142}
25143
25144SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
25145                                                     SelectionDAG &DAG) const {
25146  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
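       // Skip over the saved frame pointer and the return address (one slot
       // each) to reach the incoming arguments.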
25147  return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
25148}
25149
25150unsigned X86TargetLowering::getExceptionPointerRegister(
25151    const Constant *PersonalityFn) const {
25152  if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
25153    return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
25154
25155  return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
25156}
25157
25158unsigned X86TargetLowering::getExceptionSelectorRegister(
25159    const Constant *PersonalityFn) const {
25160  // Funclet personalities don't use selectors (the runtime does the selection).
25161  assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
25162  return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
25163}
25164
25165bool X86TargetLowering::needsFixedCatchObjects() const {
25166  return Subtarget.isTargetWin64();
25167}
25168
25169SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
25170  SDValue Chain     = Op.getOperand(0);
25171  SDValue Offset    = Op.getOperand(1);
25172  SDValue Handler   = Op.getOperand(2);
25173  SDLoc dl      (Op);
25174
25175  EVT PtrVT = getPointerTy(DAG.getDataLayout());
25176  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25177  Register FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
25178  assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
25179          (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
25180         "Invalid Frame Register!");
25181  SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
25182  unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
25183
25184  SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
25185                                 DAG.getIntPtrConstant(RegInfo->getSlotSize(),
25186                                                       dl));
25187  StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
25188  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
25189  Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
25190
25191  return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
25192                     DAG.getRegister(StoreAddrReg, PtrVT));
25193}
25194
25195SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
25196                                               SelectionDAG &DAG) const {
25197  SDLoc DL(Op);
25198  // If the subtarget is not 64-bit, we may need the global base reg
25199  // after isel expands the pseudo, i.e., after the CGBR pass has run.
25200  // Therefore, ask for the GlobalBaseReg now, so that the pass
25201  // inserts the code for us in case we need it.
25202  // Otherwise, we would end up referencing a virtual register
25203  // that is never defined!
25204  if (!Subtarget.is64Bit()) {
25205    const X86InstrInfo *TII = Subtarget.getInstrInfo();
25206    (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
25207  }
25208  return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
25209                     DAG.getVTList(MVT::i32, MVT::Other),
25210                     Op.getOperand(0), Op.getOperand(1));
25211}
25212
25213SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
25214                                                SelectionDAG &DAG) const {
25215  SDLoc DL(Op);
25216  return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
25217                     Op.getOperand(0), Op.getOperand(1));
25218}
25219
25220SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
25221                                                       SelectionDAG &DAG) const {
25222  SDLoc DL(Op);
25223  return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
25224                     Op.getOperand(0));
25225}
25226
25227static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
25228  return Op.getOperand(0);
25229}
25230
25231SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
25232                                                SelectionDAG &DAG) const {
25233  SDValue Root = Op.getOperand(0);
25234  SDValue Trmp = Op.getOperand(1); // trampoline
25235  SDValue FPtr = Op.getOperand(2); // nested function
25236  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
25237  SDLoc dl (Op);
25238
25239  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
25240  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
25241
25242  if (Subtarget.is64Bit()) {
25243    SDValue OutChains[6];
25244
25245    // Large code-model.
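         // The stores below assemble this sequence into the trampoline:
         //   49 BB <FPtr>   movabsq $FPtr, %r11
         //   49 BA <Nest>   movabsq $Nest, %r10
         //   49 FF E3       jmpq    *%r11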
25246    const unsigned char JMP64r  = 0xFF; // 64-bit jmp through register opcode.
25247    const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
25248
25249    const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
25250    const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
25251
25252    const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
25253
25254    // Load the pointer to the nested function into R11.
25255    unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
25256    SDValue Addr = Trmp;
25257    OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
25258                                Addr, MachinePointerInfo(TrmpAddr));
25259
25260    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
25261                       DAG.getConstant(2, dl, MVT::i64));
25262    OutChains[1] =
25263        DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2),
25264                     /* Alignment = */ 2);
25265
25266    // Load the 'nest' parameter value into R10.
25267    // R10 is specified in X86CallingConv.td
25268    OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
25269    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
25270                       DAG.getConstant(10, dl, MVT::i64));
25271    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
25272                                Addr, MachinePointerInfo(TrmpAddr, 10));
25273
25274    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
25275                       DAG.getConstant(12, dl, MVT::i64));
25276    OutChains[3] =
25277        DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12),
25278                     /* Alignment = */ 2);
25279
25280    // Jump to the nested function.
25281    OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
25282    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
25283                       DAG.getConstant(20, dl, MVT::i64));
25284    OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
25285                                Addr, MachinePointerInfo(TrmpAddr, 20));
25286
25287    unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
25288    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
25289                       DAG.getConstant(22, dl, MVT::i64));
25290    OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
25291                                Addr, MachinePointerInfo(TrmpAddr, 22));
25292
25293    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
25294  } else {
25295    const Function *Func =
25296      cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
25297    CallingConv::ID CC = Func->getCallingConv();
25298    unsigned NestReg;
25299
25300    switch (CC) {
25301    default:
25302      llvm_unreachable("Unsupported calling convention");
25303    case CallingConv::C:
25304    case CallingConv::X86_StdCall: {
25305      // Pass 'nest' parameter in ECX.
25306      // Must be kept in sync with X86CallingConv.td
25307      NestReg = X86::ECX;
25308
25309      // Check that ECX wasn't needed by an 'inreg' parameter.
25310      FunctionType *FTy = Func->getFunctionType();
25311      const AttributeList &Attrs = Func->getAttributes();
25312
25313      if (!Attrs.isEmpty() && !Func->isVarArg()) {
25314        unsigned InRegCount = 0;
25315        unsigned Idx = 1;
25316
25317        for (FunctionType::param_iterator I = FTy->param_begin(),
25318             E = FTy->param_end(); I != E; ++I, ++Idx)
25319          if (Attrs.hasAttribute(Idx, Attribute::InReg)) {
25320            auto &DL = DAG.getDataLayout();
25321            // FIXME: should only count parameters that are lowered to integers.
25322            InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
25323          }
25324
25325        if (InRegCount > 2) {
25326          report_fatal_error("Nest register in use - reduce number of inreg"
25327                             " parameters!");
25328        }
25329      }
25330      break;
25331    }
25332    case CallingConv::X86_FastCall:
25333    case CallingConv::X86_ThisCall:
25334    case CallingConv::Fast:
25335    case CallingConv::Tail:
25336      // Pass 'nest' parameter in EAX.
25337      // Must be kept in sync with X86CallingConv.td
25338      NestReg = X86::EAX;
25339      break;
25340    }
25341
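         // The stores below assemble this sequence into the trampoline:
         //   B8+r <Nest>    movl $Nest, %ecx or %eax (NestReg)
         //   E9 <rel32>     jmp  FPtr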
25342    SDValue OutChains[4];
25343    SDValue Addr, Disp;
25344
25345    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
25346                       DAG.getConstant(10, dl, MVT::i32));
25347    Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
25348
25349    // This is storing the opcode for MOV32ri.
25350    const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
25351    const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
25352    OutChains[0] =
25353        DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
25354                     Trmp, MachinePointerInfo(TrmpAddr));
25355
25356    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
25357                       DAG.getConstant(1, dl, MVT::i32));
25358    OutChains[1] =
25359        DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1),
25360                     /* Alignment = */ 1);
25361
25362    const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
25363    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
25364                       DAG.getConstant(5, dl, MVT::i32));
25365    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
25366                                Addr, MachinePointerInfo(TrmpAddr, 5),
25367                                /* Alignment = */ 1);
25368
25369    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
25370                       DAG.getConstant(6, dl, MVT::i32));
25371    OutChains[3] =
25372        DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6),
25373                     /* Alignment = */ 1);
25374
25375    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
25376  }
25377}
25378
25379SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
25380                                            SelectionDAG &DAG) const {
25381  /*
25382   The rounding mode is in bits 11:10 of the FP control word (FPCW), and
25383   has the following settings:
25384     00 Round to nearest
25385     01 Round to -inf
25386     10 Round to +inf
25387     11 Round to 0
25388
25389  FLT_ROUNDS, on the other hand, expects the following:
25390    -1 Undefined
25391     0 Round to 0
25392     1 Round to nearest
25393     2 Round to +inf
25394     3 Round to -inf
25395
25396  To perform the conversion, we do:
25397    (((((FPCW & 0x800) >> 11) | ((FPCW & 0x400) >> 9)) + 1) & 3)
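
        For example, RC = 01 (round to -inf) gives ((0 >> 11) | (0x400 >> 9)) + 1 = 3,
        RC = 10 gives (1 | 0) + 1 = 2, RC = 00 gives 0 + 1 = 1, and RC = 11 gives
        (3 + 1) & 3 = 0, matching the FLT_ROUNDS encoding above.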
25398  */
25399
25400  MachineFunction &MF = DAG.getMachineFunction();
25401  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
25402  const Align StackAlignment(TFI.getStackAlignment());
25403  MVT VT = Op.getSimpleValueType();
25404  SDLoc DL(Op);
25405
25406  // Save FP Control Word to stack slot
25407  int SSFI =
25408      MF.getFrameInfo().CreateStackObject(2, StackAlignment.value(), false);
25409  SDValue StackSlot =
25410      DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
25411
25412  MachineMemOperand *MMO =
25413      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
25414                              MachineMemOperand::MOStore, 2, 2);
25415
25416  SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
25417  SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
25418                                          DAG.getVTList(MVT::Other),
25419                                          Ops, MVT::i16, MMO);
25420
25421  // Load FP Control Word from stack slot
25422  SDValue CWD =
25423      DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MachinePointerInfo());
25424
25425  // Transform as necessary
25426  SDValue CWD1 =
25427    DAG.getNode(ISD::SRL, DL, MVT::i16,
25428                DAG.getNode(ISD::AND, DL, MVT::i16,
25429                            CWD, DAG.getConstant(0x800, DL, MVT::i16)),
25430                DAG.getConstant(11, DL, MVT::i8));
25431  SDValue CWD2 =
25432    DAG.getNode(ISD::SRL, DL, MVT::i16,
25433                DAG.getNode(ISD::AND, DL, MVT::i16,
25434                            CWD, DAG.getConstant(0x400, DL, MVT::i16)),
25435                DAG.getConstant(9, DL, MVT::i8));
25436
25437  SDValue RetVal =
25438    DAG.getNode(ISD::AND, DL, MVT::i16,
25439                DAG.getNode(ISD::ADD, DL, MVT::i16,
25440                            DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
25441                            DAG.getConstant(1, DL, MVT::i16)),
25442                DAG.getConstant(3, DL, MVT::i16));
25443
25444  return DAG.getNode((VT.getSizeInBits() < 16 ?
25445                      ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
25446}
25447
25448// Split a unary integer op into 2 half-sized ops.
25449static SDValue LowerVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
25450  MVT VT = Op.getSimpleValueType();
25451  unsigned NumElems = VT.getVectorNumElements();
25452  unsigned SizeInBits = VT.getSizeInBits();
25453  MVT EltVT = VT.getVectorElementType();
25454  SDValue Src = Op.getOperand(0);
25455  assert(EltVT == Src.getSimpleValueType().getVectorElementType() &&
25456         "Src and Op should have the same element type!");
25457
25458  // Extract the Lo/Hi vectors
25459  SDLoc dl(Op);
25460  SDValue Lo = extractSubVector(Src, 0, DAG, dl, SizeInBits / 2);
25461  SDValue Hi = extractSubVector(Src, NumElems / 2, DAG, dl, SizeInBits / 2);
25462
25463  MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
25464  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
25465                     DAG.getNode(Op.getOpcode(), dl, NewVT, Lo),
25466                     DAG.getNode(Op.getOpcode(), dl, NewVT, Hi));
25467}
25468
25469// Decompose 256-bit ops into smaller 128-bit ops.
25470static SDValue Lower256IntUnary(SDValue Op, SelectionDAG &DAG) {
25471  assert(Op.getSimpleValueType().is256BitVector() &&
25472         Op.getSimpleValueType().isInteger() &&
25473         "Only handle AVX 256-bit vector integer operation");
25474  return LowerVectorIntUnary(Op, DAG);
25475}
25476
25477// Decompose 512-bit ops into smaller 256-bit ops.
25478static SDValue Lower512IntUnary(SDValue Op, SelectionDAG &DAG) {
25479  assert(Op.getSimpleValueType().is512BitVector() &&
25480         Op.getSimpleValueType().isInteger() &&
25481         "Only handle AVX 512-bit vector integer operation");
25482  return LowerVectorIntUnary(Op, DAG);
25483}
25484
25485/// Lower a vector CTLZ using the natively supported vector CTLZ instruction.
25486//
25487// i8/i16 vectors are implemented using the dword LZCNT vector instruction
25488// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
25489// split the vector, perform the operation on its Lo and Hi parts, and
25490// concatenate the results.
25491static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
25492                                         const X86Subtarget &Subtarget) {
25493  assert(Op.getOpcode() == ISD::CTLZ);
25494  SDLoc dl(Op);
25495  MVT VT = Op.getSimpleValueType();
25496  MVT EltVT = VT.getVectorElementType();
25497  unsigned NumElems = VT.getVectorNumElements();
25498
25499  assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
25500          "Unsupported element type");
25501
25502  // Split the vector; its Lo and Hi parts will be handled in the next iteration.
25503  if (NumElems > 16 ||
25504      (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
25505    return LowerVectorIntUnary(Op, DAG);
25506
25507  MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
25508  assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
25509          "Unsupported value type for operation");
25510
25511  // Use the natively supported vector instruction vplzcntd.
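       // e.g. for i8 elements, lzcnt on the zero-extended i32 value counts 24
       // extra leading zeros introduced by the extension, so 32 - 8 = 24 is
       // subtracted below.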
25512  Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
25513  SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
25514  SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
25515  SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
25516
25517  return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
25518}
25519
25520// Lower CTLZ using a PSHUFB lookup table implementation.
25521static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
25522                                       const X86Subtarget &Subtarget,
25523                                       SelectionDAG &DAG) {
25524  MVT VT = Op.getSimpleValueType();
25525  int NumElts = VT.getVectorNumElements();
25526  int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
25527  MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
25528
25529  // Per-nibble leading zero PSHUFB lookup table.
25530  const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
25531                       /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
25532                       /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
25533                       /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
25534
25535  SmallVector<SDValue, 64> LUTVec;
25536  for (int i = 0; i < NumBytes; ++i)
25537    LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
25538  SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
25539
25540  // Begin by bitcasting the input to a byte vector, then split those bytes
25541  // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
25542  // If the hi input nibble is zero then we add both results together, otherwise
25543  // we just take the hi result (by masking the lo result to zero before the
25544  // add).
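       // e.g. for the byte 0x1A the hi nibble is 1, so the result is LUT[1] = 3;
       // for 0x0A the hi nibble is 0, so we add LUT[0] + LUT[0xA] = 4 + 0 = 4.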
25545  SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
25546  SDValue Zero = DAG.getConstant(0, DL, CurrVT);
25547
25548  SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
25549  SDValue Lo = Op0;
25550  SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
25551  SDValue HiZ;
25552  if (CurrVT.is512BitVector()) {
25553    MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
25554    HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
25555    HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
25556  } else {
25557    HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
25558  }
25559
25560  Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
25561  Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
25562  Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
25563  SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
25564
25565  // Merge the result from vXi8 back to VT, working on the lo/hi halves
25566  // of the current vector width in the same way we did for the nibbles.
25567  // If the upper half of the input element is zero then add the halves'
25568  // leading zero counts together, otherwise just use the upper half's.
25569  // Double the width of the result until we are at target width.
25570  while (CurrVT != VT) {
25571    int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
25572    int CurrNumElts = CurrVT.getVectorNumElements();
25573    MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
25574    MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
25575    SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
25576
25577    // Check if the upper half of the input element is zero.
25578    if (CurrVT.is512BitVector()) {
25579      MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
25580      HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
25581                         DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
25582      HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
25583    } else {
25584      HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
25585                         DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
25586    }
25587    HiZ = DAG.getBitcast(NextVT, HiZ);
25588
25589    // Move the upper/lower halves to the lower bits as we'll be extending to
25590    // NextVT. Mask the lower result to zero if HiZ is true and add the results
25591    // together.
25592    SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
25593    SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
25594    SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
25595    R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
25596    Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
25597    CurrVT = NextVT;
25598  }
25599
25600  return Res;
25601}
25602
25603static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
25604                               const X86Subtarget &Subtarget,
25605                               SelectionDAG &DAG) {
25606  MVT VT = Op.getSimpleValueType();
25607
25608  if (Subtarget.hasCDI() &&
25609      // vXi8 vectors need to be promoted to vXi32, which requires 512-bit vectors.
25610      (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
25611    return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
25612
25613  // Decompose 256-bit ops into smaller 128-bit ops.
25614  if (VT.is256BitVector() && !Subtarget.hasInt256())
25615    return Lower256IntUnary(Op, DAG);
25616
25617  // Decompose 512-bit ops into smaller 256-bit ops.
25618  if (VT.is512BitVector() && !Subtarget.hasBWI())
25619    return Lower512IntUnary(Op, DAG);
25620
25621  assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
25622  return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
25623}
25624
25625static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
25626                         SelectionDAG &DAG) {
25627  MVT VT = Op.getSimpleValueType();
25628  MVT OpVT = VT;
25629  unsigned NumBits = VT.getSizeInBits();
25630  SDLoc dl(Op);
25631  unsigned Opc = Op.getOpcode();
25632
25633  if (VT.isVector())
25634    return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
25635
25636  Op = Op.getOperand(0);
25637  if (VT == MVT::i8) {
25638    // Zero extend to i32 since there is no i8 bsr.
25639    OpVT = MVT::i32;
25640    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
25641  }
25642
25643  // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
25644  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
25645  Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
25646
25647  if (Opc == ISD::CTLZ) {
25648    // If src is zero (i.e. bsr sets ZF), returns NumBits.
25649    SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
25650                     DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
25651                     Op.getValue(1)};
25652    Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
25653  }
25654
25655  // Finally xor with NumBits-1.
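       // bsr returns the index of the highest set bit, so ctlz is
       // (NumBits - 1) - index, which equals index ^ (NumBits - 1) because
       // NumBits is a power of two; the zero-input CMOV value 2*NumBits - 1
       // xors down to exactly NumBits.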
25656  Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
25657                   DAG.getConstant(NumBits - 1, dl, OpVT));
25658
25659  if (VT == MVT::i8)
25660    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
25661  return Op;
25662}
25663
25664static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
25665                         SelectionDAG &DAG) {
25666  MVT VT = Op.getSimpleValueType();
25667  unsigned NumBits = VT.getScalarSizeInBits();
25668  SDValue N0 = Op.getOperand(0);
25669  SDLoc dl(Op);
25670
25671  assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
25672         "Only scalar CTTZ requires custom lowering");
25673
25674  // Issue a bsf (scan bits forward) which also sets EFLAGS.
25675  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
25676  Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
25677
25678  // If src is zero (i.e. bsf sets ZF), returns NumBits.
25679  SDValue Ops[] = {Op, DAG.getConstant(NumBits, dl, VT),
25680                   DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
25681                   Op.getValue(1)};
25682  return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
25683}
25684
25685/// Break a 256-bit integer operation into two new 128-bit ones and then
25686/// concatenate the result back.
25687static SDValue split256IntArith(SDValue Op, SelectionDAG &DAG) {
25688  MVT VT = Op.getSimpleValueType();
25689
25690  assert(VT.is256BitVector() && VT.isInteger() &&
25691         "Unsupported value type for operation");
25692
25693  unsigned NumElems = VT.getVectorNumElements();
25694  SDLoc dl(Op);
25695
25696  // Extract the LHS vectors
25697  SDValue LHS = Op.getOperand(0);
25698  SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
25699  SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
25700
25701  // Extract the RHS vectors
25702  SDValue RHS = Op.getOperand(1);
25703  SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
25704  SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
25705
25706  MVT EltVT = VT.getVectorElementType();
25707  MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
25708
25709  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
25710                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
25711                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
25712}
25713
25714/// Break a 512-bit integer operation into two new 256-bit ones and then
25715/// concatenate the result back.
25716static SDValue split512IntArith(SDValue Op, SelectionDAG &DAG) {
25717  MVT VT = Op.getSimpleValueType();
25718
25719  assert(VT.is512BitVector() && VT.isInteger() &&
25720         "Unsupported value type for operation");
25721
25722  unsigned NumElems = VT.getVectorNumElements();
25723  SDLoc dl(Op);
25724
25725  // Extract the LHS vectors
25726  SDValue LHS = Op.getOperand(0);
25727  SDValue LHS1 = extract256BitVector(LHS, 0, DAG, dl);
25728  SDValue LHS2 = extract256BitVector(LHS, NumElems / 2, DAG, dl);
25729
25730  // Extract the RHS vectors
25731  SDValue RHS = Op.getOperand(1);
25732  SDValue RHS1 = extract256BitVector(RHS, 0, DAG, dl);
25733  SDValue RHS2 = extract256BitVector(RHS, NumElems / 2, DAG, dl);
25734
25735  MVT EltVT = VT.getVectorElementType();
25736  MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
25737
25738  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
25739                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
25740                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
25741}
25742
25743static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
25744                           const X86Subtarget &Subtarget) {
25745  MVT VT = Op.getSimpleValueType();
25746  if (VT == MVT::i16 || VT == MVT::i32)
25747    return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
25748
25749  if (VT.getScalarType() == MVT::i1)
25750    return DAG.getNode(ISD::XOR, SDLoc(Op), VT,
25751                       Op.getOperand(0), Op.getOperand(1));
25752
25753  assert(Op.getSimpleValueType().is256BitVector() &&
25754         Op.getSimpleValueType().isInteger() &&
25755         "Only handle AVX 256-bit vector integer operation");
25756  return split256IntArith(Op, DAG);
25757}
25758
25759static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
25760                                  const X86Subtarget &Subtarget) {
25761  MVT VT = Op.getSimpleValueType();
25762  SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
25763  unsigned Opcode = Op.getOpcode();
25764  if (VT.getScalarType() == MVT::i1) {
25765    SDLoc dl(Op);
25766    switch (Opcode) {
25767    default: llvm_unreachable("Expected saturated arithmetic opcode");
25768    case ISD::UADDSAT:
25769    case ISD::SADDSAT:
25770      // *addsat i1 X, Y --> X | Y
25771      return DAG.getNode(ISD::OR, dl, VT, X, Y);
25772    case ISD::USUBSAT:
25773    case ISD::SSUBSAT:
25774      // *subsat i1 X, Y --> X & ~Y
25775      return DAG.getNode(ISD::AND, dl, VT, X, DAG.getNOT(dl, Y, VT));
25776    }
25777  }
25778
25779  if (VT.is128BitVector()) {
25780    // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
25781    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25782    EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
25783                                                 *DAG.getContext(), VT);
25784    SDLoc DL(Op);
25785    if (Opcode == ISD::UADDSAT && !TLI.isOperationLegal(ISD::UMIN, VT)) {
25786      // uaddsat X, Y --> (X >u (X + Y)) ? -1 : X + Y
25787      SDValue Add = DAG.getNode(ISD::ADD, DL, VT, X, Y);
25788      SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Add, ISD::SETUGT);
25789      return DAG.getSelect(DL, VT, Cmp, DAG.getAllOnesConstant(DL, VT), Add);
25790    }
25791    if (Opcode == ISD::USUBSAT && !TLI.isOperationLegal(ISD::UMAX, VT)) {
25792      // usubsat X, Y --> (X >u Y) ? X - Y : 0
25793      SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
25794      SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
25795      return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
25796    }
25797    // Use default expansion.
25798    return SDValue();
25799  }
25800
25801  assert(Op.getSimpleValueType().is256BitVector() &&
25802         Op.getSimpleValueType().isInteger() &&
25803         "Only handle AVX 256-bit vector integer operation");
25804  return split256IntArith(Op, DAG);
25805}
25806
25807static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
25808                        SelectionDAG &DAG) {
25809  MVT VT = Op.getSimpleValueType();
25810  if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
25811    // Since X86 does not have CMOV for 8-bit integer, we don't convert
25812    // 8-bit integer abs to NEG and CMOV.
25813    SDLoc DL(Op);
25814    SDValue N0 = Op.getOperand(0);
25815    SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
25816                              DAG.getConstant(0, DL, VT), N0);
25817    SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_GE, DL, MVT::i8),
25818                     SDValue(Neg.getNode(), 1)};
25819    return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
25820  }
25821
25822  // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
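       // BLENDV selects on the sign bit of each mask element, so lanes where X
       // is negative take the negated value and non-negative lanes keep X.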
25823  if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
25824    SDLoc DL(Op);
25825    SDValue Src = Op.getOperand(0);
25826    SDValue Sub =
25827        DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
25828    return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
25829  }
25830
25831  if (VT.is256BitVector() && !Subtarget.hasInt256()) {
25832    assert(VT.isInteger() &&
25833           "Only handle AVX 256-bit vector integer operation");
25834    return Lower256IntUnary(Op, DAG);
25835  }
25836
25837  // Default to expand.
25838  return SDValue();
25839}
25840
25841static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
25842  MVT VT = Op.getSimpleValueType();
25843
25844  // For AVX1 cases, split to use legal ops (everything but v4i64).
25845  if (VT.getScalarType() != MVT::i64 && VT.is256BitVector())
25846    return split256IntArith(Op, DAG);
25847
25848  SDLoc DL(Op);
25849  unsigned Opcode = Op.getOpcode();
25850  SDValue N0 = Op.getOperand(0);
25851  SDValue N1 = Op.getOperand(1);
25852
25853  // For pre-SSE41, we can perform UMIN/UMAX v8i16 by flipping the signbit,
25854  // using the SMIN/SMAX instructions and flipping the signbit back.
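       // Flipping the sign bit is equivalent to subtracting 0x8000 modulo 2^16,
       // which maps the unsigned ordering onto the signed ordering.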
25855  if (VT == MVT::v8i16) {
25856    assert((Opcode == ISD::UMIN || Opcode == ISD::UMAX) &&
25857           "Unexpected MIN/MAX opcode");
25858    SDValue Sign = DAG.getConstant(APInt::getSignedMinValue(16), DL, VT);
25859    N0 = DAG.getNode(ISD::XOR, DL, VT, N0, Sign);
25860    N1 = DAG.getNode(ISD::XOR, DL, VT, N1, Sign);
25861    Opcode = (Opcode == ISD::UMIN ? ISD::SMIN : ISD::SMAX);
25862    SDValue Result = DAG.getNode(Opcode, DL, VT, N0, N1);
25863    return DAG.getNode(ISD::XOR, DL, VT, Result, Sign);
25864  }
25865
25866  // Else, expand to a compare/select.
25867  ISD::CondCode CC;
25868  switch (Opcode) {
25869  case ISD::SMIN: CC = ISD::CondCode::SETLT;  break;
25870  case ISD::SMAX: CC = ISD::CondCode::SETGT;  break;
25871  case ISD::UMIN: CC = ISD::CondCode::SETULT; break;
25872  case ISD::UMAX: CC = ISD::CondCode::SETUGT; break;
25873  default: llvm_unreachable("Unknown MINMAX opcode");
25874  }
25875
25876  SDValue Cond = DAG.getSetCC(DL, VT, N0, N1, CC);
25877  return DAG.getSelect(DL, VT, Cond, N0, N1);
25878}
25879
25880static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
25881                        SelectionDAG &DAG) {
25882  SDLoc dl(Op);
25883  MVT VT = Op.getSimpleValueType();
25884
25885  if (VT.getScalarType() == MVT::i1)
25886    return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));
25887
25888  // Decompose 256-bit ops into 128-bit ops.
25889  if (VT.is256BitVector() && !Subtarget.hasInt256())
25890    return split256IntArith(Op, DAG);
25891
25892  SDValue A = Op.getOperand(0);
25893  SDValue B = Op.getOperand(1);
25894
25895  // Lower v16i8/v32i8/v64i8 mul as extension to v8i16/v16i16/v32i16
25896  // vector pairs, multiply and truncate.
25897  if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
25898    unsigned NumElts = VT.getVectorNumElements();
25899
25900    if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
25901        (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
25902      MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
25903      return DAG.getNode(
25904          ISD::TRUNCATE, dl, VT,
25905          DAG.getNode(ISD::MUL, dl, ExVT,
25906                      DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
25907                      DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
25908    }
25909
25910    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25911
25912    // Extract the lo/hi parts and any-extend them to i16.
25913    // We only use the low byte of each 16-bit result element of the pmullw
25914    // (the high byte is masked off below), so it doesn't matter what's in
25915    // the high byte of each 16-bit input element.
25916    SDValue Undef = DAG.getUNDEF(VT);
25917    SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
25918    SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));
25919
25920    SDValue BLo, BHi;
25921    if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
25922      // If the RHS is a constant, manually unpackl/unpackh.
25923      SmallVector<SDValue, 16> LoOps, HiOps;
25924      for (unsigned i = 0; i != NumElts; i += 16) {
25925        for (unsigned j = 0; j != 8; ++j) {
25926          LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
25927                                               MVT::i16));
25928          HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
25929                                               MVT::i16));
25930        }
25931      }
25932
25933      BLo = DAG.getBuildVector(ExVT, dl, LoOps);
25934      BHi = DAG.getBuildVector(ExVT, dl, HiOps);
25935    } else {
25936      BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
25937      BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
25938    }
25939
25940    // Multiply, mask the lower 8 bits of the lo/hi results and pack.
25941    SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
25942    SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
25943    RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
25944    RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
25945    return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
25946  }
25947
25948  // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
25949  if (VT == MVT::v4i32) {
25950    assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
25951           "Should not custom lower when pmulld is available!");
25952
25953    // Extract the odd parts.
25954    static const int UnpackMask[] = { 1, -1, 3, -1 };
25955    SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
25956    SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
25957
25958    // Multiply the even parts.
25959    SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
25960                                DAG.getBitcast(MVT::v2i64, A),
25961                                DAG.getBitcast(MVT::v2i64, B));
25962    // Now multiply odd parts.
25963    SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
25964                               DAG.getBitcast(MVT::v2i64, Aodds),
25965                               DAG.getBitcast(MVT::v2i64, Bodds));
25966
25967    Evens = DAG.getBitcast(VT, Evens);
25968    Odds = DAG.getBitcast(VT, Odds);
25969
25970    // Merge the two vectors back together with a shuffle. This expands into 2
25971    // shuffles.
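         // As v4i32, Evens = <lo(a0b0)|hi(a0b0)|lo(a2b2)|hi(a2b2)> and
         // Odds = <lo(a1b1)|hi(a1b1)|lo(a3b3)|hi(a3b3)>, so taking elements
         // {0,4,2,6} yields the low 32 bits of each product in order.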
25972    static const int ShufMask[] = { 0, 4, 2, 6 };
25973    return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
25974  }
25975
25976  assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
25977         "Only know how to lower V2I64/V4I64/V8I64 multiply");
25978  assert(!Subtarget.hasDQI() && "DQI should use MULLQ");
25979
25980  //  Ahi = psrlqi(a, 32);
25981  //  Bhi = psrlqi(b, 32);
25982  //
25983  //  AloBlo = pmuludq(a, b);
25984  //  AloBhi = pmuludq(a, Bhi);
25985  //  AhiBlo = pmuludq(Ahi, b);
25986  //
25987  //  Hi = psllqi(AloBhi + AhiBlo, 32);
25988  //  return AloBlo + Hi;
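       //
       //  This follows from a = 2^32*Ahi + Alo and b = 2^32*Bhi + Blo:
       //  a*b = 2^64*(Ahi*Bhi) + 2^32*(Alo*Bhi + Ahi*Blo) + Alo*Blo, and the
       //  2^64 term vanishes modulo 2^64.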
25989  KnownBits AKnown = DAG.computeKnownBits(A);
25990  KnownBits BKnown = DAG.computeKnownBits(B);
25991
25992  APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
25993  bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
25994  bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);
25995
25996  APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
25997  bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
25998  bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);
25999
26000  SDValue Zero = DAG.getConstant(0, dl, VT);
26001
26002  // Only multiply lo/hi halves that aren't known to be zero.
26003  SDValue AloBlo = Zero;
26004  if (!ALoIsZero && !BLoIsZero)
26005    AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
26006
26007  SDValue AloBhi = Zero;
26008  if (!ALoIsZero && !BHiIsZero) {
26009    SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
26010    AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
26011  }
26012
26013  SDValue AhiBlo = Zero;
26014  if (!AHiIsZero && !BLoIsZero) {
26015    SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
26016    AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
26017  }
26018
26019  SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
26020  Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
26021
26022  return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
26023}
26024
26025static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
26026                         SelectionDAG &DAG) {
26027  SDLoc dl(Op);
26028  MVT VT = Op.getSimpleValueType();
26029  bool IsSigned = Op->getOpcode() == ISD::MULHS;
26030  unsigned NumElts = VT.getVectorNumElements();
26031  SDValue A = Op.getOperand(0);
26032  SDValue B = Op.getOperand(1);
26033
26034  // Decompose 256-bit ops into 128-bit ops.
26035  if (VT.is256BitVector() && !Subtarget.hasInt256())
26036    return split256IntArith(Op, DAG);
26037
26038  if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
26039    assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
26040           (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
26041           (VT == MVT::v16i32 && Subtarget.hasAVX512()));
26042
26043    // PMULxD operations multiply each even value (starting at 0) of LHS with
26044    // the related value of RHS and produce a widened result.
26045    // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
26046    // => <2 x i64> <ae|cg>
26047    //
26048    // In other words, to have all the results, we need to perform two PMULxD:
26049    // 1. one with the even values.
26050    // 2. one with the odd values.
26051    // To achieve #2, we need to place the odd values at an even position.
26052    //
26053    // Place the odd value at an even position (basically, shift all values 1
26054    // step to the left):
26055    const int Mask[] = {1, -1,  3, -1,  5, -1,  7, -1,
26056                        9, -1, 11, -1, 13, -1, 15, -1};
26057    // <a|b|c|d> => <b|undef|d|undef>
26058    SDValue Odd0 = DAG.getVectorShuffle(VT, dl, A, A,
26059                                        makeArrayRef(&Mask[0], NumElts));
26060    // <e|f|g|h> => <f|undef|h|undef>
26061    SDValue Odd1 = DAG.getVectorShuffle(VT, dl, B, B,
26062                                        makeArrayRef(&Mask[0], NumElts));
26063
26064    // Emit two multiplies, one for the lower 2 ints and one for the higher 2
26065    // ints.
26066    MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
26067    unsigned Opcode =
26068        (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
26069    // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
26070    // => <2 x i64> <ae|cg>
26071    SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
26072                                                  DAG.getBitcast(MulVT, A),
26073                                                  DAG.getBitcast(MulVT, B)));
26074    // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
26075    // => <2 x i64> <bf|dh>
26076    SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
26077                                                  DAG.getBitcast(MulVT, Odd0),
26078                                                  DAG.getBitcast(MulVT, Odd1)));
26079
26080    // Shuffle it back into the right order.
26081    SmallVector<int, 16> ShufMask(NumElts);
26082    for (int i = 0; i != (int)NumElts; ++i)
26083      ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
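         // e.g. for v4i32 this is {1, 5, 3, 7}: the high 32-bit half of each
         // 64-bit product, i.e. <hi(ae)|hi(bf)|hi(cg)|hi(dh)>.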
26084
26085    SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);
26086
26087    // If we have a signed multiply but no PMULDQ, fix up the result of the
26088    // unsigned multiply.
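         // mulhs(a, b) = mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0), since
         // reinterpreting a negative 32-bit value as unsigned adds 2^32 to it.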
26089    if (IsSigned && !Subtarget.hasSSE41()) {
26090      SDValue Zero = DAG.getConstant(0, dl, VT);
26091      SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
26092                               DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
26093      SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
26094                               DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);
26095
26096      SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
26097      Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
26098    }
26099
26100    return Res;
26101  }
26102
26103  // Only i8 vectors should need custom lowering after this.
26104  assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
26105         (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
26106         "Unsupported vector type");
26107
26108  // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
26109  // logical shift down the upper half and pack back to i8.
26110
26111  // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
26112  // and then ashr/lshr the upper bits down to the lower bits before multiply.
26113  unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
26114
26115  if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
26116      (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
26117    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
26118    SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
26119    SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
26120    SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
26121    Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
26122    return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
26123  }
26124
26125  // For signed 512-bit vectors, split into 256-bit vectors to allow the
26126  // sign-extension to occur.
26127  if (VT == MVT::v64i8 && IsSigned)
26128    return split512IntArith(Op, DAG);
26129
26130  // Signed AVX2 implementation - extend xmm subvectors to ymm.
26131  if (VT == MVT::v32i8 && IsSigned) {
26132    MVT ExVT = MVT::v16i16;
26133    SDValue ALo = extract128BitVector(A, 0, DAG, dl);
26134    SDValue BLo = extract128BitVector(B, 0, DAG, dl);
26135    SDValue AHi = extract128BitVector(A, NumElts / 2, DAG, dl);
26136    SDValue BHi = extract128BitVector(B, NumElts / 2, DAG, dl);
26137    ALo = DAG.getNode(ExAVX, dl, ExVT, ALo);
26138    BLo = DAG.getNode(ExAVX, dl, ExVT, BLo);
26139    AHi = DAG.getNode(ExAVX, dl, ExVT, AHi);
26140    BHi = DAG.getNode(ExAVX, dl, ExVT, BHi);
26141    SDValue Lo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
26142    SDValue Hi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
26143    Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Lo, 8, DAG);
26144    Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Hi, 8, DAG);
26145
26146    // Bitcast back to VT and then pack all the even elements from Lo and Hi.
26147    // Shuffle lowering should turn this into PACKUS+PERMQ
26148    Lo = DAG.getBitcast(VT, Lo);
26149    Hi = DAG.getBitcast(VT, Hi);
26150    return DAG.getVectorShuffle(VT, dl, Lo, Hi,
26151                                { 0,  2,  4,  6,  8, 10, 12, 14,
26152                                 16, 18, 20, 22, 24, 26, 28, 30,
26153                                 32, 34, 36, 38, 40, 42, 44, 46,
26154                                 48, 50, 52, 54, 56, 58, 60, 62});
26155  }
26156
26157  // For signed v16i8 and all unsigned vXi8 we will unpack the low and high
26158  // half of each 128-bit lane to widen to a vXi16 type. Do the multiplies,
26159  // shift the results and pack the half lane results back together.
26160
26161  MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
26162
26163  static const int PSHUFDMask[] = { 8,  9, 10, 11, 12, 13, 14, 15,
26164                                   -1, -1, -1, -1, -1, -1, -1, -1};
26165
26166  // Extract the lo/hi parts and zero/sign extend to i16.
26167  // Only use SSE4.1 instructions for signed v16i8 where using unpack requires
26168  // shifts to sign extend. Using unpack for unsigned only requires an xor to
26169  // create zeros and a copy due to tied register constraints pre-AVX. But using
26170  // zero_extend_vector_inreg would require an additional pshufd for the high
26171  // part.
26172
26173  SDValue ALo, AHi;
26174  if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
26175    ALo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, A);
26176
26177    AHi = DAG.getVectorShuffle(VT, dl, A, A, PSHUFDMask);
26178    AHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, AHi);
26179  } else if (IsSigned) {
26180    ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), A));
26181    AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), A));
26182
26183    ALo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, ALo, 8, DAG);
26184    AHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, AHi, 8, DAG);
26185  } else {
26186    ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A,
26187                                          DAG.getConstant(0, dl, VT)));
26188    AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A,
26189                                          DAG.getConstant(0, dl, VT)));
26190  }
26191
26192  SDValue BLo, BHi;
26193  if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
26194    // If the RHS is a constant, manually unpackl/unpackh and extend.
26195    SmallVector<SDValue, 16> LoOps, HiOps;
26196    for (unsigned i = 0; i != NumElts; i += 16) {
26197      for (unsigned j = 0; j != 8; ++j) {
26198        SDValue LoOp = B.getOperand(i + j);
26199        SDValue HiOp = B.getOperand(i + j + 8);
26200
26201        if (IsSigned) {
26202          LoOp = DAG.getSExtOrTrunc(LoOp, dl, MVT::i16);
26203          HiOp = DAG.getSExtOrTrunc(HiOp, dl, MVT::i16);
26204        } else {
26205          LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
26206          HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
26207        }
26208
26209        LoOps.push_back(LoOp);
26210        HiOps.push_back(HiOp);
26211      }
26212    }
26213
26214    BLo = DAG.getBuildVector(ExVT, dl, LoOps);
26215    BHi = DAG.getBuildVector(ExVT, dl, HiOps);
26216  } else if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
26217    BLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, B);
26218
26219    BHi = DAG.getVectorShuffle(VT, dl, B, B, PSHUFDMask);
26220    BHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, BHi);
26221  } else if (IsSigned) {
26222    BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), B));
26223    BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), B));
26224
26225    BLo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BLo, 8, DAG);
26226    BHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BHi, 8, DAG);
26227  } else {
26228    BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B,
26229                                          DAG.getConstant(0, dl, VT)));
26230    BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B,
26231                                          DAG.getConstant(0, dl, VT)));
26232  }
26233
26234  // Multiply, lshr the upper 8 bits to the lower 8 bits of the lo/hi results and
26235  // pack back to vXi8.
26236  SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
26237  SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
26238  RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RLo, 8, DAG);
26239  RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RHi, 8, DAG);
26240
26241  // Bitcast back to VT and then pack all the even elements from Lo and Hi.
26242  return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
26243}
26244
26245SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
26246  assert(Subtarget.isTargetWin64() && "Unexpected target");
26247  EVT VT = Op.getValueType();
26248  assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
26249         "Unexpected return type for lowering");
26250
26251  RTLIB::Libcall LC;
26252  bool isSigned;
26253  switch (Op->getOpcode()) {
26254  default: llvm_unreachable("Unexpected request for libcall!");
26255  case ISD::SDIV:      isSigned = true;  LC = RTLIB::SDIV_I128;    break;
26256  case ISD::UDIV:      isSigned = false; LC = RTLIB::UDIV_I128;    break;
26257  case ISD::SREM:      isSigned = true;  LC = RTLIB::SREM_I128;    break;
26258  case ISD::UREM:      isSigned = false; LC = RTLIB::UREM_I128;    break;
26259  case ISD::SDIVREM:   isSigned = true;  LC = RTLIB::SDIVREM_I128; break;
26260  case ISD::UDIVREM:   isSigned = false; LC = RTLIB::UDIVREM_I128; break;
26261  }
26262
26263  SDLoc dl(Op);
26264  SDValue InChain = DAG.getEntryNode();
26265
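  // The Win64 calling convention has no native 128-bit integer type, so each
  // i128 operand is spilled to a 16-byte aligned stack slot and passed to the
  // libcall by pointer; the result is modelled as a v2i64 register return and
  // bitcast back to the original 128-bit type at the end of this function.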
26266  TargetLowering::ArgListTy Args;
26267  TargetLowering::ArgListEntry Entry;
26268  for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
26269    EVT ArgVT = Op->getOperand(i).getValueType();
26270    assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
26271           "Unexpected argument type for lowering");
26272    SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
26273    Entry.Node = StackPtr;
26274    InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
26275                           MachinePointerInfo(), /* Alignment = */ 16);
26276    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
26277    Entry.Ty = PointerType::get(ArgTy, 0);
26278    Entry.IsSExt = false;
26279    Entry.IsZExt = false;
26280    Args.push_back(Entry);
26281  }
26282
26283  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
26284                                         getPointerTy(DAG.getDataLayout()));
26285
26286  TargetLowering::CallLoweringInfo CLI(DAG);
26287  CLI.setDebugLoc(dl)
26288      .setChain(InChain)
26289      .setLibCallee(
26290          getLibcallCallingConv(LC),
26291          static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
26292          std::move(Args))
26293      .setInRegister()
26294      .setSExtResult(isSigned)
26295      .setZExtResult(!isSigned);
26296
26297  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
26298  return DAG.getBitcast(VT, CallInfo.first);
26299}
26300
26301// Return true if the required (according to Opcode) shift-imm form is natively
26302// supported by the Subtarget
26303static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
26304                                        unsigned Opcode) {
26305  if (VT.getScalarSizeInBits() < 16)
26306    return false;
26307
26308  if (VT.is512BitVector() && Subtarget.hasAVX512() &&
26309      (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
26310    return true;
26311
26312  bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
26313                (VT.is256BitVector() && Subtarget.hasInt256());
26314
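  // Note that there is no arithmetic shift for 64-bit elements (VPSRAQ) before
  // AVX-512, hence the v2i64/v4i64 exclusion from AShift below.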
26315  bool AShift = LShift && (Subtarget.hasAVX512() ||
26316                           (VT != MVT::v2i64 && VT != MVT::v4i64));
26317  return (Opcode == ISD::SRA) ? AShift : LShift;
26318}
26319
26320// The shift amount is a variable, but it is the same for all vector lanes.
26321// These instructions are defined together with shift-immediate.
26322static
26323bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
26324                                      unsigned Opcode) {
26325  return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
26326}
26327
26328// Return true if the required (according to Opcode) variable-shift form is
26329// natively supported by the Subtarget
26330static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
26331                                    unsigned Opcode) {
26332
26333  if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
26334    return false;
26335
26336  // vXi16 supported only on AVX-512, BWI
26337  if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
26338    return false;
26339
26340  if (Subtarget.hasAVX512())
26341    return true;
26342
26343  bool LShift = VT.is128BitVector() || VT.is256BitVector();
26344    bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
26345  return (Opcode == ISD::SRA) ? AShift : LShift;
26346}
26347
26348static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
26349                                         const X86Subtarget &Subtarget) {
26350  MVT VT = Op.getSimpleValueType();
26351  SDLoc dl(Op);
26352  SDValue R = Op.getOperand(0);
26353  SDValue Amt = Op.getOperand(1);
26354  unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);
26355
26356  auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
26357    assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
26358    MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
26359    SDValue Ex = DAG.getBitcast(ExVT, R);
26360
26361    // ashr(R, 63) === cmp_slt(R, 0)
26362    if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
26363      assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
26364             "Unsupported PCMPGT op");
26365      return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
26366    }
26367
26368    if (ShiftAmt >= 32) {
26369      // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
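      // The shuffles below pick Lower's odd (shifted high-half) lanes for the
      // low i32 of each i64 result and Upper's odd (sign-splat) lanes for the
      // high i32, giving a fully sign-extended i64 result.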
26370      SDValue Upper =
26371          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
26372      SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
26373                                                 ShiftAmt - 32, DAG);
26374      if (VT == MVT::v2i64)
26375        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
26376      if (VT == MVT::v4i64)
26377        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
26378                                  {9, 1, 11, 3, 13, 5, 15, 7});
26379    } else {
26380      // SRA upper i32, SRL whole i64 and select lower i32.
26381      SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
26382                                                 ShiftAmt, DAG);
26383      SDValue Lower =
26384          getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
26385      Lower = DAG.getBitcast(ExVT, Lower);
26386      if (VT == MVT::v2i64)
26387        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
26388      if (VT == MVT::v4i64)
26389        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
26390                                  {8, 1, 10, 3, 12, 5, 14, 7});
26391    }
26392    return DAG.getBitcast(VT, Ex);
26393  };
26394
26395  // Optimize shl/srl/sra with constant shift amount.
26396  APInt APIntShiftAmt;
26397  if (!X86::isConstantSplat(Amt, APIntShiftAmt))
26398    return SDValue();
26399
26400  // If the shift amount is out of range, return undef.
26401  if (APIntShiftAmt.uge(VT.getScalarSizeInBits()))
26402    return DAG.getUNDEF(VT);
26403
26404  uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
26405
26406  if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
26407    return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
26408
26409  // i64 SRA needs to be performed as partial shifts.
26410  if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
26411       (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
26412      Op.getOpcode() == ISD::SRA)
26413    return ArithmeticShiftRight64(ShiftAmt);
26414
26415  if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
26416      VT == MVT::v64i8) {
26417    unsigned NumElts = VT.getVectorNumElements();
26418    MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
26419
26420    // Simple i8 add case
26421    if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
26422      return DAG.getNode(ISD::ADD, dl, VT, R, R);
26423
26424    // ashr(R, 7)  === cmp_slt(R, 0)
26425    if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
26426      SDValue Zeros = DAG.getConstant(0, dl, VT);
26427      if (VT.is512BitVector()) {
26428        assert(VT == MVT::v64i8 && "Unexpected element type!");
26429        SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
26430        return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
26431      }
26432      return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
26433    }
26434
26435    // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
26436    if (VT == MVT::v16i8 && Subtarget.hasXOP())
26437      return SDValue();
26438
26439    if (Op.getOpcode() == ISD::SHL) {
26440      // Make a large shift.
26441      SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
26442                                               ShiftAmt, DAG);
26443      SHL = DAG.getBitcast(VT, SHL);
26444      // Zero out the rightmost bits.
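      // e.g. ShiftAmt == 3 gives a per-byte mask of 0xF8, clearing the bits
      // that may have been shifted in from the neighbouring byte by the wider
      // i16 shift.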
26445      APInt Mask = APInt::getHighBitsSet(8, 8 - ShiftAmt);
26446      return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getConstant(Mask, dl, VT));
26447    }
26448    if (Op.getOpcode() == ISD::SRL) {
26449      // Make a large shift.
26450      SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
26451                                               ShiftAmt, DAG);
26452      SRL = DAG.getBitcast(VT, SRL);
26453      // Zero out the leftmost bits.
26454      return DAG.getNode(ISD::AND, dl, VT, SRL,
26455                         DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
26456    }
26457    if (Op.getOpcode() == ISD::SRA) {
26458      // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
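      // Mask holds the logically shifted sign bit; the XOR/SUB pair then
      // sign-extends it. e.g. i8, Amt = 5, R = 0xF0 (-16): Mask = 128 >> 5 =
      // 0x04, and ((0xF0 >>u 5) ^ 0x04) - 0x04 = (0x07 ^ 0x04) - 0x04 = 0xFF,
      // i.e. -1, matching -16 >>s 5.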
26459      SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
26460
26461      SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
26462      Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
26463      Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
26464      return Res;
26465    }
26466    llvm_unreachable("Unknown shift opcode.");
26467  }
26468
26469  return SDValue();
26470}
26471
26472static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
26473                                        const X86Subtarget &Subtarget) {
26474  MVT VT = Op.getSimpleValueType();
26475  SDLoc dl(Op);
26476  SDValue R = Op.getOperand(0);
26477  SDValue Amt = Op.getOperand(1);
26478  unsigned Opcode = Op.getOpcode();
26479  unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);
26480  unsigned X86OpcV = getTargetVShiftUniformOpcode(Opcode, true);
26481
26482  if (SDValue BaseShAmt = DAG.getSplatValue(Amt)) {
26483    if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode)) {
26484      MVT EltVT = VT.getVectorElementType();
26485      assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
26486      if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
26487        BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
26488      else if (EltVT.bitsLT(MVT::i32))
26489        BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
26490
26491      return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, Subtarget, DAG);
26492    }
26493
26494    // vXi8 shifts - shift as v8i16 + mask result.
26495    if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
26496         (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
26497         VT == MVT::v64i8) &&
26498        !Subtarget.hasXOP()) {
26499      unsigned NumElts = VT.getVectorNumElements();
26500      MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
26501      if (SupportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
26502        unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
26503        unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
26504        BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
26505
26506        // Create the mask using vXi16 shifts. For shift-rights we need to move
26507        // the upper byte down before splatting the vXi8 mask.
26508        SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
26509        BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
26510                                      BaseShAmt, Subtarget, DAG);
26511        if (Opcode != ISD::SHL)
26512          BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
26513                                               8, DAG);
26514        BitMask = DAG.getBitcast(VT, BitMask);
26515        BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
26516                                       SmallVector<int, 64>(NumElts, 0));
26517
26518        SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
26519                                          DAG.getBitcast(ExtVT, R), BaseShAmt,
26520                                          Subtarget, DAG);
26521        Res = DAG.getBitcast(VT, Res);
26522        Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);
26523
26524        if (Opcode == ISD::SRA) {
26525          // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
26526          // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
26527          SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
26528          SignMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask,
26529                                         BaseShAmt, Subtarget, DAG);
26530          SignMask = DAG.getBitcast(VT, SignMask);
26531          Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
26532          Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
26533        }
26534        return Res;
26535      }
26536    }
26537  }
26538
26539  // Check cases (mainly 32-bit) where i64 is expanded into high and low parts.
26540  if (VT == MVT::v2i64 && Amt.getOpcode() == ISD::BITCAST &&
26541      Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
26542    Amt = Amt.getOperand(0);
26543    unsigned Ratio = 64 / Amt.getScalarValueSizeInBits();
26544    std::vector<SDValue> Vals(Ratio);
26545    for (unsigned i = 0; i != Ratio; ++i)
26546      Vals[i] = Amt.getOperand(i);
26547    for (unsigned i = Ratio, e = Amt.getNumOperands(); i != e; i += Ratio) {
26548      for (unsigned j = 0; j != Ratio; ++j)
26549        if (Vals[j] != Amt.getOperand(i + j))
26550          return SDValue();
26551    }
26552
26553    if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode()))
26554      return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1));
26555  }
26556  return SDValue();
26557}
26558
26559// Convert a shift/rotate left amount to a multiplication scale factor.
26560static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
26561                                       const X86Subtarget &Subtarget,
26562                                       SelectionDAG &DAG) {
26563  MVT VT = Amt.getSimpleValueType();
26564  if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
26565        (Subtarget.hasInt256() && VT == MVT::v16i16) ||
26566        (!Subtarget.hasAVX512() && VT == MVT::v16i8)))
26567    return SDValue();
26568
26569  if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
26570    SmallVector<SDValue, 8> Elts;
26571    MVT SVT = VT.getVectorElementType();
26572    unsigned SVTBits = SVT.getSizeInBits();
26573    APInt One(SVTBits, 1);
26574    unsigned NumElems = VT.getVectorNumElements();
26575
26576    for (unsigned i = 0; i != NumElems; ++i) {
26577      SDValue Op = Amt->getOperand(i);
26578      if (Op->isUndef()) {
26579        Elts.push_back(Op);
26580        continue;
26581      }
26582
26583      ConstantSDNode *ND = cast<ConstantSDNode>(Op);
26584      APInt C(SVTBits, ND->getZExtValue());
26585      uint64_t ShAmt = C.getZExtValue();
26586      if (ShAmt >= SVTBits) {
26587        Elts.push_back(DAG.getUNDEF(SVT));
26588        continue;
26589      }
26590      Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
26591    }
26592    return DAG.getBuildVector(VT, dl, Elts);
26593  }
26594
26595  // If the target doesn't support variable shifts, use either FP conversion
26596  // or integer multiplication to avoid shifting each element individually.
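  // For v4i32 the scale 2^Amt is built directly as an IEEE-754 float: shifting
  // Amt into the exponent field and adding the bit pattern of 1.0f (0x3f800000)
  // yields the float 2^Amt, which FP_TO_SINT converts back to an integer.
  // e.g. Amt = 3: (3 << 23) + 0x3f800000 = 0x41000000 = 8.0f.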
26597  if (VT == MVT::v4i32) {
26598    Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
26599    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
26600                      DAG.getConstant(0x3f800000U, dl, VT));
26601    Amt = DAG.getBitcast(MVT::v4f32, Amt);
26602    return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
26603  }
26604
26605  // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
26606  if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
26607    SDValue Z = DAG.getConstant(0, dl, VT);
26608    SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
26609    SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
26610    Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
26611    Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
26612    if (Subtarget.hasSSE41())
26613      return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
26614
26615    return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, Lo),
26616                                        DAG.getBitcast(VT, Hi),
26617                                        {0, 2, 4, 6, 8, 10, 12, 14});
26618  }
26619
26620  return SDValue();
26621}
26622
26623static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
26624                          SelectionDAG &DAG) {
26625  MVT VT = Op.getSimpleValueType();
26626  SDLoc dl(Op);
26627  SDValue R = Op.getOperand(0);
26628  SDValue Amt = Op.getOperand(1);
26629  unsigned EltSizeInBits = VT.getScalarSizeInBits();
26630  bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
26631
26632  unsigned Opc = Op.getOpcode();
26633  unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
26634  unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);
26635
26636  assert(VT.isVector() && "Custom lowering only for vector shifts!");
26637  assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
26638
26639  if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
26640    return V;
26641
26642  if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget))
26643    return V;
26644
26645  if (SupportedVectorVarShift(VT, Subtarget, Opc))
26646    return Op;
26647
26648  // XOP has 128-bit variable logical/arithmetic shifts.
26649  // +ve/-ve Amt = shift left/right.
26650  if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
26651                             VT == MVT::v8i16 || VT == MVT::v16i8)) {
26652    if (Opc == ISD::SRL || Opc == ISD::SRA) {
26653      SDValue Zero = DAG.getConstant(0, dl, VT);
26654      Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
26655    }
26656    if (Opc == ISD::SHL || Opc == ISD::SRL)
26657      return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
26658    if (Opc == ISD::SRA)
26659      return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
26660  }
26661
26662  // v2i64 vector logical shifts can efficiently avoid scalarization - do the
26663  // shifts per-lane and then shuffle the partial results back together.
26664  if (VT == MVT::v2i64 && Opc != ISD::SRA) {
26665    // Splat the shift amounts so the scalar shifts above will catch it.
26666    SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
26667    SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
26668    SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
26669    SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
26670    return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
26671  }
26672
26673  // i64 vector arithmetic shift can be emulated with the transform:
26674  // M = lshr(SIGN_MASK, Amt)
26675  // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
26676  if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
26677      Opc == ISD::SRA) {
26678    SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
26679    SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
26680    R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
26681    R = DAG.getNode(ISD::XOR, dl, VT, R, M);
26682    R = DAG.getNode(ISD::SUB, dl, VT, R, M);
26683    return R;
26684  }
26685
26686  // If possible, lower this shift as a sequence of two shifts by
26687  // constant plus a BLENDing shuffle instead of scalarizing it.
26688  // Example:
26689  //   (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
26690  //
26691  // Could be rewritten as:
26692  //   (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
26693  //
26694  // The advantage is that the two shifts from the example would be
26695  // lowered as X86ISD::VSRLI nodes in parallel before blending.
26696  if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
26697                      (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
26698    SDValue Amt1, Amt2;
26699    unsigned NumElts = VT.getVectorNumElements();
26700    SmallVector<int, 8> ShuffleMask;
26701    for (unsigned i = 0; i != NumElts; ++i) {
26702      SDValue A = Amt->getOperand(i);
26703      if (A.isUndef()) {
26704        ShuffleMask.push_back(SM_SentinelUndef);
26705        continue;
26706      }
26707      if (!Amt1 || Amt1 == A) {
26708        ShuffleMask.push_back(i);
26709        Amt1 = A;
26710        continue;
26711      }
26712      if (!Amt2 || Amt2 == A) {
26713        ShuffleMask.push_back(i + NumElts);
26714        Amt2 = A;
26715        continue;
26716      }
26717      break;
26718    }
26719
26720    // Only perform this blend if we can perform it without loading a mask.
26721    if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
26722        (VT != MVT::v16i16 ||
26723         is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
26724        (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
26725         canWidenShuffleElements(ShuffleMask))) {
26726      auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
26727      auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
26728      if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
26729          Cst2->getAPIntValue().ult(EltSizeInBits)) {
26730        SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
26731                                                    Cst1->getZExtValue(), DAG);
26732        SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
26733                                                    Cst2->getZExtValue(), DAG);
26734        return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
26735      }
26736    }
26737  }
26738
26739  // If possible, lower this packed shift into a vector multiply instead of
26740  // expanding it into a sequence of scalar shifts.
26741  if (Opc == ISD::SHL)
26742    if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
26743      return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
26744
26745  // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
26746  // can replace with ISD::MULHU, creating scale factor from (NumEltBits - Amt).
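  // e.g. srl(x, 4) == mulhu(x, 1 << 12) for i16 elements. Shift-by-zero lanes
  // need the select below: their scale would be 1 << 16, which is not
  // representable in i16 (convertShiftLeftToScale emits undef for it), so the
  // original value R is used for those lanes instead.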
26747  if (Opc == ISD::SRL && ConstantAmt &&
26748      (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
26749    SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
26750    SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
26751    if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
26752      SDValue Zero = DAG.getConstant(0, dl, VT);
26753      SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
26754      SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
26755      return DAG.getSelect(dl, VT, ZAmt, R, Res);
26756    }
26757  }
26758
26759  // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
26760  // can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
26761  // TODO: Special case handling for shift by 0/1, really we can afford either
26762  // of these cases in pre-SSE41/XOP/AVX512 but not both.
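  // Shift-by-0 lanes are patched up below because the i16 scale 1 << 16 is not
  // representable, and shift-by-1 lanes because the scale 1 << 15 is negative
  // as an i16, which would flip the sign of the MULHS result.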
26763  if (Opc == ISD::SRA && ConstantAmt &&
26764      (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
26765      ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
26766        !Subtarget.hasAVX512()) ||
26767       DAG.isKnownNeverZero(Amt))) {
26768    SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
26769    SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
26770    if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
26771      SDValue Amt0 =
26772          DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
26773      SDValue Amt1 =
26774          DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
26775      SDValue Sra1 =
26776          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
26777      SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
26778      Res = DAG.getSelect(dl, VT, Amt0, R, Res);
26779      return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
26780    }
26781  }
26782
26783  // v4i32 Non Uniform Shifts.
26784  // If the shift amount is constant we can shift each lane using the SSE2
26785  // immediate shifts, else we need to zero-extend each lane to the lower i64
26786  // and shift using the SSE2 variable shifts.
26787  // The separate results can then be blended together.
26788  if (VT == MVT::v4i32) {
26789    SDValue Amt0, Amt1, Amt2, Amt3;
26790    if (ConstantAmt) {
26791      Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
26792      Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
26793      Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
26794      Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
26795    } else {
26796      // The SSE2 shifts use the lower i64 as the same shift amount for
26797      // all lanes and the upper i64 is ignored. On AVX we're better off
26798      // just zero-extending, but for SSE just duplicating the top 16-bits is
26799      // cheaper and has the same effect for out of range values.
26800      if (Subtarget.hasAVX()) {
26801        SDValue Z = DAG.getConstant(0, dl, VT);
26802        Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
26803        Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
26804        Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
26805        Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
26806      } else {
26807        SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
26808        SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
26809                                             {4, 5, 6, 7, -1, -1, -1, -1});
26810        Amt0 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
26811                                    {0, 1, 1, 1, -1, -1, -1, -1});
26812        Amt1 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
26813                                    {2, 3, 3, 3, -1, -1, -1, -1});
26814        Amt2 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
26815                                    {0, 1, 1, 1, -1, -1, -1, -1});
26816        Amt3 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
26817                                    {2, 3, 3, 3, -1, -1, -1, -1});
26818      }
26819    }
26820
26821    unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
26822    SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
26823    SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
26824    SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
26825    SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));
26826
26827    // Merge the shifted lane results optimally with/without PBLENDW.
26828    // TODO - ideally shuffle combining would handle this.
26829    if (Subtarget.hasSSE41()) {
26830      SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
26831      SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
26832      return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
26833    }
26834    SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
26835    SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
26836    return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
26837  }
26838
26839  // It's worth extending once and using the vXi16/vXi32 shifts for smaller
26840  // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
26841  // make the existing SSE solution better.
26842  // NOTE: We honor preferred vector width before promoting to 512-bits.
26843  if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
26844      (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
26845      (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
26846      (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
26847      (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
26848    assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
26849           "Unexpected vector type");
26850    MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
26851    MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
26852    unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
26853    R = DAG.getNode(ExtOpc, dl, ExtVT, R);
26854    Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
26855    return DAG.getNode(ISD::TRUNCATE, dl, VT,
26856                       DAG.getNode(Opc, dl, ExtVT, R, Amt));
26857  }
26858
26859  // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
26860  // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
26861  if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
26862      (VT == MVT::v16i8 || VT == MVT::v64i8 ||
26863       (VT == MVT::v32i8 && Subtarget.hasInt256())) &&
26864      !Subtarget.hasXOP()) {
26865    int NumElts = VT.getVectorNumElements();
26866    SDValue Cst8 = DAG.getTargetConstant(8, dl, MVT::i8);
26867
26868    // Extend constant shift amount to vXi16 (it doesn't matter if the type
26869    // isn't legal).
26870    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
26871    Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
26872    Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
26873    Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
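    // Amt now holds the per-element scale 2^(8 - ShiftAmt): multiplying the
    // sign/zero extended value by it and logically shifting right by 8 gives
    // the i8 right shift, with the extension bits supplying the fill bits.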
26874    assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
26875           "Constant build vector expected");
26876
26877    if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
26878      R = Opc == ISD::SRA ? DAG.getSExtOrTrunc(R, dl, ExVT)
26879                          : DAG.getZExtOrTrunc(R, dl, ExVT);
26880      R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
26881      R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
26882      return DAG.getZExtOrTrunc(R, dl, VT);
26883    }
26884
26885    SmallVector<SDValue, 16> LoAmt, HiAmt;
26886    for (int i = 0; i != NumElts; i += 16) {
26887      for (int j = 0; j != 8; ++j) {
26888        LoAmt.push_back(Amt.getOperand(i + j));
26889        HiAmt.push_back(Amt.getOperand(i + j + 8));
26890      }
26891    }
26892
26893    MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
26894    SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
26895    SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);
26896
26897    SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
26898    SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
26899    LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
26900    HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
26901    LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
26902    HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
26903    LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
26904    HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
26905    return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
26906  }
26907
26908  if (VT == MVT::v16i8 ||
26909      (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
26910      (VT == MVT::v64i8 && Subtarget.hasBWI())) {
26911    MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
26912
26913    auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
26914      if (VT.is512BitVector()) {
26915        // On AVX512BW targets we make use of the fact that VSELECT lowers
26916        // to a masked blend which selects bytes based just on the sign bit
26917        // extracted to a mask.
26918        MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
26919        V0 = DAG.getBitcast(VT, V0);
26920        V1 = DAG.getBitcast(VT, V1);
26921        Sel = DAG.getBitcast(VT, Sel);
26922        Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
26923                           ISD::SETGT);
26924        return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
26925      } else if (Subtarget.hasSSE41()) {
26926        // On SSE41 targets we make use of the fact that VSELECT lowers
26927        // to PBLENDVB which selects bytes based just on the sign bit.
26928        V0 = DAG.getBitcast(VT, V0);
26929        V1 = DAG.getBitcast(VT, V1);
26930        Sel = DAG.getBitcast(VT, Sel);
26931        return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
26932      }
26933      // On pre-SSE41 targets we test for the sign bit by comparing to
26934      // zero - a negative value will set all bits of the lanes to true
26935      // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
26936      SDValue Z = DAG.getConstant(0, dl, SelVT);
26937      SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
26938      return DAG.getSelect(dl, SelVT, C, V0, V1);
26939    };
26940
26941    // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
26942    // We can safely do this using i16 shifts as we're only interested in
26943    // the 3 lower bits of each byte.
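    // After the shift by 5, amount bit 2 sits in each byte's sign bit. Each
    // VSELECT stage below conditionally applies a shift of 4, then 2, then 1,
    // and the 'a += a' steps move the next lower amount bit into the sign bit,
    // composing any shift amount from 0 to 7.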
26944    Amt = DAG.getBitcast(ExtVT, Amt);
26945    Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
26946    Amt = DAG.getBitcast(VT, Amt);
26947
26948    if (Opc == ISD::SHL || Opc == ISD::SRL) {
26949      // r = VSELECT(r, shift(r, 4), a);
26950      SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
26951      R = SignBitSelect(VT, Amt, M, R);
26952
26953      // a += a
26954      Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
26955
26956      // r = VSELECT(r, shift(r, 2), a);
26957      M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
26958      R = SignBitSelect(VT, Amt, M, R);
26959
26960      // a += a
26961      Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
26962
26963      // return VSELECT(r, shift(r, 1), a);
26964      M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
26965      R = SignBitSelect(VT, Amt, M, R);
26966      return R;
26967    }
26968
26969    if (Opc == ISD::SRA) {
26970      // For SRA we need to unpack each byte to the higher byte of a i16 vector
26971      // so we can correctly sign extend. We don't care what happens to the
26972      // lower byte.
26973      SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
26974      SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
26975      SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
26976      SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
26977      ALo = DAG.getBitcast(ExtVT, ALo);
26978      AHi = DAG.getBitcast(ExtVT, AHi);
26979      RLo = DAG.getBitcast(ExtVT, RLo);
26980      RHi = DAG.getBitcast(ExtVT, RHi);
26981
26982      // r = VSELECT(r, shift(r, 4), a);
26983      SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
26984      SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
26985      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
26986      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
26987
26988      // a += a
26989      ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
26990      AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
26991
26992      // r = VSELECT(r, shift(r, 2), a);
26993      MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
26994      MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
26995      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
26996      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
26997
26998      // a += a
26999      ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
27000      AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
27001
27002      // r = VSELECT(r, shift(r, 1), a);
27003      MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
27004      MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
27005      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
27006      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
27007
27008      // Logical shift the result back to the lower byte, leaving a zero upper
27009      // byte meaning that we can safely pack with PACKUSWB.
27010      RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
27011      RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
27012      return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
27013    }
27014  }
27015
27016  if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
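    // Each i16 element is placed in the high half of an i32 (low half zero)
    // and shifted as v8i32 by the zero-extended per-element amount; shifting
    // the result back down by 16 then yields the correct fill bits for
    // SHL/SRL/SRA before packing back to v16i16.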
27017    MVT ExtVT = MVT::v8i32;
27018    SDValue Z = DAG.getConstant(0, dl, VT);
27019    SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
27020    SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
27021    SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
27022    SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
27023    ALo = DAG.getBitcast(ExtVT, ALo);
27024    AHi = DAG.getBitcast(ExtVT, AHi);
27025    RLo = DAG.getBitcast(ExtVT, RLo);
27026    RHi = DAG.getBitcast(ExtVT, RHi);
27027    SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
27028    SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
27029    Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
27030    Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
27031    return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
27032  }
27033
27034  if (VT == MVT::v8i16) {
27035    // If we have a constant shift amount, the non-SSE41 path is best as
27036    // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
27037    bool UseSSE41 = Subtarget.hasSSE41() &&
27038                    !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
27039
27040    auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
27041      // On SSE41 targets we make use of the fact that VSELECT lowers
27042      // to PBLENDVB which selects bytes based just on the sign bit.
27043      if (UseSSE41) {
27044        MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
27045        V0 = DAG.getBitcast(ExtVT, V0);
27046        V1 = DAG.getBitcast(ExtVT, V1);
27047        Sel = DAG.getBitcast(ExtVT, Sel);
27048        return DAG.getBitcast(VT, DAG.getSelect(dl, ExtVT, Sel, V0, V1));
27049      }
27050      // On pre-SSE41 targets we splat the sign bit - a negative value will
27051      // set all bits of the lanes to true and VSELECT uses that in
27052      // its OR(AND(V0,C),AND(V1,~C)) lowering.
27053      SDValue C =
27054          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
27055      return DAG.getSelect(dl, VT, C, V0, V1);
27056    };
27057
27058    // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
27059    if (UseSSE41) {
27060      // On SSE41 targets we need to replicate the shift mask in both
27061      // bytes for PBLENDVB.
27062      Amt = DAG.getNode(
27063          ISD::OR, dl, VT,
27064          getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
27065          getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
27066    } else {
27067      Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
27068    }
27069
27070    // r = VSELECT(r, shift(r, 8), a);
27071    SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
27072    R = SignBitSelect(Amt, M, R);
27073
27074    // a += a
27075    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27076
27077    // r = VSELECT(r, shift(r, 4), a);
27078    M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
27079    R = SignBitSelect(Amt, M, R);
27080
27081    // a += a
27082    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27083
27084    // r = VSELECT(r, shift(r, 2), a);
27085    M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
27086    R = SignBitSelect(Amt, M, R);
27087
27088    // a += a
27089    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27090
27091    // return VSELECT(r, shift(r, 1), a);
27092    M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
27093    R = SignBitSelect(Amt, M, R);
27094    return R;
27095  }
27096
27097  // Decompose 256-bit shifts into 128-bit shifts.
27098  if (VT.is256BitVector())
27099    return split256IntArith(Op, DAG);
27100
27101  return SDValue();
27102}
27103
27104static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
27105                           SelectionDAG &DAG) {
27106  MVT VT = Op.getSimpleValueType();
27107  assert(VT.isVector() && "Custom lowering only for vector rotates!");
27108
27109  SDLoc DL(Op);
27110  SDValue R = Op.getOperand(0);
27111  SDValue Amt = Op.getOperand(1);
27112  unsigned Opcode = Op.getOpcode();
27113  unsigned EltSizeInBits = VT.getScalarSizeInBits();
27114  int NumElts = VT.getVectorNumElements();
27115
27116  // Check for constant splat rotation amount.
27117  APInt UndefElts;
27118  SmallVector<APInt, 32> EltBits;
27119  int CstSplatIndex = -1;
27120  if (getTargetConstantBitsFromNode(Amt, EltSizeInBits, UndefElts, EltBits))
27121    for (int i = 0; i != NumElts; ++i)
27122      if (!UndefElts[i]) {
27123        if (CstSplatIndex < 0 || EltBits[i] == EltBits[CstSplatIndex]) {
27124          CstSplatIndex = i;
27125          continue;
27126        }
27127        CstSplatIndex = -1;
27128        break;
27129      }
27130
27131  // AVX512 implicitly uses modulo rotation amounts.
27132  if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
27133    // Attempt to rotate by immediate.
27134    if (0 <= CstSplatIndex) {
27135      unsigned Op = (Opcode == ISD::ROTL ? X86ISD::VROTLI : X86ISD::VROTRI);
27136      uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
27137      return DAG.getNode(Op, DL, VT, R,
27138                         DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
27139    }
27140
27141    // Else, fall-back on VPROLV/VPRORV.
27142    return Op;
27143  }
27144
27145  assert((Opcode == ISD::ROTL) && "Only ROTL supported");
27146
27147  // XOP has 128-bit vector variable + immediate rotates.
27148  // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
27149  // XOP implicitly uses modulo rotation amounts.
27150  if (Subtarget.hasXOP()) {
27151    if (VT.is256BitVector())
27152      return split256IntArith(Op, DAG);
27153    assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
27154
27155    // Attempt to rotate by immediate.
27156    if (0 <= CstSplatIndex) {
27157      uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
27158      return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
27159                         DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
27160    }
27161
27162    // Use general rotate by variable (per-element).
27163    return Op;
27164  }
27165
27166  // Split 256-bit integers on pre-AVX2 targets.
27167  if (VT.is256BitVector() && !Subtarget.hasAVX2())
27168    return split256IntArith(Op, DAG);
27169
27170  assert((VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
27171          ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
27172           Subtarget.hasAVX2())) &&
27173         "Only vXi32/vXi16/vXi8 vector rotates supported");
27174
27175  // Rotate by a uniform constant - expand back to shifts.
27176  if (0 <= CstSplatIndex)
27177    return SDValue();
27178
27179  bool IsSplatAmt = DAG.isSplatValue(Amt);
27180
27181  // v16i8/v32i8: Split rotation into rot4/rot2/rot1 stages and select by
27182  // the amount bit.
27183  if (EltSizeInBits == 8 && !IsSplatAmt) {
27184    if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()))
27185      return SDValue();
27186
27187    // We don't need ModuloAmt here as we just peek at individual bits.
27188    MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
27189
27190    auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
27191      if (Subtarget.hasSSE41()) {
27192        // On SSE41 targets we make use of the fact that VSELECT lowers
27193        // to PBLENDVB which selects bytes based just on the sign bit.
27194        V0 = DAG.getBitcast(VT, V0);
27195        V1 = DAG.getBitcast(VT, V1);
27196        Sel = DAG.getBitcast(VT, Sel);
27197        return DAG.getBitcast(SelVT, DAG.getSelect(DL, VT, Sel, V0, V1));
27198      }
27199      // On pre-SSE41 targets we test for the sign bit by comparing to
27200      // zero - a negative value will set all bits of the lanes to true
27201      // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
27202      SDValue Z = DAG.getConstant(0, DL, SelVT);
27203      SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
27204      return DAG.getSelect(DL, SelVT, C, V0, V1);
27205    };
27206
27207    // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
27208    // We can safely do this using i16 shifts as we're only interested in
27209    // the 3 lower bits of each byte.
27210    Amt = DAG.getBitcast(ExtVT, Amt);
27211    Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
27212    Amt = DAG.getBitcast(VT, Amt);
27213
27214    // r = VSELECT(r, rot(r, 4), a);
27215    SDValue M;
27216    M = DAG.getNode(
27217        ISD::OR, DL, VT,
27218        DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(4, DL, VT)),
27219        DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(4, DL, VT)));
27220    R = SignBitSelect(VT, Amt, M, R);
27221
27222    // a += a
27223    Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
27224
27225    // r = VSELECT(r, rot(r, 2), a);
27226    M = DAG.getNode(
27227        ISD::OR, DL, VT,
27228        DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(2, DL, VT)),
27229        DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(6, DL, VT)));
27230    R = SignBitSelect(VT, Amt, M, R);
27231
27232    // a += a
27233    Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
27234
27235    // return VSELECT(r, rot(r, 1), a);
27236    M = DAG.getNode(
27237        ISD::OR, DL, VT,
27238        DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(1, DL, VT)),
27239        DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(7, DL, VT)));
27240    return SignBitSelect(VT, Amt, M, R);
27241  }
27242
27243  // ISD::ROT* uses modulo rotate amounts.
27244  Amt = DAG.getNode(ISD::AND, DL, VT, Amt,
27245                    DAG.getConstant(EltSizeInBits - 1, DL, VT));
27246
27247  bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
27248  bool LegalVarShifts = SupportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
27249                        SupportedVectorVarShift(VT, Subtarget, ISD::SRL);
27250
27251  // Fallback for splats + all supported variable shifts.
27252  // Fallback for non-constant AVX2 vXi16 as well.
27253  if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
27254    SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
27255    AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
27256    SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
27257    SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
27258    return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
27259  }
27260
27261  // As with shifts, convert the rotation amount to a multiplication factor.
27262  SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
27263  assert(Scale && "Failed to convert ROTL amount to scale");
27264
27265  // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
27266  if (EltSizeInBits == 16) {
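  // With Scale = 2^Amt, MUL yields the low 16 bits (R << Amt) and MULHU the
  // high 16 bits (R >> (16 - Amt)), so ORing them reassembles the rotate.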
27267    SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
27268    SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
27269    return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
27270  }
27271
27272  // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
27273  // to v2i64 results at a time. The upper 32-bits contain the wrapped bits
27274  // that can then be OR'd with the lower 32-bits.
27275  assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
27276  static const int OddMask[] = {1, -1, 3, -1};
27277  SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
27278  SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
27279
27280  SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
27281                              DAG.getBitcast(MVT::v2i64, R),
27282                              DAG.getBitcast(MVT::v2i64, Scale));
27283  SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
27284                              DAG.getBitcast(MVT::v2i64, R13),
27285                              DAG.getBitcast(MVT::v2i64, Scale13));
27286  Res02 = DAG.getBitcast(VT, Res02);
27287  Res13 = DAG.getBitcast(VT, Res13);
27288
27289  return DAG.getNode(ISD::OR, DL, VT,
27290                     DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
27291                     DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
27292}
27293
27294/// Returns true if the operand type is exactly twice the native width, and
27295/// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
27296/// Used to know whether to use cmpxchg8/16b when expanding atomic operations
27297/// (otherwise we leave them alone to become __sync_fetch_and_... calls).
27298bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
27299  unsigned OpWidth = MemType->getPrimitiveSizeInBits();
27300
27301  if (OpWidth == 64)
27302    return Subtarget.hasCmpxchg8b() && !Subtarget.is64Bit();
27303  if (OpWidth == 128)
27304    return Subtarget.hasCmpxchg16b();
27305
27306  return false;
27307}
27308
27309// TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
27310// TODO: In 32-bit mode, use FISTP when X87 is available?
27311bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
27312  Type *MemType = SI->getValueOperand()->getType();
27313
27314  bool NoImplicitFloatOps =
27315      SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
27316  if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
27317      !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2())
27318    return false;
27319
27320  return needsCmpXchgNb(MemType);
27321}
27322
27323// Note: this turns large loads into lock cmpxchg8b/16b.
27324// TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
27325TargetLowering::AtomicExpansionKind
27326X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
27327  Type *MemType = LI->getType();
27328
27329  // If this is a 64-bit atomic load on a 32-bit target and SSE2 is enabled, we
27330  // can use movq to do the load. If we have X87 we can load into an 80-bit
27331  // X87 register and store it to a stack temporary.
27332  bool NoImplicitFloatOps =
27333      LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
27334  if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
27335      !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
27336      (Subtarget.hasSSE2() || Subtarget.hasX87()))
27337    return AtomicExpansionKind::None;
27338
27339  return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
27340                                 : AtomicExpansionKind::None;
27341}
27342
27343TargetLowering::AtomicExpansionKind
27344X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
27345  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
27346  Type *MemType = AI->getType();
27347
27348  // If the operand is too big, we must see if cmpxchg8/16b is available
27349  // and default to library calls otherwise.
27350  if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
27351    return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
27352                                   : AtomicExpansionKind::None;
27353  }
27354
27355  AtomicRMWInst::BinOp Op = AI->getOperation();
27356  switch (Op) {
27357  default:
27358    llvm_unreachable("Unknown atomic operation");
27359  case AtomicRMWInst::Xchg:
27360  case AtomicRMWInst::Add:
27361  case AtomicRMWInst::Sub:
27362    // It's better to use xadd, xsub or xchg for these in all cases.
27363    return AtomicExpansionKind::None;
27364  case AtomicRMWInst::Or:
27365  case AtomicRMWInst::And:
27366  case AtomicRMWInst::Xor:
27367    // If the atomicrmw's result isn't actually used, we can just add a "lock"
27368    // prefix to a normal instruction for these operations.
27369    return !AI->use_empty() ? AtomicExpansionKind::CmpXChg
27370                            : AtomicExpansionKind::None;
27371  case AtomicRMWInst::Nand:
27372  case AtomicRMWInst::Max:
27373  case AtomicRMWInst::Min:
27374  case AtomicRMWInst::UMax:
27375  case AtomicRMWInst::UMin:
27376  case AtomicRMWInst::FAdd:
27377  case AtomicRMWInst::FSub:
27378    // These always require a non-trivial set of data operations on x86. We must
27379    // use a cmpxchg loop.
27380    return AtomicExpansionKind::CmpXChg;
27381  }
27382}
27383
27384LoadInst *
27385X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
27386  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
27387  Type *MemType = AI->getType();
27388  // Accesses larger than the native width are turned into cmpxchg/libcalls, so
27389  // there is no benefit in turning such RMWs into loads, and it is actually
27390  // harmful as it introduces a mfence.
27391  if (MemType->getPrimitiveSizeInBits() > NativeWidth)
27392    return nullptr;
27393
27394  // If this is a canonical idempotent atomicrmw w/no uses, we have a better
27395  // lowering available in lowerAtomicArith.
27396  // TODO: push more cases through this path.
27397  if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
27398    if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
27399        AI->use_empty())
27400      return nullptr;
27401
27402  auto Builder = IRBuilder<>(AI);
27403  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
27404  auto SSID = AI->getSyncScopeID();
27405  // We must restrict the ordering to avoid generating loads with Release or
27406  // ReleaseAcquire orderings.
27407  auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
27408
27409  // Before the load we need a fence. Here is an example lifted from
27410  // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
27411  // is required:
27412  // Thread 0:
27413  //   x.store(1, relaxed);
27414  //   r1 = y.fetch_add(0, release);
27415  // Thread 1:
27416  //   y.fetch_add(42, acquire);
27417  //   r2 = x.load(relaxed);
27418  // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
27419  // lowered to just a load without a fence. An mfence flushes the store buffer,
27420  // making the optimization clearly correct.
27421  // FIXME: the fence is required if isReleaseOrStronger(Order), but it is not
27422  // clearly needed otherwise; we might be able to be more aggressive on relaxed
27423  // idempotent rmws. In practice, they do not look useful, so we don't try to
27424  // be especially clever.
27425  if (SSID == SyncScope::SingleThread)
27426    // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
27427    // the IR level, so we must wrap it in an intrinsic.
27428    return nullptr;
27429
27430  if (!Subtarget.hasMFence())
27431    // FIXME: it might make sense to use a locked operation here but on a
27432    // different cache-line to prevent cache-line bouncing. In practice it
27433    // is probably a small win, and x86 processors without mfence are rare
27434    // enough that we do not bother.
27435    return nullptr;
27436
27437  Function *MFence =
27438      llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
27439  Builder.CreateCall(MFence, {});
27440
27441  // Finally we can emit the atomic load.
27442  LoadInst *Loaded =
27443      Builder.CreateAlignedLoad(AI->getType(), AI->getPointerOperand(),
27444                                AI->getType()->getPrimitiveSizeInBits());
27445  Loaded->setAtomic(Order, SSID);
27446  AI->replaceAllUsesWith(Loaded);
27447  AI->eraseFromParent();
27448  return Loaded;
27449}
27450
27451bool X86TargetLowering::lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
27452  if (!SI.isUnordered())
27453    return false;
27454  return ExperimentalUnorderedISEL;
27455}
27456bool X86TargetLowering::lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
27457  if (!LI.isUnordered())
27458    return false;
27459  return ExperimentalUnorderedISEL;
27460}
27461
27462
27463/// Emit a locked operation on a stack location which does not change any
27464/// memory location, but does involve a lock prefix.  Location is chosen to be
27465/// a) very likely accessed only by a single thread to minimize cache traffic,
27466/// and b) definitely dereferenceable.  Returns the new Chain result.
27467static SDValue emitLockedStackOp(SelectionDAG &DAG,
27468                                 const X86Subtarget &Subtarget,
27469                                 SDValue Chain, SDLoc DL) {
27470  // Implementation notes:
27471  // 1) LOCK prefix creates a full read/write reordering barrier for memory
27472  // operations issued by the current processor.  As such, the location
27473  // referenced is not relevant for the ordering properties of the instruction.
27474  // See: Intel 64 and IA-32 Architectures Software Developer's Manual,
27475  // 8.2.3.9  Loads and Stores Are Not Reordered with Locked Instructions
27476  // 2) Using an immediate operand appears to be the best encoding choice
27477  // here since it doesn't require an extra register.
27478  // 3) OR appears to be very slightly faster than ADD. (Though, the difference
27479  // is small enough it might just be measurement noise.)
27480  // 4) When choosing offsets, there are several contributing factors:
27481  //   a) If there's no redzone, we default to TOS.  (We could allocate a cache
27482  //      line aligned stack object to improve this case.)
27483  //   b) To minimize our chances of introducing a false dependence, we prefer
27484  //      to offset the stack usage from TOS slightly.
27485  //   c) To minimize concerns about cross thread stack usage - in particular,
27486  //      the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
27487  //      captures state in the TOS frame and accesses it from many threads -
27488  //      we want to use an offset such that the offset is in a distinct cache
27489  //      line from the TOS frame.
27490  //
27491  // For a general discussion of the tradeoffs and benchmark results, see:
27492  // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
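  //
  // For illustration only (not from the original source): on x86-64 this emits
  // roughly "lock orl $0, -64(%rsp)" when a red zone exists, or
  // "lock orl $0, (%rsp)" / "lock orl $0, (%esp)" otherwise; the store itself
  // is a no-op and only the LOCK-implied fence matters.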
27493
27494  auto &MF = DAG.getMachineFunction();
27495  auto &TFL = *Subtarget.getFrameLowering();
27496  const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;
27497
27498  if (Subtarget.is64Bit()) {
27499    SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
27500    SDValue Ops[] = {
27501      DAG.getRegister(X86::RSP, MVT::i64),                  // Base
27502      DAG.getTargetConstant(1, DL, MVT::i8),                // Scale
27503      DAG.getRegister(0, MVT::i64),                         // Index
27504      DAG.getTargetConstant(SPOffset, DL, MVT::i32),        // Disp
27505      DAG.getRegister(0, MVT::i16),                         // Segment.
27506      Zero,
27507      Chain};
27508    SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
27509                                     MVT::Other, Ops);
27510    return SDValue(Res, 1);
27511  }
27512
27513  SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
27514  SDValue Ops[] = {
27515    DAG.getRegister(X86::ESP, MVT::i32),            // Base
27516    DAG.getTargetConstant(1, DL, MVT::i8),          // Scale
27517    DAG.getRegister(0, MVT::i32),                   // Index
27518    DAG.getTargetConstant(SPOffset, DL, MVT::i32),  // Disp
27519    DAG.getRegister(0, MVT::i16),                   // Segment.
27520    Zero,
27521    Chain
27522  };
27523  SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
27524                                   MVT::Other, Ops);
27525  return SDValue(Res, 1);
27526}
27527
27528static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
27529                                 SelectionDAG &DAG) {
27530  SDLoc dl(Op);
27531  AtomicOrdering FenceOrdering =
27532      static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
27533  SyncScope::ID FenceSSID =
27534      static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
27535
27536  // The only fence that needs an instruction is a sequentially-consistent
27537  // cross-thread fence.
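  // For example (illustration only): "fence seq_cst" becomes MFENCE (or the
  // locked stack op when MFENCE is unavailable), while acquire/release fences
  // only become the compiler-level MEMBARRIER below.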
27538  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
27539      FenceSSID == SyncScope::System) {
27540    if (Subtarget.hasMFence())
27541      return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
27542
27543    SDValue Chain = Op.getOperand(0);
27544    return emitLockedStackOp(DAG, Subtarget, Chain, dl);
27545  }
27546
27547  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
27548  return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
27549}
27550
27551static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
27552                             SelectionDAG &DAG) {
27553  MVT T = Op.getSimpleValueType();
27554  SDLoc DL(Op);
27555  unsigned Reg = 0;
27556  unsigned size = 0;
27557  switch(T.SimpleTy) {
27558  default: llvm_unreachable("Invalid value type!");
27559  case MVT::i8:  Reg = X86::AL;  size = 1; break;
27560  case MVT::i16: Reg = X86::AX;  size = 2; break;
27561  case MVT::i32: Reg = X86::EAX; size = 4; break;
27562  case MVT::i64:
27563    assert(Subtarget.is64Bit() && "Node not type legal!");
27564    Reg = X86::RAX; size = 8;
27565    break;
27566  }
27567  SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
27568                                  Op.getOperand(2), SDValue());
27569  SDValue Ops[] = { cpIn.getValue(0),
27570                    Op.getOperand(1),
27571                    Op.getOperand(3),
27572                    DAG.getTargetConstant(size, DL, MVT::i8),
27573                    cpIn.getValue(1) };
27574  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
27575  MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
27576  SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
27577                                           Ops, T, MMO);
27578
27579  SDValue cpOut =
27580    DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
27581  SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
27582                                      MVT::i32, cpOut.getValue(2));
27583  SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
27584
27585  return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
27586                     cpOut, Success, EFLAGS.getValue(1));
27587}
27588
27589// Create MOVMSKB, taking into account whether we need to split for AVX1.
27590static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
27591                           const X86Subtarget &Subtarget) {
27592  MVT InVT = V.getSimpleValueType();
27593
27594  if (InVT == MVT::v64i8) {
27595    SDValue Lo, Hi;
27596    std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
27597    Lo = getPMOVMSKB(DL, Lo, DAG, Subtarget);
27598    Hi = getPMOVMSKB(DL, Hi, DAG, Subtarget);
27599    Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
27600    Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
27601    Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
27602                     DAG.getConstant(32, DL, MVT::i8));
27603    return DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
27604  }
27605  if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
27606    SDValue Lo, Hi;
27607    std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
27608    Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
27609    Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
27610    Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
27611                     DAG.getConstant(16, DL, MVT::i8));
27612    return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
27613  }
27614
27615  return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
27616}
27617
27618static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
27619                            SelectionDAG &DAG) {
27620  SDValue Src = Op.getOperand(0);
27621  MVT SrcVT = Src.getSimpleValueType();
27622  MVT DstVT = Op.getSimpleValueType();
27623
27624  // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
27625  // half to v32i1 and concatenating the result.
27626  if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
27627    assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
27628    assert(Subtarget.hasBWI() && "Expected BWI target");
27629    SDLoc dl(Op);
27630    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
27631                             DAG.getIntPtrConstant(0, dl));
27632    Lo = DAG.getBitcast(MVT::v32i1, Lo);
27633    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
27634                             DAG.getIntPtrConstant(1, dl));
27635    Hi = DAG.getBitcast(MVT::v32i1, Hi);
27636    return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
27637  }
27638
27639  // Custom splitting for BWI types when AVX512F is available but BWI isn't.
27640  if ((SrcVT == MVT::v32i16 || SrcVT == MVT::v64i8) && DstVT.isVector() &&
27641    DAG.getTargetLoweringInfo().isTypeLegal(DstVT)) {
27642    SDLoc dl(Op);
27643    SDValue Lo, Hi;
27644    std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
27645    MVT CastVT = DstVT.getHalfNumVectorElementsVT();
27646    Lo = DAG.getBitcast(CastVT, Lo);
27647    Hi = DAG.getBitcast(CastVT, Hi);
27648    return DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
27649  }
27650
27651  // Use MOVMSK for vector to scalar conversion to prevent scalarization.
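  // For example (illustration only): (i16 (bitcast (v16i1 X))) is
  // sign-extended to v16i8 and converted with PMOVMSKB, so each mask bit lands
  // in the corresponding bit of the scalar result.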
27652  if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
27653    assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
27654    MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
27655    SDLoc DL(Op);
27656    SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
27657    V = getPMOVMSKB(DL, V, DAG, Subtarget);
27658    return DAG.getZExtOrTrunc(V, DL, DstVT);
27659  }
27660
27661  assert((SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
27662          SrcVT == MVT::i64) && "Unexpected VT!");
27663
27664  assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
27665  if (!(DstVT == MVT::f64 && SrcVT == MVT::i64) &&
27666      !(DstVT == MVT::x86mmx && SrcVT.isVector()))
27667    // This conversion needs to be expanded.
27668    return SDValue();
27669
27670  SDLoc dl(Op);
27671  if (SrcVT.isVector()) {
27672    // Widen the input vector in the case of MVT::v2i32.
27673    // Example: from MVT::v2i32 to MVT::v4i32.
27674    MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
27675                                 SrcVT.getVectorNumElements() * 2);
27676    Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
27677                      DAG.getUNDEF(SrcVT));
27678  } else {
27679    assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
27680           "Unexpected source type in LowerBITCAST");
27681    Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
27682  }
27683
27684  MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
27685  Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);
27686
27687  if (DstVT == MVT::x86mmx)
27688    return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);
27689
27690  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
27691                     DAG.getIntPtrConstant(0, dl));
27692}
27693
27694/// Compute the horizontal sum of bytes in V for the elements of VT.
27695///
27696/// Requires V to be a byte vector and VT to be an integer vector type with
27697/// wider elements than V's type. The width of the elements of VT determines
27698/// how many bytes of V are summed horizontally to produce each element of the
27699/// result.
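///
/// Illustrative example (for exposition only, not from the original source):
/// with VT = v2i64, PSADBW sums each group of eight bytes of V into one i64
/// lane, so a v16i8 input of all ones yields <8, 8>.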
27700static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
27701                                      const X86Subtarget &Subtarget,
27702                                      SelectionDAG &DAG) {
27703  SDLoc DL(V);
27704  MVT ByteVecVT = V.getSimpleValueType();
27705  MVT EltVT = VT.getVectorElementType();
27706  assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
27707         "Expected value to have byte element type.");
27708  assert(EltVT != MVT::i8 &&
27709         "Horizontal byte sum only makes sense for wider elements!");
27710  unsigned VecSize = VT.getSizeInBits();
27711  assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
27712
27713  // The PSADBW instruction horizontally adds all bytes and leaves the result in
27714  // i64 chunks, thus directly computing the pop count for v2i64 and v4i64.
27715  if (EltVT == MVT::i64) {
27716    SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
27717    MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
27718    V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
27719    return DAG.getBitcast(VT, V);
27720  }
27721
27722  if (EltVT == MVT::i32) {
27723    // We unpack the low half and high half into i32s interleaved with zeros so
27724    // that we can use PSADBW to horizontally sum them. The most useful part of
27725    // this is that it lines up the results of two PSADBW instructions to be
27726    // two v2i64 vectors which concatenated are the 4 population counts. We can
27727    // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
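    // Illustrative sketch (not from the original source): each 8-byte PSADBW
    // group then holds the 4 count bytes of one i32 lane plus 4 zeros, so its
    // i64 result is exactly that lane's pop count.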
27728    SDValue Zeros = DAG.getConstant(0, DL, VT);
27729    SDValue V32 = DAG.getBitcast(VT, V);
27730    SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
27731    SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
27732
27733    // Do the horizontal sums into two v2i64s.
27734    Zeros = DAG.getConstant(0, DL, ByteVecVT);
27735    MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
27736    Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
27737                      DAG.getBitcast(ByteVecVT, Low), Zeros);
27738    High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
27739                       DAG.getBitcast(ByteVecVT, High), Zeros);
27740
27741    // Merge them together.
27742    MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
27743    V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
27744                    DAG.getBitcast(ShortVecVT, Low),
27745                    DAG.getBitcast(ShortVecVT, High));
27746
27747    return DAG.getBitcast(VT, V);
27748  }
27749
27750  // The only element type left is i16.
27751  assert(EltVT == MVT::i16 && "Unknown how to handle type");
27752
27753  // To obtain the pop count for each i16 element, starting from the pop count
27754  // of the i8 elements, shift the i16s left by 8, sum as i8s, and then shift
27755  // the i16s right by 8. It is important to shift as i16s because i8 vector
27756  // shifts aren't directly supported.
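  // Worked example (illustration only): if an i16 lane holds per-byte counts
  // 4 (high byte) and 6 (low byte), i.e. 0x0406, then shl-by-8 gives 0x0600,
  // the i8 add gives 0x0A06, and srl-by-8 leaves the total 0x000A = 10.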
27757  SDValue ShifterV = DAG.getConstant(8, DL, VT);
27758  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
27759  V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
27760                  DAG.getBitcast(ByteVecVT, V));
27761  return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
27762}
27763
27764static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
27765                                        const X86Subtarget &Subtarget,
27766                                        SelectionDAG &DAG) {
27767  MVT VT = Op.getSimpleValueType();
27768  MVT EltVT = VT.getVectorElementType();
27769  int NumElts = VT.getVectorNumElements();
27770  (void)EltVT;
27771  assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
27772
27773  // Implement a lookup table in register by using an algorithm based on:
27774  // http://wm.ite.pl/articles/sse-popcount.html
27775  //
27776  // The general idea is that every lower byte nibble in the input vector is an
27777  // index into an in-register pre-computed pop count table. We then split up the
27778  // input vector into two new ones: (1) a vector with only the shifted-right
27779  // higher nibbles for each byte and (2) a vector with the lower nibbles (and
27780  // masked-out higher ones) for each byte. PSHUFB is used separately with both
27781  // to index the in-register table. Next, both are added and the result is an
27782  // i8 vector where each element contains the pop count for the input byte.
27783  const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
27784                       /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
27785                       /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
27786                       /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
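  // Worked example (illustration only): for the input byte 0xB7 the high
  // nibble is 0xB (LUT[0xb] = 3) and the low nibble is 0x7 (LUT[0x7] = 3),
  // so the summed per-byte pop count is 3 + 3 = 6 = popcnt(0xB7).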
27787
27788  SmallVector<SDValue, 64> LUTVec;
27789  for (int i = 0; i < NumElts; ++i)
27790    LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
27791  SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
27792  SDValue M0F = DAG.getConstant(0x0F, DL, VT);
27793
27794  // High nibbles
27795  SDValue FourV = DAG.getConstant(4, DL, VT);
27796  SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
27797
27798  // Low nibbles
27799  SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
27800
27801  // The input vector is used as the shuffle mask that index elements into the
27802  // LUT. After counting low and high nibbles, add the vector to obtain the
27803  // final pop count per i8 element.
27804  SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
27805  SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
27806  return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
27807}
27808
27809// Please ensure that any codegen change from LowerVectorCTPOP is reflected in
27810// updated cost models in X86TTIImpl::getIntrinsicInstrCost.
27811static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
27812                                SelectionDAG &DAG) {
27813  MVT VT = Op.getSimpleValueType();
27814  assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
27815         "Unknown CTPOP type to handle");
27816  SDLoc DL(Op.getNode());
27817  SDValue Op0 = Op.getOperand(0);
27818
27819  // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
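  // For example (illustration only): on a VPOPCNTDQ target with 512-bit
  // vectors available, a v16i8 ctpop becomes zext to v16i32, VPOPCNTD, then a
  // truncate back to v16i8.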
27820  if (Subtarget.hasVPOPCNTDQ()) {
27821    unsigned NumElems = VT.getVectorNumElements();
27822    assert((VT.getVectorElementType() == MVT::i8 ||
27823            VT.getVectorElementType() == MVT::i16) && "Unexpected type");
27824    if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
27825      MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
27826      Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
27827      Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
27828      return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
27829    }
27830  }
27831
27832  // Decompose 256-bit ops into smaller 128-bit ops.
27833  if (VT.is256BitVector() && !Subtarget.hasInt256())
27834    return Lower256IntUnary(Op, DAG);
27835
27836  // Decompose 512-bit ops into smaller 256-bit ops.
27837  if (VT.is512BitVector() && !Subtarget.hasBWI())
27838    return Lower512IntUnary(Op, DAG);
27839
27840  // For element types greater than i8, do vXi8 pop counts and a bytesum.
27841  if (VT.getScalarType() != MVT::i8) {
27842    MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
27843    SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
27844    SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
27845    return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
27846  }
27847
27848  // We can't use the fast LUT approach, so fall back on LegalizeDAG.
27849  if (!Subtarget.hasSSSE3())
27850    return SDValue();
27851
27852  return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
27853}
27854
27855static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
27856                          SelectionDAG &DAG) {
27857  assert(Op.getSimpleValueType().isVector() &&
27858         "We only do custom lowering for vector population count.");
27859  return LowerVectorCTPOP(Op, Subtarget, DAG);
27860}
27861
27862static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
27863  MVT VT = Op.getSimpleValueType();
27864  SDValue In = Op.getOperand(0);
27865  SDLoc DL(Op);
27866
27867  // For scalars, it's still beneficial to transfer to/from the SIMD unit to
27868  // perform the BITREVERSE.
27869  if (!VT.isVector()) {
27870    MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
27871    SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
27872    Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
27873    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
27874                       DAG.getIntPtrConstant(0, DL));
27875  }
27876
27877  int NumElts = VT.getVectorNumElements();
27878  int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
27879
27880  // Decompose 256-bit ops into smaller 128-bit ops.
27881  if (VT.is256BitVector())
27882    return Lower256IntUnary(Op, DAG);
27883
27884  assert(VT.is128BitVector() &&
27885         "Only 128-bit vector bitreverse lowering supported.");
27886
27887  // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
27888  // perform the BSWAP in the shuffle.
27889  // It's best to shuffle using the second operand as this will implicitly allow
27890  // memory folding for multiple vectors.
27891  SmallVector<SDValue, 16> MaskElts;
27892  for (int i = 0; i != NumElts; ++i) {
27893    for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
27894      int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
27895      int PermuteByte = SourceByte | (2 << 5);
27896      MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
27897    }
27898  }
27899
27900  SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
27901  SDValue Res = DAG.getBitcast(MVT::v16i8, In);
27902  Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
27903                    Res, Mask);
27904  return DAG.getBitcast(VT, Res);
27905}
27906
27907static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
27908                               SelectionDAG &DAG) {
27909  MVT VT = Op.getSimpleValueType();
27910
27911  if (Subtarget.hasXOP() && !VT.is512BitVector())
27912    return LowerBITREVERSE_XOP(Op, DAG);
27913
27914  assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
27915
27916  SDValue In = Op.getOperand(0);
27917  SDLoc DL(Op);
27918
27919  // Split v8i64/v16i32 without BWI so that we can still use the PSHUFB
27920  // lowering.
27921  if (VT == MVT::v8i64 || VT == MVT::v16i32) {
27922    assert(!Subtarget.hasBWI() && "BWI should Expand BITREVERSE");
27923    return Lower512IntUnary(Op, DAG);
27924  }
27925
27926  unsigned NumElts = VT.getVectorNumElements();
27927  assert(VT.getScalarType() == MVT::i8 &&
27928         "Only byte vector BITREVERSE supported");
27929
27930  // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
27931  if (VT.is256BitVector() && !Subtarget.hasInt256())
27932    return Lower256IntUnary(Op, DAG);
27933
27934  // Perform BITREVERSE using PSHUFB lookups. Each byte is split into
27935  // two nibbles, and a PSHUFB lookup is used to find the bit-reverse of each
27936  // 0-15 value (moved to the other nibble).
27937  SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
27938  SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
27939  SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
27940
27941  const int LoLUT[16] = {
27942      /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
27943      /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
27944      /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
27945      /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
27946  const int HiLUT[16] = {
27947      /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
27948      /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
27949      /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
27950      /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
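  // Worked example (illustration only): for the input byte 0x2C,
  // LoLUT[0xc] = 0x30 and HiLUT[0x2] = 0x04, and 0x30 | 0x04 = 0x34, which is
  // 0x2C with its bits reversed.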
27951
27952  SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
27953  for (unsigned i = 0; i < NumElts; ++i) {
27954    LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
27955    HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
27956  }
27957
27958  SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
27959  SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
27960  Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
27961  Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
27962  return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
27963}
27964
27965static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
27966                                        const X86Subtarget &Subtarget) {
27967  unsigned NewOpc = 0;
27968  switch (N->getOpcode()) {
27969  case ISD::ATOMIC_LOAD_ADD:
27970    NewOpc = X86ISD::LADD;
27971    break;
27972  case ISD::ATOMIC_LOAD_SUB:
27973    NewOpc = X86ISD::LSUB;
27974    break;
27975  case ISD::ATOMIC_LOAD_OR:
27976    NewOpc = X86ISD::LOR;
27977    break;
27978  case ISD::ATOMIC_LOAD_XOR:
27979    NewOpc = X86ISD::LXOR;
27980    break;
27981  case ISD::ATOMIC_LOAD_AND:
27982    NewOpc = X86ISD::LAND;
27983    break;
27984  default:
27985    llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
27986  }
27987
27988  MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
27989
27990  return DAG.getMemIntrinsicNode(
27991      NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
27992      {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
27993      /*MemVT=*/N->getSimpleValueType(0), MMO);
27994}
27995
27996/// Lower atomic_load_ops into LOCK-prefixed operations.
27997static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
27998                                const X86Subtarget &Subtarget) {
27999  AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
28000  SDValue Chain = N->getOperand(0);
28001  SDValue LHS = N->getOperand(1);
28002  SDValue RHS = N->getOperand(2);
28003  unsigned Opc = N->getOpcode();
28004  MVT VT = N->getSimpleValueType(0);
28005  SDLoc DL(N);
28006
28007  // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
28008  // can only be lowered when the result is unused.  They should have already
28009  // been transformed into a cmpxchg loop in AtomicExpand.
28010  if (N->hasAnyUseOfValue(0)) {
28011    // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
28012    // select LXADD if LOCK_SUB can't be selected.
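    // For example (illustration only): (atomic_load_sub %p, 5) with a used
    // result becomes (atomic_load_add %p, -5), which can select to LOCK XADD
    // with -5 in the source register.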
28013    if (Opc == ISD::ATOMIC_LOAD_SUB) {
28014      RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
28015      return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
28016                           RHS, AN->getMemOperand());
28017    }
28018    assert(Opc == ISD::ATOMIC_LOAD_ADD &&
28019           "Used AtomicRMW ops other than Add should have been expanded!");
28020    return N;
28021  }
28022
28023  // Specialized lowering for the canonical form of an idempotent atomicrmw.
28024  // The core idea here is that since the memory location isn't actually
28025  // changing, all we need is a lowering for the *ordering* impacts of the
28026  // atomicrmw.  As such, we can choose a different operation and memory
28027  // location to minimize impact on other code.
28028  if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS)) {
28029    // On X86, the only ordering which actually requires an instruction is a
28030    // seq_cst ordering that isn't SingleThread; everything else just needs to be
28031    // preserved during codegen and then dropped. Note that we expect (but don't
28032    // assume) that orderings other than seq_cst and acq_rel have been
28033    // canonicalized to a store or load.
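    // For example (illustration only): an unused seq_cst system-scope
    // "atomicrmw or %p, 0" takes the locked-stack-op path below, while the same
    // operation with acquire or release ordering only needs the compiler-level
    // MEMBARRIER emitted at the end of this block.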
28034    if (AN->getOrdering() == AtomicOrdering::SequentiallyConsistent &&
28035        AN->getSyncScopeID() == SyncScope::System) {
28036      // Prefer a locked operation against a stack location to minimize cache
28037      // traffic.  This assumes that stack locations are very likely to be
28038      // accessed only by the owning thread.
28039      SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
28040      assert(!N->hasAnyUseOfValue(0));
28041      // NOTE: The getUNDEF is needed to give something for the unused result 0.
28042      return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
28043                         DAG.getUNDEF(VT), NewChain);
28044    }
28045    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
28046    SDValue NewChain = DAG.getNode(X86ISD::MEMBARRIER, DL, MVT::Other, Chain);
28047    assert(!N->hasAnyUseOfValue(0));
28048    // NOTE: The getUNDEF is needed to give something for the unused result 0.
28049    return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
28050                       DAG.getUNDEF(VT), NewChain);
28051  }
28052
28053  SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
28054  // RAUW the chain, but don't worry about the result, as it's unused.
28055  assert(!N->hasAnyUseOfValue(0));
28056  // NOTE: The getUNDEF is needed to give something for the unused result 0.
28057  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
28058                     DAG.getUNDEF(VT), LockOp.getValue(1));
28059}
28060
28061static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
28062                                 const X86Subtarget &Subtarget) {
28063  auto *Node = cast<AtomicSDNode>(Op.getNode());
28064  SDLoc dl(Node);
28065  EVT VT = Node->getMemoryVT();
28066
28067  bool IsSeqCst = Node->getOrdering() == AtomicOrdering::SequentiallyConsistent;
28068  bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
28069
28070  // If this store is not sequentially consistent and the type is legal
28071  // we can just keep it.
28072  if (!IsSeqCst && IsTypeLegal)
28073    return Op;
28074
28075  if (VT == MVT::i64 && !IsTypeLegal) {
28076    // For illegal i64 atomic_stores, we can try to use MOVQ if SSE2 is enabled.
28077    // FIXME: Use movlps with SSE1.
28078    // FIXME: Use fist with X87.
28079    bool NoImplicitFloatOps =
28080        DAG.getMachineFunction().getFunction().hasFnAttribute(
28081            Attribute::NoImplicitFloat);
28082    if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
28083        Subtarget.hasSSE2()) {
28084      SDValue SclToVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
28085                                     Node->getOperand(2));
28086      SDVTList Tys = DAG.getVTList(MVT::Other);
28087      SDValue Ops[] = { Node->getChain(), SclToVec, Node->getBasePtr() };
28088      SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys,
28089                                              Ops, MVT::i64,
28090                                              Node->getMemOperand());
28091
28092      // If this is a sequentially consistent store, also emit an appropriate
28093      // barrier.
28094      if (IsSeqCst)
28095        Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);
28096
28097      return Chain;
28098    }
28099  }
28100
28101  // Convert seq_cst store -> xchg
28102  // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
28103  // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
28104  SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
28105                               Node->getMemoryVT(),
28106                               Node->getOperand(0),
28107                               Node->getOperand(1), Node->getOperand(2),
28108                               Node->getMemOperand());
28109  return Swap.getValue(1);
28110}
28111
28112static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
28113  SDNode *N = Op.getNode();
28114  MVT VT = N->getSimpleValueType(0);
28115
28116  // Let legalize expand this if it isn't a legal type yet.
28117  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
28118    return SDValue();
28119
28120  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
28121  SDLoc DL(N);
28122
28123  // Set the carry flag.
28124  SDValue Carry = Op.getOperand(2);
28125  EVT CarryVT = Carry.getValueType();
28126  APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
28127  Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
28128                      Carry, DAG.getConstant(NegOne, DL, CarryVT));
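  // Illustrative note (not from the original source): adding all-ones to the
  // incoming carry value sets the hardware carry flag exactly when that value
  // is non-zero, which is the flag the ADC/SBB below consumes.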
28129
28130  unsigned Opc = Op.getOpcode() == ISD::ADDCARRY ? X86ISD::ADC : X86ISD::SBB;
28131  SDValue Sum = DAG.getNode(Opc, DL, VTs, Op.getOperand(0),
28132                            Op.getOperand(1), Carry.getValue(1));
28133
28134  SDValue SetCC = getSETCC(X86::COND_B, Sum.getValue(1), DL, DAG);
28135  if (N->getValueType(1) == MVT::i1)
28136    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
28137
28138  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
28139}
28140
28141static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
28142                            SelectionDAG &DAG) {
28143  assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
28144
28145  // For MacOSX, we want to call an alternative entry point: __sincos_stret,
28146  // which returns the values as { float, float } (in XMM0) or
28147  // { double, double } (which is returned in XMM0, XMM1).
28148  SDLoc dl(Op);
28149  SDValue Arg = Op.getOperand(0);
28150  EVT ArgVT = Arg.getValueType();
28151  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
28152
28153  TargetLowering::ArgListTy Args;
28154  TargetLowering::ArgListEntry Entry;
28155
28156  Entry.Node = Arg;
28157  Entry.Ty = ArgTy;
28158  Entry.IsSExt = false;
28159  Entry.IsZExt = false;
28160  Args.push_back(Entry);
28161
28162  bool isF64 = ArgVT == MVT::f64;
28163  // Only optimize x86_64 for now. i386 is a bit messy. For f32,
28164  // the small struct {f32, f32} is returned in (eax, edx). For f64,
28165  // the results are returned via SRet in memory.
28166  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28167  RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
28168  const char *LibcallName = TLI.getLibcallName(LC);
28169  SDValue Callee =
28170      DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
28171
28172  Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
28173                      : (Type *)VectorType::get(ArgTy, 4);
28174
28175  TargetLowering::CallLoweringInfo CLI(DAG);
28176  CLI.setDebugLoc(dl)
28177      .setChain(DAG.getEntryNode())
28178      .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
28179
28180  std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
28181
28182  if (isF64)
28183    // Returned in xmm0 and xmm1.
28184    return CallResult.first;
28185
28186  // Returned in bits 0:31 and 32:64 xmm0.
28187  SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
28188                               CallResult.first, DAG.getIntPtrConstant(0, dl));
28189  SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
28190                               CallResult.first, DAG.getIntPtrConstant(1, dl));
28191  SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
28192  return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
28193}
28194
28195/// Widen a vector input to a vector of NVT.  The
28196/// input vector must have the same element type as NVT.
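///
/// Illustrative example (not from the original source): widening a v4i32
/// value to v16i32 leaves the upper twelve elements undef, or zero when
/// FillWithZeroes is set.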
28197static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
28198                            bool FillWithZeroes = false) {
28199  // Check if InOp already has the right width.
28200  MVT InVT = InOp.getSimpleValueType();
28201  if (InVT == NVT)
28202    return InOp;
28203
28204  if (InOp.isUndef())
28205    return DAG.getUNDEF(NVT);
28206
28207  assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
28208         "input and widen element type must match");
28209
28210  unsigned InNumElts = InVT.getVectorNumElements();
28211  unsigned WidenNumElts = NVT.getVectorNumElements();
28212  assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
28213         "Unexpected request for vector widening");
28214
28215  SDLoc dl(InOp);
28216  if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
28217      InOp.getNumOperands() == 2) {
28218    SDValue N1 = InOp.getOperand(1);
28219    if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
28220        N1.isUndef()) {
28221      InOp = InOp.getOperand(0);
28222      InVT = InOp.getSimpleValueType();
28223      InNumElts = InVT.getVectorNumElements();
28224    }
28225  }
28226  if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
28227      ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
28228    SmallVector<SDValue, 16> Ops;
28229    for (unsigned i = 0; i < InNumElts; ++i)
28230      Ops.push_back(InOp.getOperand(i));
28231
28232    EVT EltVT = InOp.getOperand(0).getValueType();
28233
28234    SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
28235      DAG.getUNDEF(EltVT);
28236    for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
28237      Ops.push_back(FillVal);
28238    return DAG.getBuildVector(NVT, dl, Ops);
28239  }
28240  SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
28241    DAG.getUNDEF(NVT);
28242  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
28243                     InOp, DAG.getIntPtrConstant(0, dl));
28244}
28245
28246static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
28247                             SelectionDAG &DAG) {
28248  assert(Subtarget.hasAVX512() &&
28249         "MGATHER/MSCATTER are supported on AVX-512 arch only");
28250
28251  MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
28252  SDValue Src = N->getValue();
28253  MVT VT = Src.getSimpleValueType();
28254  assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
28255  SDLoc dl(Op);
28256
28257  SDValue Scale = N->getScale();
28258  SDValue Index = N->getIndex();
28259  SDValue Mask = N->getMask();
28260  SDValue Chain = N->getChain();
28261  SDValue BasePtr = N->getBasePtr();
28262
28263  if (VT == MVT::v2f32 || VT == MVT::v2i32) {
28264    assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
28265    // If the index is v2i64 and we have VLX we can use xmm for data and index.
28266    if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
28267      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28268      EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
28269      Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Src, DAG.getUNDEF(VT));
28270      SDVTList VTs = DAG.getVTList(MVT::v2i1, MVT::Other);
28271      SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
28272      SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
28273          VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
28274      return SDValue(NewScatter.getNode(), 1);
28275    }
28276    return SDValue();
28277  }
28278
28279  MVT IndexVT = Index.getSimpleValueType();
28280  MVT MaskVT = Mask.getSimpleValueType();
28281
28282  // If the index is v2i32, we're being called by type legalization and we
28283  // should just let the default handling take care of it.
28284  if (IndexVT == MVT::v2i32)
28285    return SDValue();
28286
28287  // If we don't have VLX and neither the passthru nor the index is 512 bits, we
28288  // need to widen until one is.
28289  if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
28290      !Index.getSimpleValueType().is512BitVector()) {
28291    // Determine how much we need to widen by to get a 512-bit type.
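    // Worked example (illustration only): for a v4i32 value (128 bits) with a
    // v4i64 index (256 bits), Factor = min(512/128, 512/256) = 2, giving a
    // v8i32 value and a 512-bit v8i64 index.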
28292    unsigned Factor = std::min(512/VT.getSizeInBits(),
28293                               512/IndexVT.getSizeInBits());
28294    unsigned NumElts = VT.getVectorNumElements() * Factor;
28295
28296    VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
28297    IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
28298    MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
28299
28300    Src = ExtendToType(Src, VT, DAG);
28301    Index = ExtendToType(Index, IndexVT, DAG);
28302    Mask = ExtendToType(Mask, MaskVT, DAG, true);
28303  }
28304
28305  SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
28306  SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
28307  SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
28308      VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
28309  return SDValue(NewScatter.getNode(), 1);
28310}
28311
28312static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
28313                          SelectionDAG &DAG) {
28314
28315  MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
28316  MVT VT = Op.getSimpleValueType();
28317  MVT ScalarVT = VT.getScalarType();
28318  SDValue Mask = N->getMask();
28319  MVT MaskVT = Mask.getSimpleValueType();
28320  SDValue PassThru = N->getPassThru();
28321  SDLoc dl(Op);
28322
28323  // Handle AVX masked loads which don't support passthru other than 0.
28324  if (MaskVT.getVectorElementType() != MVT::i1) {
28325    // We also allow undef in the isel pattern.
28326    if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
28327      return Op;
28328
28329    SDValue NewLoad = DAG.getMaskedLoad(
28330        VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
28331        getZeroVector(VT, Subtarget, DAG, dl), N->getMemoryVT(),
28332        N->getMemOperand(), N->getAddressingMode(), N->getExtensionType(),
28333        N->isExpandingLoad());
28334    // Emit a blend.
28335    SDValue Select = DAG.getNode(ISD::VSELECT, dl, MaskVT, Mask, NewLoad,
28336                                 PassThru);
28337    return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
28338  }
28339
28340  assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
28341         "Expanding masked load is supported on AVX-512 target only!");
28342
28343  assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
28344         "Expanding masked load is supported for 32 and 64-bit types only!");
28345
28346  assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
28347         "Cannot lower masked load op.");
28348
28349  assert((ScalarVT.getSizeInBits() >= 32 ||
28350          (Subtarget.hasBWI() &&
28351              (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
28352         "Unsupported masked load op.");
28353
28354  // This operation is legal for targets with VLX, but without
28355  // VLX the vector should be widened to 512 bits.
28356  unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
28357  MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
28358  PassThru = ExtendToType(PassThru, WideDataVT, DAG);
28359
28360  // Mask element has to be i1.
28361  assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
28362         "Unexpected mask type");
28363
28364  MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
28365
28366  Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
28367  SDValue NewLoad = DAG.getMaskedLoad(
28368      WideDataVT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
28369      PassThru, N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
28370      N->getExtensionType(), N->isExpandingLoad());
28371
28372  SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
28373                                NewLoad.getValue(0),
28374                                DAG.getIntPtrConstant(0, dl));
28375  SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
28376  return DAG.getMergeValues(RetOps, dl);
28377}
28378
28379static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
28380                           SelectionDAG &DAG) {
28381  MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
28382  SDValue DataToStore = N->getValue();
28383  MVT VT = DataToStore.getSimpleValueType();
28384  MVT ScalarVT = VT.getScalarType();
28385  SDValue Mask = N->getMask();
28386  SDLoc dl(Op);
28387
28388  assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
28389         "Expanding masked load is supported on AVX-512 target only!");
28390
28391  assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
28392         "Expanding masked load is supported for 32 and 64-bit types only!");
28393
28394  assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
28395         "Cannot lower masked store op.");
28396
28397  assert((ScalarVT.getSizeInBits() >= 32 ||
28398          (Subtarget.hasBWI() &&
28399              (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
28400          "Unsupported masked store op.");
28401
28402  // This operation is legal for targets with VLX, but without
28403  // VLX the vector should be widened to 512 bits.
28404  unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
28405  MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
28406
28407  // Mask element has to be i1.
28408  assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
28409         "Unexpected mask type");
28410
28411  MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
28412
28413  DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
28414  Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
28415  return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
28416                            N->getOffset(), Mask, N->getMemoryVT(),
28417                            N->getMemOperand(), N->getAddressingMode(),
28418                            N->isTruncatingStore(), N->isCompressingStore());
28419}
28420
28421static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
28422                            SelectionDAG &DAG) {
28423  assert(Subtarget.hasAVX2() &&
28424         "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");
28425
28426  MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
28427  SDLoc dl(Op);
28428  MVT VT = Op.getSimpleValueType();
28429  SDValue Index = N->getIndex();
28430  SDValue Mask = N->getMask();
28431  SDValue PassThru = N->getPassThru();
28432  MVT IndexVT = Index.getSimpleValueType();
28433  MVT MaskVT = Mask.getSimpleValueType();
28434
28435  assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
28436
28437  // If the index is v2i32, we're being called by type legalization.
28438  if (IndexVT == MVT::v2i32)
28439    return SDValue();
28440
28441  // If we don't have VLX and neither the passthru nor the index is 512 bits, we
28442  // need to widen until one is.
28443  MVT OrigVT = VT;
28444  if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
28445      !IndexVT.is512BitVector()) {
28446    // Determine how much we need to widen by to get a 512-bit type.
28447    unsigned Factor = std::min(512/VT.getSizeInBits(),
28448                               512/IndexVT.getSizeInBits());
28449
28450    unsigned NumElts = VT.getVectorNumElements() * Factor;
28451
28452    VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
28453    IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
28454    MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
28455
28456    PassThru = ExtendToType(PassThru, VT, DAG);
28457    Index = ExtendToType(Index, IndexVT, DAG);
28458    Mask = ExtendToType(Mask, MaskVT, DAG, true);
28459  }
28460
28461  SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
28462                    N->getScale() };
28463  SDValue NewGather = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
28464      DAG.getVTList(VT, MaskVT, MVT::Other), Ops, dl, N->getMemoryVT(),
28465      N->getMemOperand());
28466  SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
28467                                NewGather, DAG.getIntPtrConstant(0, dl));
28468  return DAG.getMergeValues({Extract, NewGather.getValue(2)}, dl);
28469}
28470
28471static SDValue LowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) {
28472  SDLoc dl(Op);
28473  SDValue Src = Op.getOperand(0);
28474  MVT DstVT = Op.getSimpleValueType();
28475
28476  AddrSpaceCastSDNode *N = cast<AddrSpaceCastSDNode>(Op.getNode());
28477  unsigned SrcAS = N->getSrcAddressSpace();
28478
28479  assert(SrcAS != N->getDestAddressSpace() &&
28480         "addrspacecast must be between different address spaces");
28481
28482  if (SrcAS == X86AS::PTR32_UPTR && DstVT == MVT::i64) {
28483    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Src);
28484  } else if (DstVT == MVT::i64) {
28485    Op = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Src);
28486  } else if (DstVT == MVT::i32) {
28487    Op = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);
28488  } else {
28489    report_fatal_error("Bad address space in addrspacecast");
28490  }
28491  return Op;
28492}
28493
28494SDValue X86TargetLowering::LowerGC_TRANSITION(SDValue Op,
28495                                              SelectionDAG &DAG) const {
28496  // TODO: Eventually, the lowering of these nodes should be informed by or
28497  // deferred to the GC strategy for the function in which they appear. For
28498  // now, however, they must be lowered to something. Since they are logically
28499  // no-ops in the case of a null GC strategy (or a GC strategy which does not
28500  // require special handling for these nodes), lower them as literal NOOPs for
28501  // the time being.
28502  SmallVector<SDValue, 2> Ops;
28503
28504  Ops.push_back(Op.getOperand(0));
28505  if (Op->getGluedNode())
28506    Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
28507
28508  SDLoc OpDL(Op);
28509  SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
28510  SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
28511
28512  return NOOP;
28513}
28514
28515SDValue X86TargetLowering::LowerF128Call(SDValue Op, SelectionDAG &DAG,
28516                                         RTLIB::Libcall Call) const {
28517
28518  bool IsStrict = Op->isStrictFPOpcode();
28519  unsigned Offset = IsStrict ? 1 : 0;
28520  SmallVector<SDValue, 2> Ops(Op->op_begin() + Offset, Op->op_end());
28521
28522  SDLoc dl(Op);
28523  SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
28524  MakeLibCallOptions CallOptions;
28525  std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, Call, MVT::f128, Ops,
28526                                                CallOptions, dl, Chain);
28527
28528  if (IsStrict)
28529    return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
28530
28531  return Tmp.first;
28532}
28533
28534/// Provide custom lowering hooks for some operations.
28535SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
28536  switch (Op.getOpcode()) {
28537  default: llvm_unreachable("Should not custom lower this!");
28538  case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, Subtarget, DAG);
28539  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
28540    return LowerCMP_SWAP(Op, Subtarget, DAG);
28541  case ISD::CTPOP:              return LowerCTPOP(Op, Subtarget, DAG);
28542  case ISD::ATOMIC_LOAD_ADD:
28543  case ISD::ATOMIC_LOAD_SUB:
28544  case ISD::ATOMIC_LOAD_OR:
28545  case ISD::ATOMIC_LOAD_XOR:
28546  case ISD::ATOMIC_LOAD_AND:    return lowerAtomicArith(Op, DAG, Subtarget);
28547  case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG, Subtarget);
28548  case ISD::BITREVERSE:         return LowerBITREVERSE(Op, Subtarget, DAG);
28549  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
28550  case ISD::CONCAT_VECTORS:     return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
28551  case ISD::VECTOR_SHUFFLE:     return lowerVECTOR_SHUFFLE(Op, Subtarget, DAG);
28552  case ISD::VSELECT:            return LowerVSELECT(Op, DAG);
28553  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
28554  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
28555  case ISD::INSERT_SUBVECTOR:   return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
28556  case ISD::EXTRACT_SUBVECTOR:  return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
28557  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
28558  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
28559  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
28560  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
28561  case ISD::ExternalSymbol:     return LowerExternalSymbol(Op, DAG);
28562  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
28563  case ISD::SHL_PARTS:
28564  case ISD::SRA_PARTS:
28565  case ISD::SRL_PARTS:          return LowerShiftParts(Op, DAG);
28566  case ISD::FSHL:
28567  case ISD::FSHR:               return LowerFunnelShift(Op, Subtarget, DAG);
28568  case ISD::STRICT_SINT_TO_FP:
28569  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
28570  case ISD::STRICT_UINT_TO_FP:
28571  case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG);
28572  case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
28573  case ISD::ZERO_EXTEND:        return LowerZERO_EXTEND(Op, Subtarget, DAG);
28574  case ISD::SIGN_EXTEND:        return LowerSIGN_EXTEND(Op, Subtarget, DAG);
28575  case ISD::ANY_EXTEND:         return LowerANY_EXTEND(Op, Subtarget, DAG);
28576  case ISD::ZERO_EXTEND_VECTOR_INREG:
28577  case ISD::SIGN_EXTEND_VECTOR_INREG:
28578    return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
28579  case ISD::FP_TO_SINT:
28580  case ISD::STRICT_FP_TO_SINT:
28581  case ISD::FP_TO_UINT:
28582  case ISD::STRICT_FP_TO_UINT:  return LowerFP_TO_INT(Op, DAG);
28583  case ISD::FP_EXTEND:
28584  case ISD::STRICT_FP_EXTEND:   return LowerFP_EXTEND(Op, DAG);
28585  case ISD::FP_ROUND:
28586  case ISD::STRICT_FP_ROUND:    return LowerFP_ROUND(Op, DAG);
28587  case ISD::LOAD:               return LowerLoad(Op, Subtarget, DAG);
28588  case ISD::STORE:              return LowerStore(Op, Subtarget, DAG);
28589  case ISD::FADD:
28590  case ISD::FSUB:               return lowerFaddFsub(Op, DAG);
28591  case ISD::FABS:
28592  case ISD::FNEG:               return LowerFABSorFNEG(Op, DAG);
28593  case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
28594  case ISD::FGETSIGN:           return LowerFGETSIGN(Op, DAG);
28595  case ISD::SETCC:
28596  case ISD::STRICT_FSETCC:
28597  case ISD::STRICT_FSETCCS:     return LowerSETCC(Op, DAG);
28598  case ISD::SETCCCARRY:         return LowerSETCCCARRY(Op, DAG);
28599  case ISD::SELECT:             return LowerSELECT(Op, DAG);
28600  case ISD::BRCOND:             return LowerBRCOND(Op, DAG);
28601  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
28602  case ISD::VASTART:            return LowerVASTART(Op, DAG);
28603  case ISD::VAARG:              return LowerVAARG(Op, DAG);
28604  case ISD::VACOPY:             return LowerVACOPY(Op, Subtarget, DAG);
28605  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
28606  case ISD::INTRINSIC_VOID:
28607  case ISD::INTRINSIC_W_CHAIN:  return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
28608  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
28609  case ISD::ADDROFRETURNADDR:   return LowerADDROFRETURNADDR(Op, DAG);
28610  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
28611  case ISD::FRAME_TO_ARGS_OFFSET:
28612                                return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
28613  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
28614  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
28615  case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
28616  case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
28617  case ISD::EH_SJLJ_SETUP_DISPATCH:
28618    return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
28619  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
28620  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
28621  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
28622  case ISD::CTLZ:
28623  case ISD::CTLZ_ZERO_UNDEF:    return LowerCTLZ(Op, Subtarget, DAG);
28624  case ISD::CTTZ:
28625  case ISD::CTTZ_ZERO_UNDEF:    return LowerCTTZ(Op, Subtarget, DAG);
28626  case ISD::MUL:                return LowerMUL(Op, Subtarget, DAG);
28627  case ISD::MULHS:
28628  case ISD::MULHU:              return LowerMULH(Op, Subtarget, DAG);
28629  case ISD::ROTL:
28630  case ISD::ROTR:               return LowerRotate(Op, Subtarget, DAG);
28631  case ISD::SRA:
28632  case ISD::SRL:
28633  case ISD::SHL:                return LowerShift(Op, Subtarget, DAG);
28634  case ISD::SADDO:
28635  case ISD::UADDO:
28636  case ISD::SSUBO:
28637  case ISD::USUBO:
28638  case ISD::SMULO:
28639  case ISD::UMULO:              return LowerXALUO(Op, DAG);
28640  case ISD::READCYCLECOUNTER:   return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
28641  case ISD::BITCAST:            return LowerBITCAST(Op, Subtarget, DAG);
28642  case ISD::ADDCARRY:
28643  case ISD::SUBCARRY:           return LowerADDSUBCARRY(Op, DAG);
28644  case ISD::ADD:
28645  case ISD::SUB:                return lowerAddSub(Op, DAG, Subtarget);
28646  case ISD::UADDSAT:
28647  case ISD::SADDSAT:
28648  case ISD::USUBSAT:
28649  case ISD::SSUBSAT:            return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
28650  case ISD::SMAX:
28651  case ISD::SMIN:
28652  case ISD::UMAX:
28653  case ISD::UMIN:               return LowerMINMAX(Op, DAG);
28654  case ISD::ABS:                return LowerABS(Op, Subtarget, DAG);
28655  case ISD::FSINCOS:            return LowerFSINCOS(Op, Subtarget, DAG);
28656  case ISD::MLOAD:              return LowerMLOAD(Op, Subtarget, DAG);
28657  case ISD::MSTORE:             return LowerMSTORE(Op, Subtarget, DAG);
28658  case ISD::MGATHER:            return LowerMGATHER(Op, Subtarget, DAG);
28659  case ISD::MSCATTER:           return LowerMSCATTER(Op, Subtarget, DAG);
28660  case ISD::GC_TRANSITION_START:
28661  case ISD::GC_TRANSITION_END:  return LowerGC_TRANSITION(Op, DAG);
28662  case ISD::ADDRSPACECAST:
28663    return LowerADDRSPACECAST(Op, DAG);
28664  }
28665}
28666
28667/// Places new result values for the node in Results (their number
28668/// and types must exactly match those of the original return values of
28669/// the node), or leaves Results empty, which indicates that the node is not
28670/// to be custom lowered after all.
28671void X86TargetLowering::LowerOperationWrapper(SDNode *N,
28672                                              SmallVectorImpl<SDValue> &Results,
28673                                              SelectionDAG &DAG) const {
28674  SDValue Res = LowerOperation(SDValue(N, 0), DAG);
28675
28676  if (!Res.getNode())
28677    return;
28678
28679  // If the original node has one result, take the return value from
28680  // LowerOperation as is. It might not be result number 0.
28681  if (N->getNumValues() == 1) {
28682    Results.push_back(Res);
28683    return;
28684  }
28685
28686  // If the original node has multiple results, then the return node should
28687  // have the same number of results.
28688  assert((N->getNumValues() == Res->getNumValues()) &&
28689      "Lowering returned the wrong number of results!");
28690
28691  // Places new result values based on N's result numbers.
28692  for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
28693    Results.push_back(Res.getValue(I));
28694}
28695
28696/// Replace a node with an illegal result type with a new node built out of
28697/// custom code.
28698void X86TargetLowering::ReplaceNodeResults(SDNode *N,
28699                                           SmallVectorImpl<SDValue>&Results,
28700                                           SelectionDAG &DAG) const {
28701  SDLoc dl(N);
28702  switch (N->getOpcode()) {
28703  default:
28704#ifndef NDEBUG
28705    dbgs() << "ReplaceNodeResults: ";
28706    N->dump(&DAG);
28707#endif
28708    llvm_unreachable("Do not know how to custom type legalize this operation!");
28709  case ISD::CTPOP: {
28710    assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
28711    // Use a v2i64 if possible.
28712    bool NoImplicitFloatOps =
28713        DAG.getMachineFunction().getFunction().hasFnAttribute(
28714            Attribute::NoImplicitFloat);
28715    if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
28716      SDValue Wide =
28717          DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
28718      Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
28719      // The bit count fits in 32 bits; extract it as an i32 and then zero
28720      // extend to i64. Otherwise we end up extracting bits 63:32 separately.
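      // (An i64 has a population count of at most 64, so the full result is
      // already contained in the low 32 bits of the v2i64 CTPOP output.)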
28721      Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
28722      Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
28723                         DAG.getIntPtrConstant(0, dl));
28724      Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
28725      Results.push_back(Wide);
28726    }
28727    return;
28728  }
28729  case ISD::MUL: {
28730    EVT VT = N->getValueType(0);
28731    assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
28732           VT.getVectorElementType() == MVT::i8 && "Unexpected VT!");
28733    // Pre-promote these to vXi16 to avoid op legalization thinking all 16
28734    // elements are needed.
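    // For example, a v2i8 multiply is any-extended to v2i16, multiplied,
    // truncated back to v2i8, and then padded with undef up to v16i8 to
    // match the widened result type.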
28735    MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
28736    SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
28737    SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
28738    SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
28739    Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
28740    unsigned NumConcats = 16 / VT.getVectorNumElements();
28741    SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
28742    ConcatOps[0] = Res;
28743    Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
28744    Results.push_back(Res);
28745    return;
28746  }
28747  case X86ISD::VPMADDWD:
28748  case X86ISD::AVG: {
28749    // Legalize types for X86ISD::AVG/VPMADDWD by widening.
28750    assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
28751
28752    EVT VT = N->getValueType(0);
28753    EVT InVT = N->getOperand(0).getValueType();
28754    assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
28755           "Expected a VT that divides into 128 bits.");
28756    assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
28757           "Unexpected type action!");
28758    unsigned NumConcat = 128 / InVT.getSizeInBits();
28759
28760    EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
28761                                    InVT.getVectorElementType(),
28762                                    NumConcat * InVT.getVectorNumElements());
28763    EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
28764                                  VT.getVectorElementType(),
28765                                  NumConcat * VT.getVectorNumElements());
28766
28767    SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
28768    Ops[0] = N->getOperand(0);
28769    SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
28770    Ops[0] = N->getOperand(1);
28771    SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
28772
28773    SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
28774    Results.push_back(Res);
28775    return;
28776  }
28777  case ISD::ABS: {
28778    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28779    assert(N->getValueType(0) == MVT::i64 &&
28780           "Unexpected type (!= i64) on ABS.");
28781    MVT HalfT = MVT::i32;
28782    SDValue Lo, Hi, Tmp;
28783    SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);
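    // Expand |x| as (x + sign) ^ sign, where sign is x arithmetically shifted
    // right 63 bits (all zeros or all ones). The sign word is formed by an sra
    // of the high half by 31, and the add is carried across the two 32-bit
    // halves with UADDO/ADDCARRY.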
28784
28785    Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
28786                     DAG.getConstant(0, dl, HalfT));
28787    Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
28788                     DAG.getConstant(1, dl, HalfT));
28789    Tmp = DAG.getNode(
28790        ISD::SRA, dl, HalfT, Hi,
28791        DAG.getConstant(HalfT.getSizeInBits() - 1, dl,
28792                        TLI.getShiftAmountTy(HalfT, DAG.getDataLayout())));
28793    Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
28794    Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
28795                     SDValue(Lo.getNode(), 1));
28796    Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
28797    Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);
28798    Results.push_back(Lo);
28799    Results.push_back(Hi);
28800    return;
28801  }
28802  // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
28803  case X86ISD::FMINC:
28804  case X86ISD::FMIN:
28805  case X86ISD::FMAXC:
28806  case X86ISD::FMAX: {
28807    EVT VT = N->getValueType(0);
28808    assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
28809    SDValue UNDEF = DAG.getUNDEF(VT);
28810    SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
28811                              N->getOperand(0), UNDEF);
28812    SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
28813                              N->getOperand(1), UNDEF);
28814    Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
28815    return;
28816  }
28817  case ISD::SDIV:
28818  case ISD::UDIV:
28819  case ISD::SREM:
28820  case ISD::UREM: {
28821    EVT VT = N->getValueType(0);
28822    if (VT.isVector()) {
28823      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
28824             "Unexpected type action!");
28825      // If this RHS is a constant splat vector we can widen this and let
28826      // division/remainder by constant optimize it.
28827      // TODO: Can we do something for non-splat?
28828      APInt SplatVal;
28829      if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
28830        unsigned NumConcats = 128 / VT.getSizeInBits();
28831        SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
28832        Ops0[0] = N->getOperand(0);
28833        EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
28834        SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
28835        SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
28836        SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
28837        Results.push_back(Res);
28838      }
28839      return;
28840    }
28841
28842    LLVM_FALLTHROUGH;
28843  }
28844  case ISD::SDIVREM:
28845  case ISD::UDIVREM: {
28846    SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
28847    Results.push_back(V);
28848    return;
28849  }
28850  case ISD::TRUNCATE: {
28851    MVT VT = N->getSimpleValueType(0);
28852    if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
28853      return;
28854
28855    // The generic legalizer will try to widen the input type to the same
28856    // number of elements as the widened result type. But this isn't always
28857    // the best thing, so do some custom legalization to avoid some cases.
28858    MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
28859    SDValue In = N->getOperand(0);
28860    EVT InVT = In.getValueType();
28861
28862    unsigned InBits = InVT.getSizeInBits();
28863    if (128 % InBits == 0) {
28864      // 128-bit and smaller inputs should avoid the truncate altogether and
28865      // just use a build_vector that will become a shuffle.
28866      // TODO: Widen and use a shuffle directly?
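      // For example, a v2i64 -> v2i16 truncate becomes two scalar
      // extract+truncate pairs placed into a v8i16 build_vector whose
      // remaining lanes are undef.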
28867      MVT InEltVT = InVT.getSimpleVT().getVectorElementType();
28868      EVT EltVT = VT.getVectorElementType();
28869      unsigned WidenNumElts = WidenVT.getVectorNumElements();
28870      SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
28871      // Use the original element count so we don't do more scalar opts than
28872      // necessary.
28873      unsigned MinElts = VT.getVectorNumElements();
28874      for (unsigned i=0; i < MinElts; ++i) {
28875        SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
28876                                  DAG.getIntPtrConstant(i, dl));
28877        Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
28878      }
28879      Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
28880      return;
28881    }
28882    // With AVX512 there are some cases that can use a target specific
28883    // truncate node to go from 256/512 to less than 128 with zeros in the
28884    // upper elements of the 128 bit result.
28885    if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
28886      // We can use VTRUNC directly for 256 bits with VLX, or for any 512 bits.
28887      if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
28888        Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
28889        return;
28890      }
28891      // There's one case we can widen to 512 bits and use VTRUNC.
28892      if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
28893        In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
28894                         DAG.getUNDEF(MVT::v4i64));
28895        Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
28896        return;
28897      }
28898    }
28899    if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 &&
28900        getTypeAction(*DAG.getContext(), InVT) == TypeSplitVector &&
28901        isTypeLegal(MVT::v4i64)) {
28902      // Input needs to be split and output needs to be widened. Let's use two
28903      // VTRUNCs, and shuffle their results together into the wider type.
28904      SDValue Lo, Hi;
28905      std::tie(Lo, Hi) = DAG.SplitVector(In, dl);
28906
28907      Lo = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Lo);
28908      Hi = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Hi);
28909      SDValue Res = DAG.getVectorShuffle(MVT::v16i8, dl, Lo, Hi,
28910                                         { 0,  1,  2,  3, 16, 17, 18, 19,
28911                                          -1, -1, -1, -1, -1, -1, -1, -1 });
28912      Results.push_back(Res);
28913      return;
28914    }
28915
28916    return;
28917  }
28918  case ISD::ANY_EXTEND:
28919    // Right now, only MVT::v8i8 has Custom action for an illegal type.
28920    // It is intended to custom-handle the input type.
28921    assert(N->getValueType(0) == MVT::v8i8 &&
28922           "Do not know how to legalize this Node");
28923    return;
28924  case ISD::SIGN_EXTEND:
28925  case ISD::ZERO_EXTEND: {
28926    EVT VT = N->getValueType(0);
28927    SDValue In = N->getOperand(0);
28928    EVT InVT = In.getValueType();
28929    if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
28930        (InVT == MVT::v4i16 || InVT == MVT::v4i8)){
28931      assert(getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector &&
28932             "Unexpected type action!");
28933      assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
28934      // Custom split this so we can extend i8/i16->i32 invec. This is better
28935      // since sign_extend_inreg i8/i16->i64 requires an extend to i32 using sra,
28936      // followed by an extend from i32 to i64 using pcmpgt. By custom splitting
28937      // we allow the sra from the extend to i32 to be shared by the split.
28938      In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
28939
28940      // Fill a vector with sign bits for each element.
28941      SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
28942      SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
28943
28944      // Create an unpackl and unpackh to interleave the sign bits then bitcast
28945      // to v2i64.
28946      SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
28947                                        {0, 4, 1, 5});
28948      Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
28949      SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
28950                                        {2, 6, 3, 7});
28951      Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);
28952
28953      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
28954      Results.push_back(Res);
28955      return;
28956    }
28957
28958    if (VT == MVT::v16i32 || VT == MVT::v8i64) {
28959      if (!InVT.is128BitVector()) {
28960        // Not a 128 bit vector, but maybe type legalization will promote
28961        // it to 128 bits.
28962        if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
28963          return;
28964        InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
28965        if (!InVT.is128BitVector())
28966          return;
28967
28968        // Promote the input to 128 bits. Type legalization will turn this into
28969        // zext_inreg/sext_inreg.
28970        In = DAG.getNode(N->getOpcode(), dl, InVT, In);
28971      }
28972
28973      // Perform custom splitting instead of the two stage extend we would get
28974      // by default.
28975      EVT LoVT, HiVT;
28976      std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
28977      assert(isTypeLegal(LoVT) && "Split VT not legal?");
28978
28979      SDValue Lo = getExtendInVec(N->getOpcode(), dl, LoVT, In, DAG);
28980
28981      // We need to shift the input over by half the number of elements.
28982      unsigned NumElts = InVT.getVectorNumElements();
28983      unsigned HalfNumElts = NumElts / 2;
28984      SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
28985      for (unsigned i = 0; i != HalfNumElts; ++i)
28986        ShufMask[i] = i + HalfNumElts;
28987
28988      SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
28989      Hi = getExtendInVec(N->getOpcode(), dl, HiVT, Hi, DAG);
28990
28991      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
28992      Results.push_back(Res);
28993    }
28994    return;
28995  }
28996  case ISD::FP_TO_SINT:
28997  case ISD::STRICT_FP_TO_SINT:
28998  case ISD::FP_TO_UINT:
28999  case ISD::STRICT_FP_TO_UINT: {
29000    bool IsStrict = N->isStrictFPOpcode();
29001    bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
29002                    N->getOpcode() == ISD::STRICT_FP_TO_SINT;
29003    EVT VT = N->getValueType(0);
29004    SDValue Src = N->getOperand(IsStrict ? 1 : 0);
29005    EVT SrcVT = Src.getValueType();
29006
29007    if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
29008      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29009             "Unexpected type action!");
29010
29011      // Try to create a 128 bit vector, but don't exceed a 32 bit element.
29012      unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
29013      MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
29014                                       VT.getVectorNumElements());
29015      SDValue Res;
29016      SDValue Chain;
29017      if (IsStrict) {
29018        Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {PromoteVT, MVT::Other},
29019                          {N->getOperand(0), Src});
29020        Chain = Res.getValue(1);
29021      } else
29022        Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
29023
29024      // Preserve what we know about the size of the original result. Except
29025      // when the result is v2i32 since we can't widen the assert.
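      // For example, an fptosi v4f32 -> v4i8 is emitted as an fptosi to
      // v4i32, an AssertSext to 8 bits, a truncate back to v4i8, and an
      // undef-padded concat up to v16i8.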
29026      if (PromoteVT != MVT::v2i32)
29027        Res = DAG.getNode(!IsSigned ? ISD::AssertZext : ISD::AssertSext,
29028                          dl, PromoteVT, Res,
29029                          DAG.getValueType(VT.getVectorElementType()));
29030
29031      // Truncate back to the original width.
29032      Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
29033
29034      // Now widen to 128 bits.
29035      unsigned NumConcats = 128 / VT.getSizeInBits();
29036      MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
29037                                      VT.getVectorNumElements() * NumConcats);
29038      SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
29039      ConcatOps[0] = Res;
29040      Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
29041      Results.push_back(Res);
29042      if (IsStrict)
29043        Results.push_back(Chain);
29044      return;
29045    }
29046
29047
29048    if (VT == MVT::v2i32) {
29049      assert((IsSigned || Subtarget.hasAVX512()) &&
29050             "Can only handle signed conversion without AVX512");
29051      assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
29052      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29053             "Unexpected type action!");
29054      if (Src.getValueType() == MVT::v2f64) {
29055        unsigned Opc;
29056        if (IsStrict)
29057          Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
29058        else
29059          Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
29060
29061        // If we have VLX we can emit a target specific FP_TO_UINT node.
29062        if (!IsSigned && !Subtarget.hasVLX()) {
29063          // Otherwise we can defer to the generic legalizer which will widen
29064          // the input as well. This will be further widened during op
29065          // legalization to v8i32<-v8f64.
29066          // For strict nodes we'll need to widen ourselves.
29067          // FIXME: Fix the type legalizer to safely widen strict nodes?
29068          if (!IsStrict)
29069            return;
29070          Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f64, Src,
29071                            DAG.getConstantFP(0.0, dl, MVT::v2f64));
29072          Opc = N->getOpcode();
29073        }
29074        SDValue Res;
29075        SDValue Chain;
29076        if (IsStrict) {
29077          Res = DAG.getNode(Opc, dl, {MVT::v4i32, MVT::Other},
29078                            {N->getOperand(0), Src});
29079          Chain = Res.getValue(1);
29080        } else {
29081          Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
29082        }
29083        Results.push_back(Res);
29084        if (IsStrict)
29085          Results.push_back(Chain);
29086        return;
29087      }
29088
29089      // Custom widen strict v2f32->v2i32 by padding with zeros.
29090      // FIXME: Should generic type legalizer do this?
29091      if (Src.getValueType() == MVT::v2f32 && IsStrict) {
29092        Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
29093                          DAG.getConstantFP(0.0, dl, MVT::v2f32));
29094        SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4i32, MVT::Other},
29095                                  {N->getOperand(0), Src});
29096        Results.push_back(Res);
29097        Results.push_back(Res.getValue(1));
29098        return;
29099      }
29100
29101      // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
29102      // so early out here.
29103      return;
29104    }
29105
29106    assert(!VT.isVector() && "Vectors should have been handled above!");
29107
29108    if (Subtarget.hasDQI() && VT == MVT::i64 &&
29109        (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
29110      assert(!Subtarget.is64Bit() && "i64 should be legal");
29111      unsigned NumElts = Subtarget.hasVLX() ? 2 : 8;
29112      // If we use a 128-bit result we might need to use a target specific node.
29113      unsigned SrcElts =
29114          std::max(NumElts, 128U / (unsigned)SrcVT.getSizeInBits());
29115      MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
29116      MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), SrcElts);
29117      unsigned Opc = N->getOpcode();
29118      if (NumElts != SrcElts) {
29119        if (IsStrict)
29120          Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
29121        else
29122          Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
29123      }
29124
29125      SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
29126      SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
29127                                DAG.getConstantFP(0.0, dl, VecInVT), Src,
29128                                ZeroIdx);
29129      SDValue Chain;
29130      if (IsStrict) {
29131        SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
29132        Res = DAG.getNode(Opc, SDLoc(N), Tys, N->getOperand(0), Res);
29133        Chain = Res.getValue(1);
29134      } else
29135        Res = DAG.getNode(Opc, SDLoc(N), VecVT, Res);
29136      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
29137      Results.push_back(Res);
29138      if (IsStrict)
29139        Results.push_back(Chain);
29140      return;
29141    }
29142
29143    SDValue Chain;
29144    if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, Chain)) {
29145      Results.push_back(V);
29146      if (IsStrict)
29147        Results.push_back(Chain);
29148    }
29149    return;
29150  }
29151  case ISD::SINT_TO_FP:
29152  case ISD::STRICT_SINT_TO_FP:
29153  case ISD::UINT_TO_FP:
29154  case ISD::STRICT_UINT_TO_FP: {
29155    bool IsStrict = N->isStrictFPOpcode();
29156    bool IsSigned = N->getOpcode() == ISD::SINT_TO_FP ||
29157                    N->getOpcode() == ISD::STRICT_SINT_TO_FP;
29158    EVT VT = N->getValueType(0);
29159    if (VT != MVT::v2f32)
29160      return;
29161    SDValue Src = N->getOperand(IsStrict ? 1 : 0);
29162    EVT SrcVT = Src.getValueType();
29163    if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
29164      if (IsStrict) {
29165        unsigned Opc = IsSigned ? X86ISD::STRICT_CVTSI2P
29166                                : X86ISD::STRICT_CVTUI2P;
29167        SDValue Res = DAG.getNode(Opc, dl, {MVT::v4f32, MVT::Other},
29168                                  {N->getOperand(0), Src});
29169        Results.push_back(Res);
29170        Results.push_back(Res.getValue(1));
29171      } else {
29172        unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
29173        Results.push_back(DAG.getNode(Opc, dl, MVT::v4f32, Src));
29174      }
29175      return;
29176    }
29177    if (SrcVT == MVT::v2i64 && !IsSigned && Subtarget.is64Bit() &&
29178        Subtarget.hasSSE41() && !Subtarget.hasAVX512()) {
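      // For inputs with the sign bit set, convert ((x >> 1) | (x & 1)) as a
      // signed value and then double the result with an fadd; keeping the
      // low bit ORed in preserves correct rounding. Inputs without the sign
      // bit set are converted directly, and a select on the sign picks the
      // right result for each lane.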
29179      SDValue Zero = DAG.getConstant(0, dl, SrcVT);
29180      SDValue One  = DAG.getConstant(1, dl, SrcVT);
29181      SDValue Sign = DAG.getNode(ISD::OR, dl, SrcVT,
29182                                 DAG.getNode(ISD::SRL, dl, SrcVT, Src, One),
29183                                 DAG.getNode(ISD::AND, dl, SrcVT, Src, One));
29184      SDValue IsNeg = DAG.getSetCC(dl, MVT::v2i64, Src, Zero, ISD::SETLT);
29185      SDValue SignSrc = DAG.getSelect(dl, SrcVT, IsNeg, Sign, Src);
29186      SmallVector<SDValue, 4> SignCvts(4, DAG.getConstantFP(0.0, dl, MVT::f32));
29187      for (int i = 0; i != 2; ++i) {
29188        SDValue Src = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
29189                                  SignSrc, DAG.getIntPtrConstant(i, dl));
29190        if (IsStrict)
29191          SignCvts[i] =
29192              DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {MVT::f32, MVT::Other},
29193                          {N->getOperand(0), Src});
29194        else
29195          SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Src);
29196      }
29197      SDValue SignCvt = DAG.getBuildVector(MVT::v4f32, dl, SignCvts);
29198      SDValue Slow, Chain;
29199      if (IsStrict) {
29200        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
29201                            SignCvts[0].getValue(1), SignCvts[1].getValue(1));
29202        Slow = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v4f32, MVT::Other},
29203                           {Chain, SignCvt, SignCvt});
29204        Chain = Slow.getValue(1);
29205      } else {
29206        Slow = DAG.getNode(ISD::FADD, dl, MVT::v4f32, SignCvt, SignCvt);
29207      }
29208      IsNeg = DAG.getBitcast(MVT::v4i32, IsNeg);
29209      IsNeg =
29210          DAG.getVectorShuffle(MVT::v4i32, dl, IsNeg, IsNeg, {1, 3, -1, -1});
29211      SDValue Cvt = DAG.getSelect(dl, MVT::v4f32, IsNeg, Slow, SignCvt);
29212      Results.push_back(Cvt);
29213      if (IsStrict)
29214        Results.push_back(Chain);
29215      return;
29216    }
29217
29218    if (SrcVT != MVT::v2i32)
29219      return;
29220
29221    if (IsSigned || Subtarget.hasAVX512()) {
29222      if (!IsStrict)
29223        return;
29224
29225      // Custom widen strict v2i32->v2f32 to avoid scalarization.
29226      // FIXME: Should generic type legalizer do this?
29227      Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
29228                        DAG.getConstant(0, dl, MVT::v2i32));
29229      SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
29230                                {N->getOperand(0), Src});
29231      Results.push_back(Res);
29232      Results.push_back(Res.getValue(1));
29233      return;
29234    }
29235
29236    assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
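    // 0x4330000000000000 is the bit pattern of the double 2^52. OR'ing a
    // zero-extended 32-bit value into the low mantissa bits produces exactly
    // 2^52 + x, so subtracting the bias recovers x as a double, which is
    // then rounded to f32 below.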
29237    SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
29238    SDValue VBias =
29239        DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
29240    SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
29241                             DAG.getBitcast(MVT::v2i64, VBias));
29242    Or = DAG.getBitcast(MVT::v2f64, Or);
29243    if (IsStrict) {
29244      SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
29245                                {N->getOperand(0), Or, VBias});
29246      SDValue Res = DAG.getNode(X86ISD::STRICT_VFPROUND, dl,
29247                                {MVT::v4f32, MVT::Other},
29248                                {Sub.getValue(1), Sub});
29249      Results.push_back(Res);
29250      Results.push_back(Res.getValue(1));
29251    } else {
29252      // TODO: Are there any fast-math-flags to propagate here?
29253      SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
29254      Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
29255    }
29256    return;
29257  }
29258  case ISD::STRICT_FP_ROUND:
29259  case ISD::FP_ROUND: {
29260    bool IsStrict = N->isStrictFPOpcode();
29261    SDValue Src = N->getOperand(IsStrict ? 1 : 0);
29262    if (!isTypeLegal(Src.getValueType()))
29263      return;
29264    SDValue V;
29265    if (IsStrict)
29266      V = DAG.getNode(X86ISD::STRICT_VFPROUND, dl, {MVT::v4f32, MVT::Other},
29267                      {N->getOperand(0), N->getOperand(1)});
29268    else
29269      V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
29270    Results.push_back(V);
29271    if (IsStrict)
29272      Results.push_back(V.getValue(1));
29273    return;
29274  }
29275  case ISD::FP_EXTEND: {
29276    // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
29277    // No other ValueType for FP_EXTEND should reach this point.
29278    assert(N->getValueType(0) == MVT::v2f32 &&
29279           "Do not know how to legalize this Node");
29280    return;
29281  }
29282  case ISD::INTRINSIC_W_CHAIN: {
29283    unsigned IntNo = N->getConstantOperandVal(1);
29284    switch (IntNo) {
29285    default: llvm_unreachable("Do not know how to custom type "
29286                               "legalize this intrinsic operation!");
29287    case Intrinsic::x86_rdtsc:
29288      return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
29289                                     Results);
29290    case Intrinsic::x86_rdtscp:
29291      return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
29292                                     Results);
29293    case Intrinsic::x86_rdpmc:
29294      expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
29295                                  Results);
29296      return;
29297    case Intrinsic::x86_xgetbv:
29298      expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
29299                                  Results);
29300      return;
29301    }
29302  }
29303  case ISD::READCYCLECOUNTER: {
29304    return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
29305  }
29306  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
29307    EVT T = N->getValueType(0);
29308    assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
29309    bool Regs64bit = T == MVT::i128;
29310    assert((!Regs64bit || Subtarget.hasCmpxchg16b()) &&
29311           "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
29312    MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
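    // CMPXCHG8B/CMPXCHG16B compare EDX:EAX / RDX:RAX against the memory
    // operand and, on a match, store ECX:EBX / RCX:RBX to it. The register
    // copies below stage the expected and new values into those pairs and
    // read the old value and ZF back out afterwards.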
29313    SDValue cpInL, cpInH;
29314    cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
29315                        DAG.getConstant(0, dl, HalfT));
29316    cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
29317                        DAG.getConstant(1, dl, HalfT));
29318    cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
29319                             Regs64bit ? X86::RAX : X86::EAX,
29320                             cpInL, SDValue());
29321    cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
29322                             Regs64bit ? X86::RDX : X86::EDX,
29323                             cpInH, cpInL.getValue(1));
29324    SDValue swapInL, swapInH;
29325    swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
29326                          DAG.getConstant(0, dl, HalfT));
29327    swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
29328                          DAG.getConstant(1, dl, HalfT));
29329    swapInH =
29330        DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
29331                         swapInH, cpInH.getValue(1));
29332    // If the current function needs the base pointer, RBX,
29333    // we shouldn't use cmpxchg directly.
29334    // The lowering of that instruction will clobber that register,
29335    // and since RBX will then be a reserved register, the register
29336    // allocator will not make sure its value is properly saved and
29337    // restored around this live range.
29338    const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
29339    SDValue Result;
29340    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
29341    Register BasePtr = TRI->getBaseRegister();
29342    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
29343    if (TRI->hasBasePointer(DAG.getMachineFunction()) &&
29344        (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
29345      // ISel prefers the LCMPXCHG64 variant.
29346      // If the assert below fires, that is no longer the case,
29347      // and we need to teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX,
29348      // not just EBX. This is a matter of accepting i64 input for that
29349      // pseudo, and restoring into a register of the right width
29350      // in the expand pseudo. Everything else should just work.
29351      assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) &&
29352             "Saving only half of the RBX");
29353      unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG
29354                                  : X86ISD::LCMPXCHG8_SAVE_EBX_DAG;
29355      SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl,
29356                                           Regs64bit ? X86::RBX : X86::EBX,
29357                                           HalfT, swapInH.getValue(1));
29358      SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL,
29359                       RBXSave,
29360                       /*Glue*/ RBXSave.getValue(2)};
29361      Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
29362    } else {
29363      unsigned Opcode =
29364          Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG;
29365      swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl,
29366                                 Regs64bit ? X86::RBX : X86::EBX, swapInL,
29367                                 swapInH.getValue(1));
29368      SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
29369                       swapInL.getValue(1)};
29370      Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
29371    }
29372    SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
29373                                        Regs64bit ? X86::RAX : X86::EAX,
29374                                        HalfT, Result.getValue(1));
29375    SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
29376                                        Regs64bit ? X86::RDX : X86::EDX,
29377                                        HalfT, cpOutL.getValue(2));
29378    SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
29379
29380    SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
29381                                        MVT::i32, cpOutH.getValue(2));
29382    SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
29383    Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
29384
29385    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
29386    Results.push_back(Success);
29387    Results.push_back(EFLAGS.getValue(1));
29388    return;
29389  }
29390  case ISD::ATOMIC_LOAD: {
29391    assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
29392    bool NoImplicitFloatOps =
29393        DAG.getMachineFunction().getFunction().hasFnAttribute(
29394            Attribute::NoImplicitFloat);
29395    if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
29396      auto *Node = cast<AtomicSDNode>(N);
29397      if (Subtarget.hasSSE2()) {
29398        // Use a VZEXT_LOAD which will be selected as MOVQ. Then extract the
29399        // lower 64 bits.
29400        SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
29401        SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
29402        SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
29403                                             MVT::i64, Node->getMemOperand());
29404        SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
29405                                  DAG.getIntPtrConstant(0, dl));
29406        Results.push_back(Res);
29407        Results.push_back(Ld.getValue(1));
29408        return;
29409      }
29410      if (Subtarget.hasX87()) {
29411        // First load this into an 80-bit X87 register. This will put the whole
29412        // integer into the significand.
29413        // FIXME: Do we need to glue? See FIXME comment in BuildFILD.
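        // (The f80 format has a 64-bit significand, so an i64 round-trips
        // through FILD/FIST without losing any bits.)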
29414        SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other, MVT::Glue);
29415        SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
29416        SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD_FLAG,
29417                                                 dl, Tys, Ops, MVT::i64,
29418                                                 Node->getMemOperand());
29419        SDValue Chain = Result.getValue(1);
29420        SDValue InFlag = Result.getValue(2);
29421
29422        // Now store the X87 register to a stack temporary and convert to i64.
29423        // This store is not atomic and doesn't need to be.
29424        // FIXME: We don't need a stack temporary if the result of the load
29425        // is already being stored. We could just directly store there.
29426        SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
29427        int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
29428        MachinePointerInfo MPI =
29429            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
29430        SDValue StoreOps[] = { Chain, Result, StackPtr, InFlag };
29431        Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, dl,
29432                                        DAG.getVTList(MVT::Other), StoreOps,
29433                                        MVT::i64, MPI, 0 /*Align*/,
29434                                        MachineMemOperand::MOStore);
29435
29436        // Finally load the value back from the stack temporary and return it.
29437        // This load is not atomic and doesn't need to be.
29438        // This load will be further type legalized.
29439        Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
29440        Results.push_back(Result);
29441        Results.push_back(Result.getValue(1));
29442        return;
29443      }
29444    }
29445    // TODO: Use MOVLPS when SSE1 is available?
29446    // Delegate to generic TypeLegalization. Situations we can really handle
29447    // should have already been dealt with by AtomicExpandPass.cpp.
29448    break;
29449  }
29450  case ISD::ATOMIC_SWAP:
29451  case ISD::ATOMIC_LOAD_ADD:
29452  case ISD::ATOMIC_LOAD_SUB:
29453  case ISD::ATOMIC_LOAD_AND:
29454  case ISD::ATOMIC_LOAD_OR:
29455  case ISD::ATOMIC_LOAD_XOR:
29456  case ISD::ATOMIC_LOAD_NAND:
29457  case ISD::ATOMIC_LOAD_MIN:
29458  case ISD::ATOMIC_LOAD_MAX:
29459  case ISD::ATOMIC_LOAD_UMIN:
29460  case ISD::ATOMIC_LOAD_UMAX:
29461    // Delegate to generic TypeLegalization. Situations we can really handle
29462    // should have already been dealt with by AtomicExpandPass.cpp.
29463    break;
29464
29465  case ISD::BITCAST: {
29466    assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
29467    EVT DstVT = N->getValueType(0);
29468    EVT SrcVT = N->getOperand(0).getValueType();
29469
29470    // If this is a bitcast from a v64i1 k-register to an i64 on a 32-bit target
29471    // we can split using the k-register rather than memory.
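    // That is, split the v64i1 mask into two v32i1 halves, bitcast each half
    // to an i32 (which can be selected as a k-register to GPR move), and pair
    // the two i32s back into an i64.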
29472    if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
29473      assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
29474      SDValue Lo, Hi;
29475      std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
29476      Lo = DAG.getBitcast(MVT::i32, Lo);
29477      Hi = DAG.getBitcast(MVT::i32, Hi);
29478      SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
29479      Results.push_back(Res);
29480      return;
29481    }
29482
29483    // Custom splitting for BWI types when AVX512F is available but BWI isn't.
29484    if ((DstVT == MVT::v32i16 || DstVT == MVT::v64i8) &&
29485        SrcVT.isVector() && isTypeLegal(SrcVT)) {
29486      SDValue Lo, Hi;
29487      std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
29488      MVT CastVT = (DstVT == MVT::v32i16) ? MVT::v16i16 : MVT::v32i8;
29489      Lo = DAG.getBitcast(CastVT, Lo);
29490      Hi = DAG.getBitcast(CastVT, Hi);
29491      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
29492      Results.push_back(Res);
29493      return;
29494    }
29495
29496    if (DstVT.isVector() && SrcVT == MVT::x86mmx) {
29497      assert(getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector &&
29498             "Unexpected type action!");
29499      EVT WideVT = getTypeToTransformTo(*DAG.getContext(), DstVT);
29500      SDValue Res = DAG.getNode(X86ISD::MOVQ2DQ, dl, WideVT, N->getOperand(0));
29501      Results.push_back(Res);
29502      return;
29503    }
29504
29505    return;
29506  }
29507  case ISD::MGATHER: {
29508    EVT VT = N->getValueType(0);
29509    if ((VT == MVT::v2f32 || VT == MVT::v2i32) &&
29510        (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
29511      auto *Gather = cast<MaskedGatherSDNode>(N);
29512      SDValue Index = Gather->getIndex();
29513      if (Index.getValueType() != MVT::v2i64)
29514        return;
29515      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29516             "Unexpected type action!");
29517      EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
29518      SDValue Mask = Gather->getMask();
29519      assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
29520      SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT,
29521                                     Gather->getPassThru(),
29522                                     DAG.getUNDEF(VT));
29523      if (!Subtarget.hasVLX()) {
29524        // We need to widen the mask, but the instruction will only use 2
29525        // of its elements. So we can use undef.
29526        Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
29527                           DAG.getUNDEF(MVT::v2i1));
29528        Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
29529      }
29530      SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
29531                        Gather->getBasePtr(), Index, Gather->getScale() };
29532      SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
29533        DAG.getVTList(WideVT, Mask.getValueType(), MVT::Other), Ops, dl,
29534        Gather->getMemoryVT(), Gather->getMemOperand());
29535      Results.push_back(Res);
29536      Results.push_back(Res.getValue(2));
29537      return;
29538    }
29539    return;
29540  }
29541  case ISD::LOAD: {
29542    // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
29543    // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an int->fp
29544    // cast since type legalization will try to use an i64 load.
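    // For example, a v2f32 load becomes an f64 load, a scalar_to_vector to
    // v2f64, and a bitcast to the widened v4f32 result type.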
29545    MVT VT = N->getSimpleValueType(0);
29546    assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
29547    assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29548           "Unexpected type action!");
29549    if (!ISD::isNON_EXTLoad(N))
29550      return;
29551    auto *Ld = cast<LoadSDNode>(N);
29552    if (Subtarget.hasSSE2()) {
29553      MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
29554      SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
29555                                Ld->getPointerInfo(), Ld->getAlignment(),
29556                                Ld->getMemOperand()->getFlags());
29557      SDValue Chain = Res.getValue(1);
29558      MVT VecVT = MVT::getVectorVT(LdVT, 2);
29559      Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Res);
29560      EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
29561      Res = DAG.getBitcast(WideVT, Res);
29562      Results.push_back(Res);
29563      Results.push_back(Chain);
29564      return;
29565    }
29566    assert(Subtarget.hasSSE1() && "Expected SSE");
29567    SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
29568    SDValue Ops[] = {Ld->getChain(), Ld->getBasePtr()};
29569    SDValue Res = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
29570                                          MVT::i64, Ld->getMemOperand());
29571    Results.push_back(Res);
29572    Results.push_back(Res.getValue(1));
29573    return;
29574  }
29575  case ISD::ADDRSPACECAST: {
29576    SDValue Src = N->getOperand(0);
29577    EVT DstVT = N->getValueType(0);
29578    AddrSpaceCastSDNode *CastN = cast<AddrSpaceCastSDNode>(N);
29579    unsigned SrcAS = CastN->getSrcAddressSpace();
29580
29581    assert(SrcAS != CastN->getDestAddressSpace() &&
29582           "addrspacecast must be between different address spaces");
29583
29584    SDValue Res;
29585    if (SrcAS == X86AS::PTR32_UPTR && DstVT == MVT::i64)
29586      Res = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Src);
29587    else if (DstVT == MVT::i64)
29588      Res = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Src);
29589    else if (DstVT == MVT::i32)
29590      Res = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);
29591    else
29592      report_fatal_error("Unrecognized addrspacecast type legalization");
29593
29594    Results.push_back(Res);
29595    return;
29596  }
29597  }
29598}
29599
29600const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
29601  switch ((X86ISD::NodeType)Opcode) {
29602  case X86ISD::FIRST_NUMBER:       break;
29603  case X86ISD::BSF:                return "X86ISD::BSF";
29604  case X86ISD::BSR:                return "X86ISD::BSR";
29605  case X86ISD::SHLD:               return "X86ISD::SHLD";
29606  case X86ISD::SHRD:               return "X86ISD::SHRD";
29607  case X86ISD::FAND:               return "X86ISD::FAND";
29608  case X86ISD::FANDN:              return "X86ISD::FANDN";
29609  case X86ISD::FOR:                return "X86ISD::FOR";
29610  case X86ISD::FXOR:               return "X86ISD::FXOR";
29611  case X86ISD::FILD:               return "X86ISD::FILD";
29612  case X86ISD::FILD_FLAG:          return "X86ISD::FILD_FLAG";
29613  case X86ISD::FIST:               return "X86ISD::FIST";
29614  case X86ISD::FP_TO_INT_IN_MEM:   return "X86ISD::FP_TO_INT_IN_MEM";
29615  case X86ISD::FLD:                return "X86ISD::FLD";
29616  case X86ISD::FST:                return "X86ISD::FST";
29617  case X86ISD::CALL:               return "X86ISD::CALL";
29618  case X86ISD::BT:                 return "X86ISD::BT";
29619  case X86ISD::CMP:                return "X86ISD::CMP";
29620  case X86ISD::STRICT_FCMP:        return "X86ISD::STRICT_FCMP";
29621  case X86ISD::STRICT_FCMPS:       return "X86ISD::STRICT_FCMPS";
29622  case X86ISD::COMI:               return "X86ISD::COMI";
29623  case X86ISD::UCOMI:              return "X86ISD::UCOMI";
29624  case X86ISD::CMPM:               return "X86ISD::CMPM";
29625  case X86ISD::STRICT_CMPM:        return "X86ISD::STRICT_CMPM";
29626  case X86ISD::CMPM_SAE:           return "X86ISD::CMPM_SAE";
29627  case X86ISD::SETCC:              return "X86ISD::SETCC";
29628  case X86ISD::SETCC_CARRY:        return "X86ISD::SETCC_CARRY";
29629  case X86ISD::FSETCC:             return "X86ISD::FSETCC";
29630  case X86ISD::FSETCCM:            return "X86ISD::FSETCCM";
29631  case X86ISD::FSETCCM_SAE:        return "X86ISD::FSETCCM_SAE";
29632  case X86ISD::CMOV:               return "X86ISD::CMOV";
29633  case X86ISD::BRCOND:             return "X86ISD::BRCOND";
29634  case X86ISD::RET_FLAG:           return "X86ISD::RET_FLAG";
29635  case X86ISD::IRET:               return "X86ISD::IRET";
29636  case X86ISD::REP_STOS:           return "X86ISD::REP_STOS";
29637  case X86ISD::REP_MOVS:           return "X86ISD::REP_MOVS";
29638  case X86ISD::GlobalBaseReg:      return "X86ISD::GlobalBaseReg";
29639  case X86ISD::Wrapper:            return "X86ISD::Wrapper";
29640  case X86ISD::WrapperRIP:         return "X86ISD::WrapperRIP";
29641  case X86ISD::MOVQ2DQ:            return "X86ISD::MOVQ2DQ";
29642  case X86ISD::MOVDQ2Q:            return "X86ISD::MOVDQ2Q";
29643  case X86ISD::MMX_MOVD2W:         return "X86ISD::MMX_MOVD2W";
29644  case X86ISD::MMX_MOVW2D:         return "X86ISD::MMX_MOVW2D";
29645  case X86ISD::PEXTRB:             return "X86ISD::PEXTRB";
29646  case X86ISD::PEXTRW:             return "X86ISD::PEXTRW";
29647  case X86ISD::INSERTPS:           return "X86ISD::INSERTPS";
29648  case X86ISD::PINSRB:             return "X86ISD::PINSRB";
29649  case X86ISD::PINSRW:             return "X86ISD::PINSRW";
29650  case X86ISD::PSHUFB:             return "X86ISD::PSHUFB";
29651  case X86ISD::ANDNP:              return "X86ISD::ANDNP";
29652  case X86ISD::BLENDI:             return "X86ISD::BLENDI";
29653  case X86ISD::BLENDV:             return "X86ISD::BLENDV";
29654  case X86ISD::HADD:               return "X86ISD::HADD";
29655  case X86ISD::HSUB:               return "X86ISD::HSUB";
29656  case X86ISD::FHADD:              return "X86ISD::FHADD";
29657  case X86ISD::FHSUB:              return "X86ISD::FHSUB";
29658  case X86ISD::CONFLICT:           return "X86ISD::CONFLICT";
29659  case X86ISD::FMAX:               return "X86ISD::FMAX";
29660  case X86ISD::FMAXS:              return "X86ISD::FMAXS";
29661  case X86ISD::FMAX_SAE:           return "X86ISD::FMAX_SAE";
29662  case X86ISD::FMAXS_SAE:          return "X86ISD::FMAXS_SAE";
29663  case X86ISD::FMIN:               return "X86ISD::FMIN";
29664  case X86ISD::FMINS:              return "X86ISD::FMINS";
29665  case X86ISD::FMIN_SAE:           return "X86ISD::FMIN_SAE";
29666  case X86ISD::FMINS_SAE:          return "X86ISD::FMINS_SAE";
29667  case X86ISD::FMAXC:              return "X86ISD::FMAXC";
29668  case X86ISD::FMINC:              return "X86ISD::FMINC";
29669  case X86ISD::FRSQRT:             return "X86ISD::FRSQRT";
29670  case X86ISD::FRCP:               return "X86ISD::FRCP";
29671  case X86ISD::EXTRQI:             return "X86ISD::EXTRQI";
29672  case X86ISD::INSERTQI:           return "X86ISD::INSERTQI";
29673  case X86ISD::TLSADDR:            return "X86ISD::TLSADDR";
29674  case X86ISD::TLSBASEADDR:        return "X86ISD::TLSBASEADDR";
29675  case X86ISD::TLSCALL:            return "X86ISD::TLSCALL";
29676  case X86ISD::EH_SJLJ_SETJMP:     return "X86ISD::EH_SJLJ_SETJMP";
29677  case X86ISD::EH_SJLJ_LONGJMP:    return "X86ISD::EH_SJLJ_LONGJMP";
29678  case X86ISD::EH_SJLJ_SETUP_DISPATCH:
29679    return "X86ISD::EH_SJLJ_SETUP_DISPATCH";
29680  case X86ISD::EH_RETURN:          return "X86ISD::EH_RETURN";
29681  case X86ISD::TC_RETURN:          return "X86ISD::TC_RETURN";
29682  case X86ISD::FNSTCW16m:          return "X86ISD::FNSTCW16m";
29683  case X86ISD::FNSTSW16r:          return "X86ISD::FNSTSW16r";
29684  case X86ISD::LCMPXCHG_DAG:       return "X86ISD::LCMPXCHG_DAG";
29685  case X86ISD::LCMPXCHG8_DAG:      return "X86ISD::LCMPXCHG8_DAG";
29686  case X86ISD::LCMPXCHG16_DAG:     return "X86ISD::LCMPXCHG16_DAG";
29687  case X86ISD::LCMPXCHG8_SAVE_EBX_DAG:
29688    return "X86ISD::LCMPXCHG8_SAVE_EBX_DAG";
29689  case X86ISD::LCMPXCHG16_SAVE_RBX_DAG:
29690    return "X86ISD::LCMPXCHG16_SAVE_RBX_DAG";
29691  case X86ISD::LADD:               return "X86ISD::LADD";
29692  case X86ISD::LSUB:               return "X86ISD::LSUB";
29693  case X86ISD::LOR:                return "X86ISD::LOR";
29694  case X86ISD::LXOR:               return "X86ISD::LXOR";
29695  case X86ISD::LAND:               return "X86ISD::LAND";
29696  case X86ISD::VZEXT_MOVL:         return "X86ISD::VZEXT_MOVL";
29697  case X86ISD::VZEXT_LOAD:         return "X86ISD::VZEXT_LOAD";
29698  case X86ISD::VEXTRACT_STORE:     return "X86ISD::VEXTRACT_STORE";
29699  case X86ISD::VTRUNC:             return "X86ISD::VTRUNC";
29700  case X86ISD::VTRUNCS:            return "X86ISD::VTRUNCS";
29701  case X86ISD::VTRUNCUS:           return "X86ISD::VTRUNCUS";
29702  case X86ISD::VMTRUNC:            return "X86ISD::VMTRUNC";
29703  case X86ISD::VMTRUNCS:           return "X86ISD::VMTRUNCS";
29704  case X86ISD::VMTRUNCUS:          return "X86ISD::VMTRUNCUS";
29705  case X86ISD::VTRUNCSTORES:       return "X86ISD::VTRUNCSTORES";
29706  case X86ISD::VTRUNCSTOREUS:      return "X86ISD::VTRUNCSTOREUS";
29707  case X86ISD::VMTRUNCSTORES:      return "X86ISD::VMTRUNCSTORES";
29708  case X86ISD::VMTRUNCSTOREUS:     return "X86ISD::VMTRUNCSTOREUS";
29709  case X86ISD::VFPEXT:             return "X86ISD::VFPEXT";
29710  case X86ISD::STRICT_VFPEXT:      return "X86ISD::STRICT_VFPEXT";
29711  case X86ISD::VFPEXT_SAE:         return "X86ISD::VFPEXT_SAE";
29712  case X86ISD::VFPEXTS:            return "X86ISD::VFPEXTS";
29713  case X86ISD::VFPEXTS_SAE:        return "X86ISD::VFPEXTS_SAE";
29714  case X86ISD::VFPROUND:           return "X86ISD::VFPROUND";
29715  case X86ISD::STRICT_VFPROUND:    return "X86ISD::STRICT_VFPROUND";
29716  case X86ISD::VMFPROUND:          return "X86ISD::VMFPROUND";
29717  case X86ISD::VFPROUND_RND:       return "X86ISD::VFPROUND_RND";
29718  case X86ISD::VFPROUNDS:          return "X86ISD::VFPROUNDS";
29719  case X86ISD::VFPROUNDS_RND:      return "X86ISD::VFPROUNDS_RND";
29720  case X86ISD::VSHLDQ:             return "X86ISD::VSHLDQ";
29721  case X86ISD::VSRLDQ:             return "X86ISD::VSRLDQ";
29722  case X86ISD::VSHL:               return "X86ISD::VSHL";
29723  case X86ISD::VSRL:               return "X86ISD::VSRL";
29724  case X86ISD::VSRA:               return "X86ISD::VSRA";
29725  case X86ISD::VSHLI:              return "X86ISD::VSHLI";
29726  case X86ISD::VSRLI:              return "X86ISD::VSRLI";
29727  case X86ISD::VSRAI:              return "X86ISD::VSRAI";
29728  case X86ISD::VSHLV:              return "X86ISD::VSHLV";
29729  case X86ISD::VSRLV:              return "X86ISD::VSRLV";
29730  case X86ISD::VSRAV:              return "X86ISD::VSRAV";
29731  case X86ISD::VROTLI:             return "X86ISD::VROTLI";
29732  case X86ISD::VROTRI:             return "X86ISD::VROTRI";
29733  case X86ISD::VPPERM:             return "X86ISD::VPPERM";
29734  case X86ISD::CMPP:               return "X86ISD::CMPP";
29735  case X86ISD::STRICT_CMPP:        return "X86ISD::STRICT_CMPP";
29736  case X86ISD::PCMPEQ:             return "X86ISD::PCMPEQ";
29737  case X86ISD::PCMPGT:             return "X86ISD::PCMPGT";
29738  case X86ISD::PHMINPOS:           return "X86ISD::PHMINPOS";
29739  case X86ISD::ADD:                return "X86ISD::ADD";
29740  case X86ISD::SUB:                return "X86ISD::SUB";
29741  case X86ISD::ADC:                return "X86ISD::ADC";
29742  case X86ISD::SBB:                return "X86ISD::SBB";
29743  case X86ISD::SMUL:               return "X86ISD::SMUL";
29744  case X86ISD::UMUL:               return "X86ISD::UMUL";
29745  case X86ISD::OR:                 return "X86ISD::OR";
29746  case X86ISD::XOR:                return "X86ISD::XOR";
29747  case X86ISD::AND:                return "X86ISD::AND";
29748  case X86ISD::BEXTR:              return "X86ISD::BEXTR";
29749  case X86ISD::BZHI:               return "X86ISD::BZHI";
29750  case X86ISD::MUL_IMM:            return "X86ISD::MUL_IMM";
29751  case X86ISD::MOVMSK:             return "X86ISD::MOVMSK";
29752  case X86ISD::PTEST:              return "X86ISD::PTEST";
29753  case X86ISD::TESTP:              return "X86ISD::TESTP";
29754  case X86ISD::KORTEST:            return "X86ISD::KORTEST";
29755  case X86ISD::KTEST:              return "X86ISD::KTEST";
29756  case X86ISD::KADD:               return "X86ISD::KADD";
29757  case X86ISD::KSHIFTL:            return "X86ISD::KSHIFTL";
29758  case X86ISD::KSHIFTR:            return "X86ISD::KSHIFTR";
29759  case X86ISD::PACKSS:             return "X86ISD::PACKSS";
29760  case X86ISD::PACKUS:             return "X86ISD::PACKUS";
29761  case X86ISD::PALIGNR:            return "X86ISD::PALIGNR";
29762  case X86ISD::VALIGN:             return "X86ISD::VALIGN";
29763  case X86ISD::VSHLD:              return "X86ISD::VSHLD";
29764  case X86ISD::VSHRD:              return "X86ISD::VSHRD";
29765  case X86ISD::VSHLDV:             return "X86ISD::VSHLDV";
29766  case X86ISD::VSHRDV:             return "X86ISD::VSHRDV";
29767  case X86ISD::PSHUFD:             return "X86ISD::PSHUFD";
29768  case X86ISD::PSHUFHW:            return "X86ISD::PSHUFHW";
29769  case X86ISD::PSHUFLW:            return "X86ISD::PSHUFLW";
29770  case X86ISD::SHUFP:              return "X86ISD::SHUFP";
29771  case X86ISD::SHUF128:            return "X86ISD::SHUF128";
29772  case X86ISD::MOVLHPS:            return "X86ISD::MOVLHPS";
29773  case X86ISD::MOVHLPS:            return "X86ISD::MOVHLPS";
29774  case X86ISD::MOVDDUP:            return "X86ISD::MOVDDUP";
29775  case X86ISD::MOVSHDUP:           return "X86ISD::MOVSHDUP";
29776  case X86ISD::MOVSLDUP:           return "X86ISD::MOVSLDUP";
29777  case X86ISD::MOVSD:              return "X86ISD::MOVSD";
29778  case X86ISD::MOVSS:              return "X86ISD::MOVSS";
29779  case X86ISD::UNPCKL:             return "X86ISD::UNPCKL";
29780  case X86ISD::UNPCKH:             return "X86ISD::UNPCKH";
29781  case X86ISD::VBROADCAST:         return "X86ISD::VBROADCAST";
29782  case X86ISD::VBROADCAST_LOAD:    return "X86ISD::VBROADCAST_LOAD";
29783  case X86ISD::VBROADCASTM:        return "X86ISD::VBROADCASTM";
29784  case X86ISD::SUBV_BROADCAST:     return "X86ISD::SUBV_BROADCAST";
29785  case X86ISD::VPERMILPV:          return "X86ISD::VPERMILPV";
29786  case X86ISD::VPERMILPI:          return "X86ISD::VPERMILPI";
29787  case X86ISD::VPERM2X128:         return "X86ISD::VPERM2X128";
29788  case X86ISD::VPERMV:             return "X86ISD::VPERMV";
29789  case X86ISD::VPERMV3:            return "X86ISD::VPERMV3";
29790  case X86ISD::VPERMI:             return "X86ISD::VPERMI";
29791  case X86ISD::VPTERNLOG:          return "X86ISD::VPTERNLOG";
29792  case X86ISD::VFIXUPIMM:          return "X86ISD::VFIXUPIMM";
29793  case X86ISD::VFIXUPIMM_SAE:      return "X86ISD::VFIXUPIMM_SAE";
29794  case X86ISD::VFIXUPIMMS:         return "X86ISD::VFIXUPIMMS";
29795  case X86ISD::VFIXUPIMMS_SAE:     return "X86ISD::VFIXUPIMMS_SAE";
29796  case X86ISD::VRANGE:             return "X86ISD::VRANGE";
29797  case X86ISD::VRANGE_SAE:         return "X86ISD::VRANGE_SAE";
29798  case X86ISD::VRANGES:            return "X86ISD::VRANGES";
29799  case X86ISD::VRANGES_SAE:        return "X86ISD::VRANGES_SAE";
29800  case X86ISD::PMULUDQ:            return "X86ISD::PMULUDQ";
29801  case X86ISD::PMULDQ:             return "X86ISD::PMULDQ";
29802  case X86ISD::PSADBW:             return "X86ISD::PSADBW";
29803  case X86ISD::DBPSADBW:           return "X86ISD::DBPSADBW";
29804  case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
29805  case X86ISD::VAARG_64:           return "X86ISD::VAARG_64";
29806  case X86ISD::WIN_ALLOCA:         return "X86ISD::WIN_ALLOCA";
29807  case X86ISD::MEMBARRIER:         return "X86ISD::MEMBARRIER";
29808  case X86ISD::MFENCE:             return "X86ISD::MFENCE";
29809  case X86ISD::SEG_ALLOCA:         return "X86ISD::SEG_ALLOCA";
29810  case X86ISD::SAHF:               return "X86ISD::SAHF";
29811  case X86ISD::RDRAND:             return "X86ISD::RDRAND";
29812  case X86ISD::RDSEED:             return "X86ISD::RDSEED";
29813  case X86ISD::RDPKRU:             return "X86ISD::RDPKRU";
29814  case X86ISD::WRPKRU:             return "X86ISD::WRPKRU";
29815  case X86ISD::VPMADDUBSW:         return "X86ISD::VPMADDUBSW";
29816  case X86ISD::VPMADDWD:           return "X86ISD::VPMADDWD";
29817  case X86ISD::VPSHA:              return "X86ISD::VPSHA";
29818  case X86ISD::VPSHL:              return "X86ISD::VPSHL";
29819  case X86ISD::VPCOM:              return "X86ISD::VPCOM";
29820  case X86ISD::VPCOMU:             return "X86ISD::VPCOMU";
29821  case X86ISD::VPERMIL2:           return "X86ISD::VPERMIL2";
29822  case X86ISD::FMSUB:              return "X86ISD::FMSUB";
29823  case X86ISD::FNMADD:             return "X86ISD::FNMADD";
29824  case X86ISD::FNMSUB:             return "X86ISD::FNMSUB";
29825  case X86ISD::FMADDSUB:           return "X86ISD::FMADDSUB";
29826  case X86ISD::FMSUBADD:           return "X86ISD::FMSUBADD";
29827  case X86ISD::FMADD_RND:          return "X86ISD::FMADD_RND";
29828  case X86ISD::FNMADD_RND:         return "X86ISD::FNMADD_RND";
29829  case X86ISD::FMSUB_RND:          return "X86ISD::FMSUB_RND";
29830  case X86ISD::FNMSUB_RND:         return "X86ISD::FNMSUB_RND";
29831  case X86ISD::FMADDSUB_RND:       return "X86ISD::FMADDSUB_RND";
29832  case X86ISD::FMSUBADD_RND:       return "X86ISD::FMSUBADD_RND";
29833  case X86ISD::VPMADD52H:          return "X86ISD::VPMADD52H";
29834  case X86ISD::VPMADD52L:          return "X86ISD::VPMADD52L";
29835  case X86ISD::VRNDSCALE:          return "X86ISD::VRNDSCALE";
29836  case X86ISD::STRICT_VRNDSCALE:   return "X86ISD::STRICT_VRNDSCALE";
29837  case X86ISD::VRNDSCALE_SAE:      return "X86ISD::VRNDSCALE_SAE";
29838  case X86ISD::VRNDSCALES:         return "X86ISD::VRNDSCALES";
29839  case X86ISD::VRNDSCALES_SAE:     return "X86ISD::VRNDSCALES_SAE";
29840  case X86ISD::VREDUCE:            return "X86ISD::VREDUCE";
29841  case X86ISD::VREDUCE_SAE:        return "X86ISD::VREDUCE_SAE";
29842  case X86ISD::VREDUCES:           return "X86ISD::VREDUCES";
29843  case X86ISD::VREDUCES_SAE:       return "X86ISD::VREDUCES_SAE";
29844  case X86ISD::VGETMANT:           return "X86ISD::VGETMANT";
29845  case X86ISD::VGETMANT_SAE:       return "X86ISD::VGETMANT_SAE";
29846  case X86ISD::VGETMANTS:          return "X86ISD::VGETMANTS";
29847  case X86ISD::VGETMANTS_SAE:      return "X86ISD::VGETMANTS_SAE";
29848  case X86ISD::PCMPESTR:           return "X86ISD::PCMPESTR";
29849  case X86ISD::PCMPISTR:           return "X86ISD::PCMPISTR";
29850  case X86ISD::XTEST:              return "X86ISD::XTEST";
29851  case X86ISD::COMPRESS:           return "X86ISD::COMPRESS";
29852  case X86ISD::EXPAND:             return "X86ISD::EXPAND";
29853  case X86ISD::SELECTS:            return "X86ISD::SELECTS";
29854  case X86ISD::ADDSUB:             return "X86ISD::ADDSUB";
29855  case X86ISD::RCP14:              return "X86ISD::RCP14";
29856  case X86ISD::RCP14S:             return "X86ISD::RCP14S";
29857  case X86ISD::RCP28:              return "X86ISD::RCP28";
29858  case X86ISD::RCP28_SAE:          return "X86ISD::RCP28_SAE";
29859  case X86ISD::RCP28S:             return "X86ISD::RCP28S";
29860  case X86ISD::RCP28S_SAE:         return "X86ISD::RCP28S_SAE";
29861  case X86ISD::EXP2:               return "X86ISD::EXP2";
29862  case X86ISD::EXP2_SAE:           return "X86ISD::EXP2_SAE";
29863  case X86ISD::RSQRT14:            return "X86ISD::RSQRT14";
29864  case X86ISD::RSQRT14S:           return "X86ISD::RSQRT14S";
29865  case X86ISD::RSQRT28:            return "X86ISD::RSQRT28";
29866  case X86ISD::RSQRT28_SAE:        return "X86ISD::RSQRT28_SAE";
29867  case X86ISD::RSQRT28S:           return "X86ISD::RSQRT28S";
29868  case X86ISD::RSQRT28S_SAE:       return "X86ISD::RSQRT28S_SAE";
29869  case X86ISD::FADD_RND:           return "X86ISD::FADD_RND";
29870  case X86ISD::FADDS:              return "X86ISD::FADDS";
29871  case X86ISD::FADDS_RND:          return "X86ISD::FADDS_RND";
29872  case X86ISD::FSUB_RND:           return "X86ISD::FSUB_RND";
29873  case X86ISD::FSUBS:              return "X86ISD::FSUBS";
29874  case X86ISD::FSUBS_RND:          return "X86ISD::FSUBS_RND";
29875  case X86ISD::FMUL_RND:           return "X86ISD::FMUL_RND";
29876  case X86ISD::FMULS:              return "X86ISD::FMULS";
29877  case X86ISD::FMULS_RND:          return "X86ISD::FMULS_RND";
29878  case X86ISD::FDIV_RND:           return "X86ISD::FDIV_RND";
29879  case X86ISD::FDIVS:              return "X86ISD::FDIVS";
29880  case X86ISD::FDIVS_RND:          return "X86ISD::FDIVS_RND";
29881  case X86ISD::FSQRT_RND:          return "X86ISD::FSQRT_RND";
29882  case X86ISD::FSQRTS:             return "X86ISD::FSQRTS";
29883  case X86ISD::FSQRTS_RND:         return "X86ISD::FSQRTS_RND";
29884  case X86ISD::FGETEXP:            return "X86ISD::FGETEXP";
29885  case X86ISD::FGETEXP_SAE:        return "X86ISD::FGETEXP_SAE";
29886  case X86ISD::FGETEXPS:           return "X86ISD::FGETEXPS";
29887  case X86ISD::FGETEXPS_SAE:       return "X86ISD::FGETEXPS_SAE";
29888  case X86ISD::SCALEF:             return "X86ISD::SCALEF";
29889  case X86ISD::SCALEF_RND:         return "X86ISD::SCALEF_RND";
29890  case X86ISD::SCALEFS:            return "X86ISD::SCALEFS";
29891  case X86ISD::SCALEFS_RND:        return "X86ISD::SCALEFS_RND";
29892  case X86ISD::AVG:                return "X86ISD::AVG";
29893  case X86ISD::MULHRS:             return "X86ISD::MULHRS";
29894  case X86ISD::SINT_TO_FP_RND:     return "X86ISD::SINT_TO_FP_RND";
29895  case X86ISD::UINT_TO_FP_RND:     return "X86ISD::UINT_TO_FP_RND";
29896  case X86ISD::CVTTP2SI:           return "X86ISD::CVTTP2SI";
29897  case X86ISD::CVTTP2UI:           return "X86ISD::CVTTP2UI";
29898  case X86ISD::STRICT_CVTTP2SI:    return "X86ISD::STRICT_CVTTP2SI";
29899  case X86ISD::STRICT_CVTTP2UI:    return "X86ISD::STRICT_CVTTP2UI";
29900  case X86ISD::MCVTTP2SI:          return "X86ISD::MCVTTP2SI";
29901  case X86ISD::MCVTTP2UI:          return "X86ISD::MCVTTP2UI";
29902  case X86ISD::CVTTP2SI_SAE:       return "X86ISD::CVTTP2SI_SAE";
29903  case X86ISD::CVTTP2UI_SAE:       return "X86ISD::CVTTP2UI_SAE";
29904  case X86ISD::CVTTS2SI:           return "X86ISD::CVTTS2SI";
29905  case X86ISD::CVTTS2UI:           return "X86ISD::CVTTS2UI";
29906  case X86ISD::CVTTS2SI_SAE:       return "X86ISD::CVTTS2SI_SAE";
29907  case X86ISD::CVTTS2UI_SAE:       return "X86ISD::CVTTS2UI_SAE";
29908  case X86ISD::CVTSI2P:            return "X86ISD::CVTSI2P";
29909  case X86ISD::CVTUI2P:            return "X86ISD::CVTUI2P";
29910  case X86ISD::STRICT_CVTSI2P:     return "X86ISD::STRICT_CVTSI2P";
29911  case X86ISD::STRICT_CVTUI2P:     return "X86ISD::STRICT_CVTUI2P";
29912  case X86ISD::MCVTSI2P:           return "X86ISD::MCVTSI2P";
29913  case X86ISD::MCVTUI2P:           return "X86ISD::MCVTUI2P";
29914  case X86ISD::VFPCLASS:           return "X86ISD::VFPCLASS";
29915  case X86ISD::VFPCLASSS:          return "X86ISD::VFPCLASSS";
29916  case X86ISD::MULTISHIFT:         return "X86ISD::MULTISHIFT";
29917  case X86ISD::SCALAR_SINT_TO_FP:     return "X86ISD::SCALAR_SINT_TO_FP";
29918  case X86ISD::SCALAR_SINT_TO_FP_RND: return "X86ISD::SCALAR_SINT_TO_FP_RND";
29919  case X86ISD::SCALAR_UINT_TO_FP:     return "X86ISD::SCALAR_UINT_TO_FP";
29920  case X86ISD::SCALAR_UINT_TO_FP_RND: return "X86ISD::SCALAR_UINT_TO_FP_RND";
29921  case X86ISD::CVTPS2PH:           return "X86ISD::CVTPS2PH";
29922  case X86ISD::MCVTPS2PH:          return "X86ISD::MCVTPS2PH";
29923  case X86ISD::CVTPH2PS:           return "X86ISD::CVTPH2PS";
29924  case X86ISD::CVTPH2PS_SAE:       return "X86ISD::CVTPH2PS_SAE";
29925  case X86ISD::CVTP2SI:            return "X86ISD::CVTP2SI";
29926  case X86ISD::CVTP2UI:            return "X86ISD::CVTP2UI";
29927  case X86ISD::MCVTP2SI:           return "X86ISD::MCVTP2SI";
29928  case X86ISD::MCVTP2UI:           return "X86ISD::MCVTP2UI";
29929  case X86ISD::CVTP2SI_RND:        return "X86ISD::CVTP2SI_RND";
29930  case X86ISD::CVTP2UI_RND:        return "X86ISD::CVTP2UI_RND";
29931  case X86ISD::CVTS2SI:            return "X86ISD::CVTS2SI";
29932  case X86ISD::CVTS2UI:            return "X86ISD::CVTS2UI";
29933  case X86ISD::CVTS2SI_RND:        return "X86ISD::CVTS2SI_RND";
29934  case X86ISD::CVTS2UI_RND:        return "X86ISD::CVTS2UI_RND";
29935  case X86ISD::CVTNE2PS2BF16:      return "X86ISD::CVTNE2PS2BF16";
29936  case X86ISD::CVTNEPS2BF16:       return "X86ISD::CVTNEPS2BF16";
29937  case X86ISD::MCVTNEPS2BF16:      return "X86ISD::MCVTNEPS2BF16";
29938  case X86ISD::DPBF16PS:           return "X86ISD::DPBF16PS";
29939  case X86ISD::LWPINS:             return "X86ISD::LWPINS";
29940  case X86ISD::MGATHER:            return "X86ISD::MGATHER";
29941  case X86ISD::MSCATTER:           return "X86ISD::MSCATTER";
29942  case X86ISD::VPDPBUSD:           return "X86ISD::VPDPBUSD";
29943  case X86ISD::VPDPBUSDS:          return "X86ISD::VPDPBUSDS";
29944  case X86ISD::VPDPWSSD:           return "X86ISD::VPDPWSSD";
29945  case X86ISD::VPDPWSSDS:          return "X86ISD::VPDPWSSDS";
29946  case X86ISD::VPSHUFBITQMB:       return "X86ISD::VPSHUFBITQMB";
29947  case X86ISD::GF2P8MULB:          return "X86ISD::GF2P8MULB";
29948  case X86ISD::GF2P8AFFINEQB:      return "X86ISD::GF2P8AFFINEQB";
29949  case X86ISD::GF2P8AFFINEINVQB:   return "X86ISD::GF2P8AFFINEINVQB";
29950  case X86ISD::NT_CALL:            return "X86ISD::NT_CALL";
29951  case X86ISD::NT_BRIND:           return "X86ISD::NT_BRIND";
29952  case X86ISD::UMWAIT:             return "X86ISD::UMWAIT";
29953  case X86ISD::TPAUSE:             return "X86ISD::TPAUSE";
29954  case X86ISD::ENQCMD:             return "X86ISD::ENQCMD";
29955  case X86ISD::ENQCMDS:            return "X86ISD::ENQCMDS";
29956  case X86ISD::VP2INTERSECT:       return "X86ISD::VP2INTERSECT";
29957  }
29958  return nullptr;
29959}
29960
29961/// Return true if the addressing mode represented by AM is legal for this
29962/// target, for a load/store of the specified type.
29963bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
29964                                              const AddrMode &AM, Type *Ty,
29965                                              unsigned AS,
29966                                              Instruction *I) const {
29967  // X86 supports extremely general addressing modes.
29968  CodeModel::Model M = getTargetMachine().getCodeModel();
29969
29970  // X86 allows a sign-extended 32-bit immediate field as a displacement.
29971  if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
29972    return false;
29973
29974  if (AM.BaseGV) {
29975    unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
29976
29977    // If a reference to this global requires an extra load, we can't fold it.
29978    if (isGlobalStubReference(GVFlags))
29979      return false;
29980
29981    // If BaseGV requires a register for the PIC base, we cannot also have a
29982    // BaseReg specified.
29983    if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
29984      return false;
29985
29986    // If lower 4G is not available, then we must use rip-relative addressing.
29987    if ((M != CodeModel::Small || isPositionIndependent()) &&
29988        Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
29989      return false;
29990  }
29991
29992  switch (AM.Scale) {
29993  case 0:
29994  case 1:
29995  case 2:
29996  case 4:
29997  case 8:
29998    // These scales always work.
29999    break;
30000  case 3:
30001  case 5:
30002  case 9:
30003    // These scales are formed with basereg+scalereg.  Only accept if there is
30004    // no basereg yet.
30005    if (AM.HasBaseReg)
30006      return false;
30007    break;
30008  default:  // Other stuff never works.
30009    return false;
30010  }
30011
30012  return true;
30013}
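// For illustration (not an exhaustive list), under the checks above and a
// 64-bit small, non-PIC code model:
//   base + 4*index + disp32    -> legal   (scales 1, 2, 4 and 8 always work)
//   3*index + disp             -> legal   (folded as index + 2*index in an LEA)
//   base + 3*index             -> not legal (the scale-3 form already consumes
//                                 the base register slot as index + 2*index)
//   base + index + (1LL << 35) -> not legal (the displacement must fit in a
//                                 sign-extended 32-bit immediate)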
30014
30015bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
30016  unsigned Bits = Ty->getScalarSizeInBits();
30017
30018  // 8-bit shifts are always expensive, and a scalar shift amount doesn't make
30019  // them meaningfully cheaper than a variable vector amount.
30020  if (Bits == 8)
30021    return false;
30022
30023  // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
30024  if (Subtarget.hasXOP() && Ty->getPrimitiveSizeInBits() == 128 &&
30025      (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
30026    return false;
30027
30028  // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
30029  // shifts just as cheap as scalar ones.
30030  if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
30031    return false;
30032
30033  // AVX512BW has shifts such as vpsllvw.
30034  if (Subtarget.hasBWI() && Bits == 16)
30035    return false;
30036
30037  // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
30038  // fully general vector.
30039  return true;
30040}
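// For example (assuming no XOP): with AVX2 but without AVX512BW, a v8i16
// shift by a scalar amount is still cheaper than a fully variable vector
// shift, so the hook above returns true for 16-bit elements, while 32-bit
// and 64-bit elements can use vpsllvd/vpsllvq and it returns false.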
30041
30042bool X86TargetLowering::isBinOp(unsigned Opcode) const {
30043  switch (Opcode) {
30044  // These are non-commutative binops.
30045  // TODO: Add more X86ISD opcodes once we have test coverage.
30046  case X86ISD::ANDNP:
30047  case X86ISD::PCMPGT:
30048  case X86ISD::FMAX:
30049  case X86ISD::FMIN:
30050  case X86ISD::FANDN:
30051    return true;
30052  }
30053
30054  return TargetLoweringBase::isBinOp(Opcode);
30055}
30056
30057bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
30058  switch (Opcode) {
30059  // TODO: Add more X86ISD opcodes once we have test coverage.
30060  case X86ISD::PCMPEQ:
30061  case X86ISD::PMULDQ:
30062  case X86ISD::PMULUDQ:
30063  case X86ISD::FMAXC:
30064  case X86ISD::FMINC:
30065  case X86ISD::FAND:
30066  case X86ISD::FOR:
30067  case X86ISD::FXOR:
30068    return true;
30069  }
30070
30071  return TargetLoweringBase::isCommutativeBinOp(Opcode);
30072}
30073
30074bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
30075  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
30076    return false;
30077  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
30078  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
30079  return NumBits1 > NumBits2;
30080}
30081
30082bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
30083  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
30084    return false;
30085
30086  if (!isTypeLegal(EVT::getEVT(Ty1)))
30087    return false;
30088
30089  assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
30090
30091  // Assuming the caller doesn't have a zeroext or signext return parameter,
30092  // truncation all the way down to i1 is valid.
30093  return true;
30094}
30095
30096bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
30097  return isInt<32>(Imm);
30098}
30099
30100bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
30101  // Can also use sub to handle negated immediates.
30102  return isInt<32>(Imm);
30103}
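// For example, the hook above accepts Imm = 0x7FFFFFFF and Imm = -0x80000000,
// which encode directly in an 'add r/m64, imm32' (the immediate is
// sign-extended to 64 bits); Imm = 0x80000000 does not fit and would first
// have to be materialized in a register.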
30104
30105bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
30106  return isInt<32>(Imm);
30107}
30108
30109bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
30110  if (!VT1.isInteger() || !VT2.isInteger())
30111    return false;
30112  unsigned NumBits1 = VT1.getSizeInBits();
30113  unsigned NumBits2 = VT2.getSizeInBits();
30114  return NumBits1 > NumBits2;
30115}
30116
30117bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
30118  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
30119  return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
30120}
30121
30122bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
30123  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
30124  return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
30125}
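// For example, 'movl %ecx, %eax' already clears bits 63:32 of %rax, so an
// i32 -> i64 zero-extension needs no extra instruction on x86-64.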
30126
30127bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
30128  EVT VT1 = Val.getValueType();
30129  if (isZExtFree(VT1, VT2))
30130    return true;
30131
30132  if (Val.getOpcode() != ISD::LOAD)
30133    return false;
30134
30135  if (!VT1.isSimple() || !VT1.isInteger() ||
30136      !VT2.isSimple() || !VT2.isInteger())
30137    return false;
30138
30139  switch (VT1.getSimpleVT().SimpleTy) {
30140  default: break;
30141  case MVT::i8:
30142  case MVT::i16:
30143  case MVT::i32:
30144    // X86 has 8, 16, and 32-bit zero-extending loads.
30145    return true;
30146  }
30147
30148  return false;
30149}
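// For example, (zext (load i8)) to i32 lowers to a single 'movzbl' from
// memory, so zero-extending the loaded value costs nothing extra.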
30150
30151bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
30152  if (isa<MaskedLoadSDNode>(ExtVal.getOperand(0)))
30153    return false;
30154
30155  EVT SrcVT = ExtVal.getOperand(0).getValueType();
30156
30157  // There is no extending load for vXi1.
30158  if (SrcVT.getScalarType() == MVT::i1)
30159    return false;
30160
30161  return true;
30162}
30163
30164bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
30165                                                   EVT VT) const {
30166  if (!Subtarget.hasAnyFMA())
30167    return false;
30168
30169  VT = VT.getScalarType();
30170
30171  if (!VT.isSimple())
30172    return false;
30173
30174  switch (VT.getSimpleVT().SimpleTy) {
30175  case MVT::f32:
30176  case MVT::f64:
30177    return true;
30178  default:
30179    break;
30180  }
30181
30182  return false;
30183}
30184
30185bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
30186  // i16 instructions are longer (0x66 prefix) and potentially slower.
30187  return !(VT1 == MVT::i32 && VT2 == MVT::i16);
30188}
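// For example, it is usually better to keep a 32-bit 'addl $1, %eax' than to
// narrow it to the 0x66-prefixed 'addw $1, %ax', which is longer and can hit
// length-changing-prefix stalls on some microarchitectures.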
30189
30190/// Targets can use this to indicate that they only support *some*
30191/// VECTOR_SHUFFLE operations, those with specific masks.
30192/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
30193/// are assumed to be legal.
30194bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
30195  if (!VT.isSimple())
30196    return false;
30197
30198  // Not for i1 vectors
30199  if (VT.getSimpleVT().getScalarType() == MVT::i1)
30200    return false;
30201
30202  // Very little shuffling can be done for 64-bit vectors right now.
30203  if (VT.getSimpleVT().getSizeInBits() == 64)
30204    return false;
30205
30206  // We only care that the types being shuffled are legal. The lowering can
30207  // handle any possible shuffle mask that results.
30208  return isTypeLegal(VT.getSimpleVT());
30209}
30210
30211bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
30212                                               EVT VT) const {
30213  // Don't convert an 'and' into a shuffle that we don't directly support.
30214  // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
30215  if (!Subtarget.hasAVX2())
30216    if (VT == MVT::v32i8 || VT == MVT::v16i16)
30217      return false;
30218
30219  // Just delegate to the generic legality, clear masks aren't special.
30220  return isShuffleMaskLegal(Mask, VT);
30221}
30222
30223bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
30224  // If the subtarget is using thunks, we must not generate jump tables.
30225  if (Subtarget.useIndirectThunkBranches())
30226    return false;
30227
30228  // Otherwise, fallback on the generic logic.
30229  return TargetLowering::areJTsAllowed(Fn);
30230}
30231
30232//===----------------------------------------------------------------------===//
30233//                           X86 Scheduler Hooks
30234//===----------------------------------------------------------------------===//
30235
30236/// Utility function to emit xbegin specifying the start of an RTM region.
30237static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
30238                                     const TargetInstrInfo *TII) {
30239  DebugLoc DL = MI.getDebugLoc();
30240
30241  const BasicBlock *BB = MBB->getBasicBlock();
30242  MachineFunction::iterator I = ++MBB->getIterator();
30243
30244  // For v = xbegin(), we generate
30245  //
30246  // thisMBB:
30247  //  xbegin fallMBB
30248  //
30249  // mainMBB:
30250  //  s0 = -1
30251  //
30252  // fallMBB:
30253  //  eax = # XABORT_DEF
30254  //  s1 = eax
30255  //
30256  // sinkMBB:
30257  //  v = phi(s0/mainMBB, s1/fallMBB)
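  //
  // As a rough sketch of the resulting machine code (the labels and virtual
  // register names below are illustrative only):
  //
  //   thisMBB:  xbegin fallMBB     # falls through to mainMBB on success
  //   mainMBB:  mov    $-1, s0     # i.e. v = _XBEGIN_STARTED
  //             jmp    sinkMBB
  //   fallMBB:  mov    %eax, s1    # EAX holds the abort status from XABORT
  //   sinkMBB:  v = phi(s0, s1)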
30258
30259  MachineBasicBlock *thisMBB = MBB;
30260  MachineFunction *MF = MBB->getParent();
30261  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
30262  MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
30263  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
30264  MF->insert(I, mainMBB);
30265  MF->insert(I, fallMBB);
30266  MF->insert(I, sinkMBB);
30267
30268  // Transfer the remainder of BB and its successor edges to sinkMBB.
30269  sinkMBB->splice(sinkMBB->begin(), MBB,
30270                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
30271  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
30272
30273  MachineRegisterInfo &MRI = MF->getRegInfo();
30274  Register DstReg = MI.getOperand(0).getReg();
30275  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
30276  Register mainDstReg = MRI.createVirtualRegister(RC);
30277  Register fallDstReg = MRI.createVirtualRegister(RC);
30278
30279  // thisMBB:
30280  //  xbegin fallMBB
30281  //  # fallthrough to mainMBB
30282  //  # on abort, control transfers to fallMBB
30283  BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
30284  thisMBB->addSuccessor(mainMBB);
30285  thisMBB->addSuccessor(fallMBB);
30286
30287  // mainMBB:
30288  //  mainDstReg := -1
30289  BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
30290  BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
30291  mainMBB->addSuccessor(sinkMBB);
30292
30293  // fallMBB:
30294  //  ; pseudo instruction modeling the hardware's definition of EAX on abort
30295  //  EAX := XABORT_DEF
30296  //  fallDstReg := EAX
30297  BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF));
30298  BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg)
30299      .addReg(X86::EAX);
30300  fallMBB->addSuccessor(sinkMBB);
30301
30302  // sinkMBB:
30303  //  DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
30304  BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg)
30305      .addReg(mainDstReg).addMBB(mainMBB)
30306      .addReg(fallDstReg).addMBB(fallMBB);
30307
30308  MI.eraseFromParent();
30309  return sinkMBB;
30310}
30311
30314MachineBasicBlock *
30315X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
30316                                                 MachineBasicBlock *MBB) const {
30317  // Emit va_arg instruction on X86-64.
30318
30319  // Operands to this pseudo-instruction:
30320  // 0  ) Output        : destination address (reg)
30321  // 1-5) Input         : va_list address (addr, i64mem)
30322  // 6  ) ArgSize       : Size (in bytes) of vararg type
30323  // 7  ) ArgMode       : 0=overflow only, 1=use gp_offset, 2=use fp_offset
30324  // 8  ) Align         : Alignment of type
30325  // 9  ) EFLAGS (implicit-def)
30326
30327  assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
30328  static_assert(X86::AddrNumOperands == 5,
30329                "VAARG_64 assumes 5 address operands");
30330
30331  Register DestReg = MI.getOperand(0).getReg();
30332  MachineOperand &Base = MI.getOperand(1);
30333  MachineOperand &Scale = MI.getOperand(2);
30334  MachineOperand &Index = MI.getOperand(3);
30335  MachineOperand &Disp = MI.getOperand(4);
30336  MachineOperand &Segment = MI.getOperand(5);
30337  unsigned ArgSize = MI.getOperand(6).getImm();
30338  unsigned ArgMode = MI.getOperand(7).getImm();
30339  unsigned Align = MI.getOperand(8).getImm();
30340
30341  MachineFunction *MF = MBB->getParent();
30342
30343  // Memory Reference
30344  assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
30345
30346  MachineMemOperand *OldMMO = MI.memoperands().front();
30347
30348  // Clone the MMO into two separate MMOs for loading and storing
30349  MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
30350      OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
30351  MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
30352      OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
30353
30354  // Machine Information
30355  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30356  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
30357  const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
30358  const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
30359  DebugLoc DL = MI.getDebugLoc();
30360
30361  // struct va_list {
30362  //   i32   gp_offset
30363  //   i32   fp_offset
30364  //   i64   overflow_area (address)
30365  //   i64   reg_save_area (address)
30366  // }
30367  // sizeof(va_list) = 24
30368  // alignment(va_list) = 8
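  //
  // As an illustrative example: for a 4-byte integer argument ArgMode is 1,
  // so the code below compares gp_offset against 6 * 8 == 48. While it is
  // still below that bound, the value is read from reg_save_area + gp_offset
  // and gp_offset is bumped by 8; otherwise the value is read from
  // overflow_area, which is then advanced by the 8-byte-aligned argument size.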
30369
30370  unsigned TotalNumIntRegs = 6;
30371  unsigned TotalNumXMMRegs = 8;
30372  bool UseGPOffset = (ArgMode == 1);
30373  bool UseFPOffset = (ArgMode == 2);
30374  unsigned MaxOffset = TotalNumIntRegs * 8 +
30375                       (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
30376
30377  // Align ArgSize to a multiple of 8.
30378  unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
30379  bool NeedsAlign = (Align > 8);
30380
30381  MachineBasicBlock *thisMBB = MBB;
30382  MachineBasicBlock *overflowMBB;
30383  MachineBasicBlock *offsetMBB;
30384  MachineBasicBlock *endMBB;
30385
30386  unsigned OffsetDestReg = 0;    // Argument address computed by offsetMBB
30387  unsigned OverflowDestReg = 0;  // Argument address computed by overflowMBB
30388  unsigned OffsetReg = 0;
30389
30390  if (!UseGPOffset && !UseFPOffset) {
30391    // If we only pull from the overflow region, there is no branch to create
30392    // and no need to alter control flow.
30393    OffsetDestReg = 0; // unused
30394    OverflowDestReg = DestReg;
30395
30396    offsetMBB = nullptr;
30397    overflowMBB = thisMBB;
30398    endMBB = thisMBB;
30399  } else {
30400    // First emit code to check if gp_offset (or fp_offset) is below the bound.
30401    // If so, pull the argument from reg_save_area. (branch to offsetMBB)
30402    // If not, pull from overflow_area. (branch to overflowMBB)
30403    //
30404    //       thisMBB
30405    //         |     .
30406    //         |        .
30407    //     offsetMBB   overflowMBB
30408    //         |        .
30409    //         |     .
30410    //        endMBB
30411
30412    // Registers for the PHI in endMBB
30413    OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
30414    OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
30415
30416    const BasicBlock *LLVM_BB = MBB->getBasicBlock();
30417    overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
30418    offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
30419    endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
30420
30421    MachineFunction::iterator MBBIter = ++MBB->getIterator();
30422
30423    // Insert the new basic blocks
30424    MF->insert(MBBIter, offsetMBB);
30425    MF->insert(MBBIter, overflowMBB);
30426    MF->insert(MBBIter, endMBB);
30427
30428    // Transfer the remainder of MBB and its successor edges to endMBB.
30429    endMBB->splice(endMBB->begin(), thisMBB,
30430                   std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
30431    endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
30432
30433    // Make offsetMBB and overflowMBB successors of thisMBB
30434    thisMBB->addSuccessor(offsetMBB);
30435    thisMBB->addSuccessor(overflowMBB);
30436
30437    // endMBB is a successor of both offsetMBB and overflowMBB
30438    offsetMBB->addSuccessor(endMBB);
30439    overflowMBB->addSuccessor(endMBB);
30440
30441    // Load the offset value into a register
30442    OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
30443    BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
30444        .add(Base)
30445        .add(Scale)
30446        .add(Index)
30447        .addDisp(Disp, UseFPOffset ? 4 : 0)
30448        .add(Segment)
30449        .setMemRefs(LoadOnlyMMO);
30450
30451    // Check if there is enough room left to pull this argument.
30452    BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
30453      .addReg(OffsetReg)
30454      .addImm(MaxOffset + 8 - ArgSizeA8);
30455
30456    // Branch to "overflowMBB" if offset >= max
30457    // Fall through to "offsetMBB" otherwise
30458    BuildMI(thisMBB, DL, TII->get(X86::JCC_1))
30459      .addMBB(overflowMBB).addImm(X86::COND_AE);
30460  }
30461
30462  // In offsetMBB, emit code to use the reg_save_area.
30463  if (offsetMBB) {
30464    assert(OffsetReg != 0);
30465
30466    // Read the reg_save_area address.
30467    Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
30468    BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
30469        .add(Base)
30470        .add(Scale)
30471        .add(Index)
30472        .addDisp(Disp, 16)
30473        .add(Segment)
30474        .setMemRefs(LoadOnlyMMO);
30475
30476    // Zero-extend the offset
30477    Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
30478    BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
30479        .addImm(0)
30480        .addReg(OffsetReg)
30481        .addImm(X86::sub_32bit);
30482
30483    // Add the offset to the reg_save_area to get the final address.
30484    BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
30485      .addReg(OffsetReg64)
30486      .addReg(RegSaveReg);
30487
30488    // Compute the offset for the next argument
30489    Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
30490    BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
30491      .addReg(OffsetReg)
30492      .addImm(UseFPOffset ? 16 : 8);
30493
30494    // Store it back into the va_list.
30495    BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
30496        .add(Base)
30497        .add(Scale)
30498        .add(Index)
30499        .addDisp(Disp, UseFPOffset ? 4 : 0)
30500        .add(Segment)
30501        .addReg(NextOffsetReg)
30502        .setMemRefs(StoreOnlyMMO);
30503
30504    // Jump to endMBB
30505    BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
30506      .addMBB(endMBB);
30507  }
30508
30509  //
30510  // Emit code to use overflow area
30511  //
30512
30513  // Load the overflow_area address into a register.
30514  Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
30515  BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
30516      .add(Base)
30517      .add(Scale)
30518      .add(Index)
30519      .addDisp(Disp, 8)
30520      .add(Segment)
30521      .setMemRefs(LoadOnlyMMO);
30522
30523  // If we need to align it, do so. Otherwise, just copy the address
30524  // to OverflowDestReg.
30525  if (NeedsAlign) {
30526    // Align the overflow address
30527    assert(isPowerOf2_32(Align) && "Alignment must be a power of 2");
30528    Register TmpReg = MRI.createVirtualRegister(AddrRegClass);
30529
30530    // aligned_addr = (addr + (align-1)) & ~(align-1)
30531    BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
30532      .addReg(OverflowAddrReg)
30533      .addImm(Align-1);
30534
30535    BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
30536      .addReg(TmpReg)
30537      .addImm(~(uint64_t)(Align-1));
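    // For example, with Align == 16 and an overflow address of 0x1008, the
    // ADD yields 0x1017 and the AND with ~15 rounds the address up to 0x1010.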
30538  } else {
30539    BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
30540      .addReg(OverflowAddrReg);
30541  }
30542
30543  // Compute the next overflow address after this argument.
30544  // (the overflow address should be kept 8-byte aligned)
30545  Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
30546  BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
30547    .addReg(OverflowDestReg)
30548    .addImm(ArgSizeA8);
30549
30550  // Store the new overflow address.
30551  BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
30552      .add(Base)
30553      .add(Scale)
30554      .add(Index)
30555      .addDisp(Disp, 8)
30556      .add(Segment)
30557      .addReg(NextAddrReg)
30558      .setMemRefs(StoreOnlyMMO);
30559
30560  // If we branched, emit the PHI to the front of endMBB.
30561  if (offsetMBB) {
30562    BuildMI(*endMBB, endMBB->begin(), DL,
30563            TII->get(X86::PHI), DestReg)
30564      .addReg(OffsetDestReg).addMBB(offsetMBB)
30565      .addReg(OverflowDestReg).addMBB(overflowMBB);
30566  }
30567
30568  // Erase the pseudo instruction
30569  MI.eraseFromParent();
30570
30571  return endMBB;
30572}
30573
30574MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
30575    MachineInstr &MI, MachineBasicBlock *MBB) const {
30576  // Emit code to save XMM registers to the stack. The ABI says that the
30577  // number of registers to save is given in %al, so it's theoretically
30578  // possible to do an indirect jump trick to avoid saving all of them.
30579  // However, this code takes a simpler approach and just executes all
30580  // of the stores if %al is non-zero. It's less code, it's probably
30581  // easier on the hardware branch predictor, and stores aren't all that
30582  // expensive anyway.
30583
30584  // Create the new basic blocks. One block contains all the XMM stores,
30585  // and one block is the final destination regardless of whether any
30586  // stores were performed.
30587  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
30588  MachineFunction *F = MBB->getParent();
30589  MachineFunction::iterator MBBIter = ++MBB->getIterator();
30590  MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
30591  MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
30592  F->insert(MBBIter, XMMSaveMBB);
30593  F->insert(MBBIter, EndMBB);
30594
30595  // Transfer the remainder of MBB and its successor edges to EndMBB.
30596  EndMBB->splice(EndMBB->begin(), MBB,
30597                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
30598  EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
30599
30600  // The original block will now fall through to the XMM save block.
30601  MBB->addSuccessor(XMMSaveMBB);
30602  // The XMMSaveMBB will fall through to the end block.
30603  XMMSaveMBB->addSuccessor(EndMBB);
30604
30605  // Now add the instructions.
30606  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30607  DebugLoc DL = MI.getDebugLoc();
30608
30609  Register CountReg = MI.getOperand(0).getReg();
30610  int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
30611  int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
30612
30613  if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
30614    // If %al is 0, branch around the XMM save block.
30615    BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
30616    BuildMI(MBB, DL, TII->get(X86::JCC_1)).addMBB(EndMBB).addImm(X86::COND_E);
30617    MBB->addSuccessor(EndMBB);
30618  }
30619
30620  // Make sure the last operand is EFLAGS, which gets clobbered by the branch
30621  // that was just emitted, but clearly shouldn't be "saved".
30622  assert((MI.getNumOperands() <= 3 ||
30623          !MI.getOperand(MI.getNumOperands() - 1).isReg() ||
30624          MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
30625         "Expected last argument to be EFLAGS");
30626  unsigned MOVOpc = Subtarget.hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
30627  // In the XMM save block, save all the XMM argument registers.
30628  for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
30629    int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
30630    MachineMemOperand *MMO = F->getMachineMemOperand(
30631        MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
30632        MachineMemOperand::MOStore,
30633        /*Size=*/16, /*Align=*/16);
30634    BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
30635        .addFrameIndex(RegSaveFrameIndex)
30636        .addImm(/*Scale=*/1)
30637        .addReg(/*IndexReg=*/0)
30638        .addImm(/*Disp=*/Offset)
30639        .addReg(/*Segment=*/0)
30640        .addReg(MI.getOperand(i).getReg())
30641        .addMemOperand(MMO);
30642  }
30643
30644  MI.eraseFromParent(); // The pseudo instruction is gone now.
30645
30646  return EndMBB;
30647}
30648
30649// The EFLAGS operand of SelectItr might be missing a kill marker
30650// because there were multiple uses of EFLAGS, and ISel didn't know
30651// which to mark. Figure out whether SelectItr should have had a
30652// kill marker, and set it if it should. Returns the correct kill
30653// marker value.
30654static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
30655                                     MachineBasicBlock* BB,
30656                                     const TargetRegisterInfo* TRI) {
30657  // Scan forward through BB for a use/def of EFLAGS.
30658  MachineBasicBlock::iterator miI(std::next(SelectItr));
30659  for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
30660    const MachineInstr& mi = *miI;
30661    if (mi.readsRegister(X86::EFLAGS))
30662      return false;
30663    if (mi.definesRegister(X86::EFLAGS))
30664      break; // Should have kill-flag - update below.
30665  }
30666
30667  // If we hit the end of the block, check whether EFLAGS is live into a
30668  // successor.
30669  if (miI == BB->end()) {
30670    for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
30671                                          sEnd = BB->succ_end();
30672         sItr != sEnd; ++sItr) {
30673      MachineBasicBlock* succ = *sItr;
30674      if (succ->isLiveIn(X86::EFLAGS))
30675        return false;
30676    }
30677  }
30678
30679  // We found a def, or hit the end of the basic block and EFLAGS wasn't live
30680  // out. SelectMI should have a kill flag on EFLAGS.
30681  SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
30682  return true;
30683}
30684
30685// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
30686// together with other CMOV pseudo-opcodes into a single basic block with
30687// a conditional jump around it.
30688static bool isCMOVPseudo(MachineInstr &MI) {
30689  switch (MI.getOpcode()) {
30690  case X86::CMOV_FR32:
30691  case X86::CMOV_FR32X:
30692  case X86::CMOV_FR64:
30693  case X86::CMOV_FR64X:
30694  case X86::CMOV_GR8:
30695  case X86::CMOV_GR16:
30696  case X86::CMOV_GR32:
30697  case X86::CMOV_RFP32:
30698  case X86::CMOV_RFP64:
30699  case X86::CMOV_RFP80:
30700  case X86::CMOV_VR128:
30701  case X86::CMOV_VR128X:
30702  case X86::CMOV_VR256:
30703  case X86::CMOV_VR256X:
30704  case X86::CMOV_VR512:
30705  case X86::CMOV_VK2:
30706  case X86::CMOV_VK4:
30707  case X86::CMOV_VK8:
30708  case X86::CMOV_VK16:
30709  case X86::CMOV_VK32:
30710  case X86::CMOV_VK64:
30711    return true;
30712
30713  default:
30714    return false;
30715  }
30716}
30717
30718// Helper function that inserts PHI functions into SinkMBB:
30719//   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
30720// where %FalseValue(i) and %TrueValue(i) are taken from the consecutive CMOVs
30721// in the range [MIItBegin, MIItEnd). It returns the MachineInstrBuilder for
30722// the last PHI function inserted.
30723static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
30724    MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
30725    MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
30726    MachineBasicBlock *SinkMBB) {
30727  MachineFunction *MF = TrueMBB->getParent();
30728  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
30729  DebugLoc DL = MIItBegin->getDebugLoc();
30730
30731  X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
30732  X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
30733
30734  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
30735
30736  // As we are creating the PHIs, we have to be careful if there is more than
30737  // one.  Later CMOVs may reference the results of earlier CMOVs, but later
30738  // PHIs have to reference the individual true/false inputs from earlier PHIs.
30739  // That also means that PHI construction must work forward from earlier to
30740  // later, and that the code must maintain a mapping from each earlier PHI's
30741  // destination register to the registers that went into that PHI.
30742  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
30743  MachineInstrBuilder MIB;
30744
30745  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
30746    Register DestReg = MIIt->getOperand(0).getReg();
30747    Register Op1Reg = MIIt->getOperand(1).getReg();
30748    Register Op2Reg = MIIt->getOperand(2).getReg();
30749
30750    // If the CMOV we are lowering uses the opposite condition from the
30751    // branch we generated, then we have to swap the operands for the
30752    // PHI that is going to be generated.
30753    if (MIIt->getOperand(3).getImm() == OppCC)
30754      std::swap(Op1Reg, Op2Reg);
30755
30756    if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
30757      Op1Reg = RegRewriteTable[Op1Reg].first;
30758
30759    if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
30760      Op2Reg = RegRewriteTable[Op2Reg].second;
30761
30762    MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
30763              .addReg(Op1Reg)
30764              .addMBB(FalseMBB)
30765              .addReg(Op2Reg)
30766              .addMBB(TrueMBB);
30767
30768    // Add this PHI to the rewrite table.
30769    RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
30770  }
30771
30772  return MIB;
30773}
30774
30775// Lower cascaded selects of the form (SecondCMOV (FirstCMOV F, T, cc1), T, cc2).
30776MachineBasicBlock *
30777X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
30778                                             MachineInstr &SecondCascadedCMOV,
30779                                             MachineBasicBlock *ThisMBB) const {
30780  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30781  DebugLoc DL = FirstCMOV.getDebugLoc();
30782
30783  // We lower cascaded CMOVs such as
30784  //
30785  //   (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
30786  //
30787  // to two successive branches.
30788  //
30789  // Without this, we would add a PHI between the two jumps, which ends up
30790  // creating a few copies all around. For instance, for
30791  //
30792  //    (sitofp (zext (fcmp une)))
30793  //
30794  // we would generate:
30795  //
30796  //         ucomiss %xmm1, %xmm0
30797  //         movss  <1.0f>, %xmm0
30798  //         movaps  %xmm0, %xmm1
30799  //         jne     .LBB5_2
30800  //         xorps   %xmm1, %xmm1
30801  // .LBB5_2:
30802  //         jp      .LBB5_4
30803  //         movaps  %xmm1, %xmm0
30804  // .LBB5_4:
30805  //         retq
30806  //
30807  // because this custom-inserter would have generated:
30808  //
30809  //   A
30810  //   | \
30811  //   |  B
30812  //   | /
30813  //   C
30814  //   | \
30815  //   |  D
30816  //   | /
30817  //   E
30818  //
30819  // A: X = ...; Y = ...
30820  // B: empty
30821  // C: Z = PHI [X, A], [Y, B]
30822  // D: empty
30823  // E: PHI [X, C], [Z, D]
30824  //
30825  // If we lower both CMOVs in a single step, we can instead generate:
30826  //
30827  //   A
30828  //   | \
30829  //   |  C
30830  //   | /|
30831  //   |/ |
30832  //   |  |
30833  //   |  D
30834  //   | /
30835  //   E
30836  //
30837  // A: X = ...; Y = ...
30838  // D: empty
30839  // E: PHI [X, A], [X, C], [Y, D]
30840  //
30841  // Which, in our sitofp/fcmp example, gives us something like:
30842  //
30843  //         ucomiss %xmm1, %xmm0
30844  //         movss  <1.0f>, %xmm0
30845  //         jne     .LBB5_4
30846  //         jp      .LBB5_4
30847  //         xorps   %xmm0, %xmm0
30848  // .LBB5_4:
30849  //         retq
30850  //
30851
30852  // We lower cascaded CMOV into two successive branches to the same block.
30853  // EFLAGS is used by both, so mark it as live in the second.
30854  const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
30855  MachineFunction *F = ThisMBB->getParent();
30856  MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
30857  MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
30858  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
30859
30860  MachineFunction::iterator It = ++ThisMBB->getIterator();
30861  F->insert(It, FirstInsertedMBB);
30862  F->insert(It, SecondInsertedMBB);
30863  F->insert(It, SinkMBB);
30864
30865  // For a cascaded CMOV, we lower it to two successive branches to
30866  // the same block (SinkMBB).  EFLAGS is used by both, so mark it as live in
30867  // the FirstInsertedMBB.
30868  FirstInsertedMBB->addLiveIn(X86::EFLAGS);
30869
30870  // If the EFLAGS register isn't dead in the terminator, then claim that it's
30871  // live into the sink and copy blocks.
30872  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
30873  if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
30874      !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
30875    SecondInsertedMBB->addLiveIn(X86::EFLAGS);
30876    SinkMBB->addLiveIn(X86::EFLAGS);
30877  }
30878
30879  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
30880  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
30881                  std::next(MachineBasicBlock::iterator(FirstCMOV)),
30882                  ThisMBB->end());
30883  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
30884
30885  // Fallthrough block for ThisMBB.
30886  ThisMBB->addSuccessor(FirstInsertedMBB);
30887  // The true block target of the first branch is always SinkMBB.
30888  ThisMBB->addSuccessor(SinkMBB);
30889  // Fallthrough block for FirstInsertedMBB.
30890  FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
30891  // The true block for the branch of FirstInsertedMBB.
30892  FirstInsertedMBB->addSuccessor(SinkMBB);
30893  // SecondInsertedMBB falls through to SinkMBB.
30894  SecondInsertedMBB->addSuccessor(SinkMBB);
30895
30896  // Create the conditional branch instructions.
30897  X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
30898  BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);
30899
30900  X86::CondCode SecondCC =
30901      X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
30902  BuildMI(FirstInsertedMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(SecondCC);
30903
30904  //  SinkMBB:
30905  //   %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
30906  Register DestReg = FirstCMOV.getOperand(0).getReg();
30907  Register Op1Reg = FirstCMOV.getOperand(1).getReg();
30908  Register Op2Reg = FirstCMOV.getOperand(2).getReg();
30909  MachineInstrBuilder MIB =
30910      BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg)
30911          .addReg(Op1Reg)
30912          .addMBB(SecondInsertedMBB)
30913          .addReg(Op2Reg)
30914          .addMBB(ThisMBB);
30915
30916  // SecondInsertedMBB provides the same incoming value as the
30917  // FirstInsertedMBB (the True operand of the SELECT_CC/CMOV nodes).
30918  MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
30919  // Copy the PHI result to the register defined by the second CMOV.
30920  BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())), DL,
30921          TII->get(TargetOpcode::COPY),
30922          SecondCascadedCMOV.getOperand(0).getReg())
30923      .addReg(FirstCMOV.getOperand(0).getReg());
30924
30925  // Now remove the CMOVs.
30926  FirstCMOV.eraseFromParent();
30927  SecondCascadedCMOV.eraseFromParent();
30928
30929  return SinkMBB;
30930}
30931
30932MachineBasicBlock *
30933X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
30934                                     MachineBasicBlock *ThisMBB) const {
30935  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30936  DebugLoc DL = MI.getDebugLoc();
30937
30938  // To "insert" a SELECT_CC instruction, we actually have to insert the
30939  // diamond control-flow pattern.  The incoming instruction knows the
30940  // destination vreg to set, the condition code register to branch on, the
30941  // true/false values to select between and a branch opcode to use.
30942
30943  //  ThisMBB:
30944  //  ...
30945  //   TrueVal = ...
30946  //   cmpTY ccX, r1, r2
30947  //   bCC copy1MBB
30948  //   fallthrough --> FalseMBB
30949
30950  // This code lowers all pseudo-CMOV instructions. Generally it lowers these
30951  // as described above, by inserting a BB, and then making a PHI at the join
30952  // point to select the true and false operands of the CMOV in the PHI.
30953  //
30954  // The code also handles two different cases of multiple CMOV opcodes
30955  // in a row.
30956  //
30957  // Case 1:
30958  // In this case, there are multiple CMOVs in a row, all which are based on
30959  // the same condition setting (or the exact opposite condition setting).
30960  // In this case we can lower all the CMOVs using a single inserted BB, and
30961  // then make a number of PHIs at the join point to model the CMOVs. The only
30962  // trickiness here is that in a case like:
30963  //
30964  // t2 = CMOV cond1 t1, f1
30965  // t3 = CMOV cond1 t2, f2
30966  //
30967  // when rewriting this into PHIs, we have to perform some renaming on the
30968  // temps since you cannot have a PHI operand refer to a PHI result earlier
30969  // in the same block.  The "simple" but wrong lowering would be:
30970  //
30971  // t2 = PHI t1(BB1), f1(BB2)
30972  // t3 = PHI t2(BB1), f2(BB2)
30973  //
30974  // but clearly t2 is not defined in BB1, so that is incorrect. The proper
30975  // renaming is to note that on the path through BB1, t2 is really just a
30976  // copy of t1, and do that renaming, properly generating:
30977  //
30978  // t2 = PHI t1(BB1), f1(BB2)
30979  // t3 = PHI t1(BB1), f2(BB2)
30980  //
30981  // Case 2:
30982  // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
30983  // function - EmitLoweredCascadedSelect.
30984
30985  X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
30986  X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
30987  MachineInstr *LastCMOV = &MI;
30988  MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);
30989
30990  // Check for case 1, where there are multiple CMOVs with the same condition
30991  // first.  Of the two cases of multiple CMOV lowerings, case 1 reduces the
30992  // number of jumps the most.
30993
30994  if (isCMOVPseudo(MI)) {
30995    // See if we have a string of CMOVS with the same condition. Skip over
30996    // intervening debug insts.
30997    while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
30998           (NextMIIt->getOperand(3).getImm() == CC ||
30999            NextMIIt->getOperand(3).getImm() == OppCC)) {
31000      LastCMOV = &*NextMIIt;
31001      ++NextMIIt;
31002      NextMIIt = skipDebugInstructionsForward(NextMIIt, ThisMBB->end());
31003    }
31004  }
31005
31006  // Now check for case 2, but only if we didn't already find case 1
31007  // (indicated by LastCMOV still pointing at MI).
31008  if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
31009      NextMIIt->getOpcode() == MI.getOpcode() &&
31010      NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
31011      NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
31012      NextMIIt->getOperand(1).isKill()) {
31013    return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
31014  }
31015
31016  const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
31017  MachineFunction *F = ThisMBB->getParent();
31018  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
31019  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
31020
31021  MachineFunction::iterator It = ++ThisMBB->getIterator();
31022  F->insert(It, FalseMBB);
31023  F->insert(It, SinkMBB);
31024
31025  // If the EFLAGS register isn't dead in the terminator, then claim that it's
31026  // live into SinkMBB and FalseMBB.
31027  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
31028  if (!LastCMOV->killsRegister(X86::EFLAGS) &&
31029      !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
31030    FalseMBB->addLiveIn(X86::EFLAGS);
31031    SinkMBB->addLiveIn(X86::EFLAGS);
31032  }
31033
31034  // Transfer any debug instructions inside the CMOV sequence to the sunk block.
31035  auto DbgEnd = MachineBasicBlock::iterator(LastCMOV);
31036  auto DbgIt = MachineBasicBlock::iterator(MI);
31037  while (DbgIt != DbgEnd) {
31038    auto Next = std::next(DbgIt);
31039    if (DbgIt->isDebugInstr())
31040      SinkMBB->push_back(DbgIt->removeFromParent());
31041    DbgIt = Next;
31042  }
31043
31044  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
31045  SinkMBB->splice(SinkMBB->end(), ThisMBB,
31046                  std::next(MachineBasicBlock::iterator(LastCMOV)),
31047                  ThisMBB->end());
31048  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
31049
31050  // Fallthrough block for ThisMBB.
31051  ThisMBB->addSuccessor(FalseMBB);
31052  // The true block target of the first (or only) branch is always SinkMBB.
31053  ThisMBB->addSuccessor(SinkMBB);
31054  // Fallthrough block for FalseMBB.
31055  FalseMBB->addSuccessor(SinkMBB);
31056
31057  // Create the conditional branch instruction.
31058  BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);
31059
31060  //  SinkMBB:
31061  //   %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
31062  //  ...
31063  MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
31064  MachineBasicBlock::iterator MIItEnd =
31065      std::next(MachineBasicBlock::iterator(LastCMOV));
31066  createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);
31067
31068  // Now remove the CMOV(s).
31069  ThisMBB->erase(MIItBegin, MIItEnd);
31070
31071  return SinkMBB;
31072}
31073
31074MachineBasicBlock *
31075X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
31076                                        MachineBasicBlock *BB) const {
31077  MachineFunction *MF = BB->getParent();
31078  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31079  DebugLoc DL = MI.getDebugLoc();
31080  const BasicBlock *LLVM_BB = BB->getBasicBlock();
31081
31082  assert(MF->shouldSplitStack());
31083
31084  const bool Is64Bit = Subtarget.is64Bit();
31085  const bool IsLP64 = Subtarget.isTarget64BitLP64();
31086
31087  const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
31088  const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
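  // These TLS-relative offsets are where the split-stack runtime is assumed to
  // keep the current stacklet's limit (e.g. %fs:0x70 on LP64, %gs:0x30 on
  // 32-bit targets); the limit is compared against the prospective stack
  // pointer below.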
31089
31090  // BB:
31091  //  ... [Till the alloca]
31092  // If the stacklet is not large enough, jump to mallocMBB
31093  //
31094  // bumpMBB:
31095  //  Allocate by subtracting from RSP
31096  //  Jump to continueMBB
31097  //
31098  // mallocMBB:
31099  //  Allocate by call to runtime
31100  //
31101  // continueMBB:
31102  //  ...
31103  //  [rest of original BB]
31104  //
31105
31106  MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31107  MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31108  MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31109
31110  MachineRegisterInfo &MRI = MF->getRegInfo();
31111  const TargetRegisterClass *AddrRegClass =
31112      getRegClassFor(getPointerTy(MF->getDataLayout()));
31113
31114  unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
31115           bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
31116           tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
31117           SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
31118           sizeVReg = MI.getOperand(1).getReg(),
31119           physSPReg =
31120               IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
31121
31122  MachineFunction::iterator MBBIter = ++BB->getIterator();
31123
31124  MF->insert(MBBIter, bumpMBB);
31125  MF->insert(MBBIter, mallocMBB);
31126  MF->insert(MBBIter, continueMBB);
31127
31128  continueMBB->splice(continueMBB->begin(), BB,
31129                      std::next(MachineBasicBlock::iterator(MI)), BB->end());
31130  continueMBB->transferSuccessorsAndUpdatePHIs(BB);
31131
31132  // Add code to the main basic block to check whether the stack limit has been
31133  // hit, and if so, branch to mallocMBB; otherwise fall through to bumpMBB.
31134  BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
31135  BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
31136    .addReg(tmpSPVReg).addReg(sizeVReg);
31137  BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
31138    .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
31139    .addReg(SPLimitVReg);
31140  BuildMI(BB, DL, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);
31141
31142  // bumpMBB simply decreases the stack pointer, since we know the current
31143  // stacklet has enough space.
31144  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
31145    .addReg(SPLimitVReg);
31146  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
31147    .addReg(SPLimitVReg);
31148  BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
31149
31150  // mallocMBB calls a libgcc routine to allocate more stack space from the heap.
31151  const uint32_t *RegMask =
31152      Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
31153  if (IsLP64) {
31154    BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
31155      .addReg(sizeVReg);
31156    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
31157      .addExternalSymbol("__morestack_allocate_stack_space")
31158      .addRegMask(RegMask)
31159      .addReg(X86::RDI, RegState::Implicit)
31160      .addReg(X86::RAX, RegState::ImplicitDefine);
31161  } else if (Is64Bit) {
31162    BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
31163      .addReg(sizeVReg);
31164    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
31165      .addExternalSymbol("__morestack_allocate_stack_space")
31166      .addRegMask(RegMask)
31167      .addReg(X86::EDI, RegState::Implicit)
31168      .addReg(X86::EAX, RegState::ImplicitDefine);
31169  } else {
31170    BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
31171      .addImm(12);
31172    BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
31173    BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
31174      .addExternalSymbol("__morestack_allocate_stack_space")
31175      .addRegMask(RegMask)
31176      .addReg(X86::EAX, RegState::ImplicitDefine);
31177  }
31178
31179  if (!Is64Bit)
31180    BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
31181      .addImm(16);
31182
31183  BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
31184    .addReg(IsLP64 ? X86::RAX : X86::EAX);
31185  BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
31186
31187  // Set up the CFG correctly.
31188  BB->addSuccessor(bumpMBB);
31189  BB->addSuccessor(mallocMBB);
31190  mallocMBB->addSuccessor(continueMBB);
31191  bumpMBB->addSuccessor(continueMBB);
31192
31193  // Take care of the PHI nodes.
31194  BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
31195          MI.getOperand(0).getReg())
31196      .addReg(mallocPtrVReg)
31197      .addMBB(mallocMBB)
31198      .addReg(bumpSPPtrVReg)
31199      .addMBB(bumpMBB);
31200
31201  // Delete the original pseudo instruction.
31202  MI.eraseFromParent();
31203
31204  // And we're done.
31205  return continueMBB;
31206}
31207
31208MachineBasicBlock *
31209X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
31210                                       MachineBasicBlock *BB) const {
31211  MachineFunction *MF = BB->getParent();
31212  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
31213  MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
31214  DebugLoc DL = MI.getDebugLoc();
31215
31216  assert(!isAsynchronousEHPersonality(
31217             classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
31218         "SEH does not use catchret!");
31219
31220  // Only 32-bit EH needs to worry about manually restoring stack pointers.
31221  if (!Subtarget.is32Bit())
31222    return BB;
31223
31224  // C++ EH creates a new target block to hold the restore code, and wires up
31225  // the new block to the return destination with a normal JMP_4.
31226  MachineBasicBlock *RestoreMBB =
31227      MF->CreateMachineBasicBlock(BB->getBasicBlock());
31228  assert(BB->succ_size() == 1);
31229  MF->insert(std::next(BB->getIterator()), RestoreMBB);
31230  RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
31231  BB->addSuccessor(RestoreMBB);
31232  MI.getOperand(0).setMBB(RestoreMBB);
31233
31234  auto RestoreMBBI = RestoreMBB->begin();
31235  BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
31236  BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
31237  return BB;
31238}
31239
31240MachineBasicBlock *
31241X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
31242                                       MachineBasicBlock *BB) const {
31243  MachineFunction *MF = BB->getParent();
31244  const Constant *PerFn = MF->getFunction().getPersonalityFn();
31245  bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
31246  // Only 32-bit SEH requires special handling for catchpad.
31247  if (IsSEH && Subtarget.is32Bit()) {
31248    const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
31249    DebugLoc DL = MI.getDebugLoc();
31250    BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
31251  }
31252  MI.eraseFromParent();
31253  return BB;
31254}
31255
31256MachineBasicBlock *
31257X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
31258                                      MachineBasicBlock *BB) const {
31259  // So, here we replace TLSADDR with the sequence:
31260  // adjust_stackdown -> TLSADDR -> adjust_stackup.
31261  // We need this because TLSADDR is lowered into a call
31262  // inside MC; without the two markers, shrink-wrapping
31263  // may push the prologue/epilogue past them.
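  // As a rough sketch on a 64-bit target, the resulting MI sequence is:
  //   ADJCALLSTACKDOWN64 0, 0, 0
  //   TLS_addr64 ...       ; becomes the __tls_get_addr call during MC lowering
  //   ADJCALLSTACKUP64 0, 0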
31264  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
31265  DebugLoc DL = MI.getDebugLoc();
31266  MachineFunction &MF = *BB->getParent();
31267
31268  // Emit CALLSEQ_START right before the instruction.
31269  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
31270  MachineInstrBuilder CallseqStart =
31271    BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
31272  BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
31273
31274  // Emit CALLSEQ_END right after the instruction.
31275  // We don't call erase from parent because we want to keep the
31276  // original instruction around.
31277  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
31278  MachineInstrBuilder CallseqEnd =
31279    BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
31280  BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
31281
31282  return BB;
31283}
31284
31285MachineBasicBlock *
31286X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
31287                                      MachineBasicBlock *BB) const {
31288  // This is pretty easy.  We're taking the value that we received from
31289  // our load of the relocation, sticking it in either RDI (x86-64)
31290  // or EAX (x86-32) and doing an indirect call.  The return value will
31291  // then be in the normal return register.
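  // As a rough 64-bit sketch (illustrative symbol, not the exact operand
  // encoding), this emits:
  //   movq _var@TLVP(%rip), %rdi
  //   callq *(%rdi)          ; result comes back in %rax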
31292  MachineFunction *F = BB->getParent();
31293  const X86InstrInfo *TII = Subtarget.getInstrInfo();
31294  DebugLoc DL = MI.getDebugLoc();
31295
31296  assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
31297  assert(MI.getOperand(3).isGlobal() && "This should be a global");
31298
31299  // Get a register mask for the lowered call.
31300  // FIXME: The 32-bit calls have non-standard calling conventions. Use a
31301  // proper register mask.
31302  const uint32_t *RegMask =
31303      Subtarget.is64Bit() ?
31304      Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
31305      Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
31306  if (Subtarget.is64Bit()) {
31307    MachineInstrBuilder MIB =
31308        BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
31309            .addReg(X86::RIP)
31310            .addImm(0)
31311            .addReg(0)
31312            .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
31313                              MI.getOperand(3).getTargetFlags())
31314            .addReg(0);
31315    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
31316    addDirectMem(MIB, X86::RDI);
31317    MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
31318  } else if (!isPositionIndependent()) {
31319    MachineInstrBuilder MIB =
31320        BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
31321            .addReg(0)
31322            .addImm(0)
31323            .addReg(0)
31324            .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
31325                              MI.getOperand(3).getTargetFlags())
31326            .addReg(0);
31327    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
31328    addDirectMem(MIB, X86::EAX);
31329    MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
31330  } else {
31331    MachineInstrBuilder MIB =
31332        BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
31333            .addReg(TII->getGlobalBaseReg(F))
31334            .addImm(0)
31335            .addReg(0)
31336            .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
31337                              MI.getOperand(3).getTargetFlags())
31338            .addReg(0);
31339    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
31340    addDirectMem(MIB, X86::EAX);
31341    MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
31342  }
31343
31344  MI.eraseFromParent(); // The pseudo instruction is gone now.
31345  return BB;
31346}
31347
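// Map an INDIRECT_THUNK_* pseudo to the concrete call/tail-call opcode used
// once its callee operand has been rewritten to the thunk symbol.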
31348static unsigned getOpcodeForIndirectThunk(unsigned RPOpc) {
31349  switch (RPOpc) {
31350  case X86::INDIRECT_THUNK_CALL32:
31351    return X86::CALLpcrel32;
31352  case X86::INDIRECT_THUNK_CALL64:
31353    return X86::CALL64pcrel32;
31354  case X86::INDIRECT_THUNK_TCRETURN32:
31355    return X86::TCRETURNdi;
31356  case X86::INDIRECT_THUNK_TCRETURN64:
31357    return X86::TCRETURNdi64;
31358  }
31359  llvm_unreachable("not indirect thunk opcode");
31360}
31361
31362static const char *getIndirectThunkSymbol(const X86Subtarget &Subtarget,
31363                                          unsigned Reg) {
31364  if (Subtarget.useRetpolineExternalThunk()) {
31365    // When using an external thunk for retpolines, we pick names that match the
31366    // names GCC happens to use as well. This helps simplify the implementation
31367    // of the thunks for kernels where they have no easy ability to create
31368    // aliases and are doing non-trivial configuration of the thunk's body. For
31369    // example, the Linux kernel will do boot-time hot patching of the thunk
31370    // bodies and cannot easily export aliases of these to loaded modules.
31371    //
31372    // Note that at any point in the future, we may need to change the semantics
31373    // of how we implement retpolines and at that time will likely change the
31374    // name of the called thunk. Essentially, there is no hard guarantee that
31375    // LLVM will generate calls to specific thunks; we merely make a best-effort
31376    // attempt to help out kernels and other systems where duplicating the
31377    // thunks is costly.
31378    switch (Reg) {
31379    case X86::EAX:
31380      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31381      return "__x86_indirect_thunk_eax";
31382    case X86::ECX:
31383      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31384      return "__x86_indirect_thunk_ecx";
31385    case X86::EDX:
31386      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31387      return "__x86_indirect_thunk_edx";
31388    case X86::EDI:
31389      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31390      return "__x86_indirect_thunk_edi";
31391    case X86::R11:
31392      assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
31393      return "__x86_indirect_thunk_r11";
31394    }
31395    llvm_unreachable("unexpected reg for external indirect thunk");
31396  }
31397
31398  if (Subtarget.useRetpolineIndirectCalls() ||
31399      Subtarget.useRetpolineIndirectBranches()) {
31400    // When targeting an internal COMDAT thunk use an LLVM-specific name.
31401    switch (Reg) {
31402    case X86::EAX:
31403      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31404      return "__llvm_retpoline_eax";
31405    case X86::ECX:
31406      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31407      return "__llvm_retpoline_ecx";
31408    case X86::EDX:
31409      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31410      return "__llvm_retpoline_edx";
31411    case X86::EDI:
31412      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31413      return "__llvm_retpoline_edi";
31414    case X86::R11:
31415      assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
31416      return "__llvm_retpoline_r11";
31417    }
31418    llvm_unreachable("unexpected reg for retpoline");
31419  }
31420
31421  if (Subtarget.useLVIControlFlowIntegrity()) {
31422    assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
31423    return "__llvm_lvi_thunk_r11";
31424  }
31425  llvm_unreachable("getIndirectThunkSymbol() invoked without thunk feature");
31426}
31427
31428MachineBasicBlock *
31429X86TargetLowering::EmitLoweredIndirectThunk(MachineInstr &MI,
31430                                            MachineBasicBlock *BB) const {
31431  // Copy the virtual register into the R11 physical register and
31432  // call the retpoline thunk.
31433  DebugLoc DL = MI.getDebugLoc();
31434  const X86InstrInfo *TII = Subtarget.getInstrInfo();
31435  Register CalleeVReg = MI.getOperand(0).getReg();
31436  unsigned Opc = getOpcodeForIndirectThunk(MI.getOpcode());
31437
31438  // Find an available scratch register to hold the callee. On 64-bit, we can
31439  // just use R11, but we scan for uses anyway to ensure we don't generate
31440  // incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
31441  // already a register use operand to the call to hold the callee. If none
31442  // are available, use EDI instead. EDI is chosen because EBX is the PIC base
31443  // register and ESI is the base pointer to realigned stack frames with VLAs.
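  // As a rough sketch, on a 64-bit target with retpolines enabled this becomes
  // a COPY of the callee into R11 followed by "callq __llvm_retpoline_r11" (or
  // the tail-call form for the TCRETURN pseudos); the exact symbol depends on
  // which mitigation is enabled, see getIndirectThunkSymbol.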
31444  SmallVector<unsigned, 3> AvailableRegs;
31445  if (Subtarget.is64Bit())
31446    AvailableRegs.push_back(X86::R11);
31447  else
31448    AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
31449
31450  // Zero out any registers that are already used.
31451  for (const auto &MO : MI.operands()) {
31452    if (MO.isReg() && MO.isUse())
31453      for (unsigned &Reg : AvailableRegs)
31454        if (Reg == MO.getReg())
31455          Reg = 0;
31456  }
31457
31458  // Choose the first remaining non-zero available register.
31459  unsigned AvailableReg = 0;
31460  for (unsigned MaybeReg : AvailableRegs) {
31461    if (MaybeReg) {
31462      AvailableReg = MaybeReg;
31463      break;
31464    }
31465  }
31466  if (!AvailableReg)
31467    report_fatal_error("calling convention incompatible with retpoline, no "
31468                       "available registers");
31469
31470  const char *Symbol = getIndirectThunkSymbol(Subtarget, AvailableReg);
31471
31472  BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
31473      .addReg(CalleeVReg);
31474  MI.getOperand(0).ChangeToES(Symbol);
31475  MI.setDesc(TII->get(Opc));
31476  MachineInstrBuilder(*BB->getParent(), &MI)
31477      .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
31478  return BB;
31479}
31480
31481/// A SetJmp call implies a future control-flow change when the corresponding
31482/// LongJmp is called.
31483/// Instead of using the 'return' instruction, the long jump fixes the stack and
31484/// performs an indirect branch. To do so it uses the registers that were stored
31485/// in the jump buffer (when calling SetJmp).
31486/// If the shadow stack is enabled, we need to fix it as well, because some
31487/// return addresses will be skipped.
31488/// This function saves the SSP so that emitLongJmpShadowStackFix can repair the
31489/// shadow stack later.
31490/// \sa emitLongJmpShadowStackFix
31491/// \param [in] MI The temporary Machine Instruction for the builtin.
31492/// \param [in] MBB The Machine Basic Block that will be modified.
31493void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
31494                                                 MachineBasicBlock *MBB) const {
31495  DebugLoc DL = MI.getDebugLoc();
31496  MachineFunction *MF = MBB->getParent();
31497  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31498  MachineRegisterInfo &MRI = MF->getRegInfo();
31499  MachineInstrBuilder MIB;
31500
31501  // Memory Reference.
31502  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
31503                                           MI.memoperands_end());
31504
31505  // Initialize a register with zero.
31506  MVT PVT = getPointerTy(MF->getDataLayout());
31507  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
31508  Register ZReg = MRI.createVirtualRegister(PtrRC);
31509  unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
31510  BuildMI(*MBB, MI, DL, TII->get(XorRROpc))
31511      .addDef(ZReg)
31512      .addReg(ZReg, RegState::Undef)
31513      .addReg(ZReg, RegState::Undef);
31514
31515  // Read the current SSP register value into the zeroed register.
31516  Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
31517  unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
31518  BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
31519
31520  // Write the SSP register value to slot 3 (pointer-sized) of the input memory buffer.
31521  unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
31522  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrStoreOpc));
31523  const int64_t SSPOffset = 3 * PVT.getStoreSize();
31524  const unsigned MemOpndSlot = 1;
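  // Operand 0 of the setjmp pseudo is the result register; the address of the
  // jump buffer starts at operand 1.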
31525  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31526    if (i == X86::AddrDisp)
31527      MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
31528    else
31529      MIB.add(MI.getOperand(MemOpndSlot + i));
31530  }
31531  MIB.addReg(SSPCopyReg);
31532  MIB.setMemRefs(MMOs);
31533}
31534
31535MachineBasicBlock *
31536X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
31537                                    MachineBasicBlock *MBB) const {
31538  DebugLoc DL = MI.getDebugLoc();
31539  MachineFunction *MF = MBB->getParent();
31540  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31541  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
31542  MachineRegisterInfo &MRI = MF->getRegInfo();
31543
31544  const BasicBlock *BB = MBB->getBasicBlock();
31545  MachineFunction::iterator I = ++MBB->getIterator();
31546
31547  // Memory Reference
31548  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
31549                                           MI.memoperands_end());
31550
31551  unsigned DstReg;
31552  unsigned MemOpndSlot = 0;
31553
31554  unsigned CurOp = 0;
31555
31556  DstReg = MI.getOperand(CurOp++).getReg();
31557  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
31558  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
31559  (void)TRI;
31560  Register mainDstReg = MRI.createVirtualRegister(RC);
31561  Register restoreDstReg = MRI.createVirtualRegister(RC);
31562
31563  MemOpndSlot = CurOp;
31564
31565  MVT PVT = getPointerTy(MF->getDataLayout());
31566  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
31567         "Invalid Pointer Size!");
31568
31569  // For v = setjmp(buf), we generate
31570  //
31571  // thisMBB:
31572  //  buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
31573  //  SjLjSetup restoreMBB
31574  //
31575  // mainMBB:
31576  //  v_main = 0
31577  //
31578  // sinkMBB:
31579  //  v = phi(main, restore)
31580  //
31581  // restoreMBB:
31582  //  if base pointer being used, load it from frame
31583  //  v_restore = 1
31584
31585  MachineBasicBlock *thisMBB = MBB;
31586  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
31587  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
31588  MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
31589  MF->insert(I, mainMBB);
31590  MF->insert(I, sinkMBB);
31591  MF->push_back(restoreMBB);
31592  restoreMBB->setHasAddressTaken();
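  // restoreMBB's address is what gets written into the jump buffer below, so
  // flag it as address-taken to keep later passes from removing or merging it.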
31593
31594  MachineInstrBuilder MIB;
31595
31596  // Transfer the remainder of BB and its successor edges to sinkMBB.
31597  sinkMBB->splice(sinkMBB->begin(), MBB,
31598                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
31599  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
31600
31601  // thisMBB:
31602  unsigned PtrStoreOpc = 0;
31603  unsigned LabelReg = 0;
31604  const int64_t LabelOffset = 1 * PVT.getStoreSize();
31605  bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
31606                     !isPositionIndependent();
31607
31608  // Prepare the resume IP, either in a register or as an immediate.
31609  if (!UseImmLabel) {
31610    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
31611    const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
31612    LabelReg = MRI.createVirtualRegister(PtrRC);
31613    if (Subtarget.is64Bit()) {
31614      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
31615              .addReg(X86::RIP)
31616              .addImm(0)
31617              .addReg(0)
31618              .addMBB(restoreMBB)
31619              .addReg(0);
31620    } else {
31621      const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
31622      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
31623              .addReg(XII->getGlobalBaseReg(MF))
31624              .addImm(0)
31625              .addReg(0)
31626              .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
31627              .addReg(0);
31628    }
31629  } else
31630    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
31631  // Store the resume IP into the jump buffer.
31632  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
31633  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31634    if (i == X86::AddrDisp)
31635      MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
31636    else
31637      MIB.add(MI.getOperand(MemOpndSlot + i));
31638  }
31639  if (!UseImmLabel)
31640    MIB.addReg(LabelReg);
31641  else
31642    MIB.addMBB(restoreMBB);
31643  MIB.setMemRefs(MMOs);
31644
31645  if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
31646    emitSetJmpShadowStackFix(MI, thisMBB);
31647  }
31648
31649  // Setup
31650  MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
31651          .addMBB(restoreMBB);
31652
31653  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
31654  MIB.addRegMask(RegInfo->getNoPreservedMask());
31655  thisMBB->addSuccessor(mainMBB);
31656  thisMBB->addSuccessor(restoreMBB);
31657
31658  // mainMBB:
31659  //  v_main = 0
31660  BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
31661  mainMBB->addSuccessor(sinkMBB);
31662
31663  // sinkMBB:
31664  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
31665          TII->get(X86::PHI), DstReg)
31666    .addReg(mainDstReg).addMBB(mainMBB)
31667    .addReg(restoreDstReg).addMBB(restoreMBB);
31668
31669  // restoreMBB:
31670  if (RegInfo->hasBasePointer(*MF)) {
31671    const bool Uses64BitFramePtr =
31672        Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
31673    X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
31674    X86FI->setRestoreBasePointer(MF);
31675    Register FramePtr = RegInfo->getFrameRegister(*MF);
31676    Register BasePtr = RegInfo->getBaseRegister();
31677    unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
31678    addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
31679                 FramePtr, true, X86FI->getRestoreBasePointerOffset())
31680      .setMIFlag(MachineInstr::FrameSetup);
31681  }
31682  BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
31683  BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
31684  restoreMBB->addSuccessor(sinkMBB);
31685
31686  MI.eraseFromParent();
31687  return sinkMBB;
31688}
31689
31690/// Fix the shadow stack using the previously saved SSP value.
31691/// \sa emitSetJmpShadowStackFix
31692/// \param [in] MI The temporary Machine Instruction for the builtin.
31693/// \param [in] MBB The Machine Basic Block that will be modified.
31694/// \return The sink MBB that will perform the future indirect branch.
31695MachineBasicBlock *
31696X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
31697                                             MachineBasicBlock *MBB) const {
31698  DebugLoc DL = MI.getDebugLoc();
31699  MachineFunction *MF = MBB->getParent();
31700  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31701  MachineRegisterInfo &MRI = MF->getRegInfo();
31702
31703  // Memory Reference
31704  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
31705                                           MI.memoperands_end());
31706
31707  MVT PVT = getPointerTy(MF->getDataLayout());
31708  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
31709
31710  // checkSspMBB:
31711  //         xor vreg1, vreg1
31712  //         rdssp vreg1
31713  //         test vreg1, vreg1
31714  //         je sinkMBB   # Jump if Shadow Stack is not supported
31715  // fallMBB:
31716  //         mov buf+24/12(%rip), vreg2
31717  //         sub vreg1, vreg2
31718  //         jbe sinkMBB  # No need to fix the Shadow Stack
31719  // fixShadowMBB:
31720  //         shr 3/2, vreg2
31721  //         incssp vreg2  # fix the SSP according to the lower 8 bits
31722  //         shr 8, vreg2
31723  //         je sinkMBB
31724  // fixShadowLoopPrepareMBB:
31725  //         shl vreg2
31726  //         mov 128, vreg3
31727  // fixShadowLoopMBB:
31728  //         incssp vreg3
31729  //         dec vreg2
31730  //         jne fixShadowLoopMBB # Iterate until you finish fixing
31731  //                              # the Shadow Stack
31732  // sinkMBB:
31733
31734  MachineFunction::iterator I = ++MBB->getIterator();
31735  const BasicBlock *BB = MBB->getBasicBlock();
31736
31737  MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
31738  MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
31739  MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
31740  MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
31741  MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
31742  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
31743  MF->insert(I, checkSspMBB);
31744  MF->insert(I, fallMBB);
31745  MF->insert(I, fixShadowMBB);
31746  MF->insert(I, fixShadowLoopPrepareMBB);
31747  MF->insert(I, fixShadowLoopMBB);
31748  MF->insert(I, sinkMBB);
31749
31750  // Transfer the remainder of BB and its successor edges to sinkMBB.
31751  sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
31752                  MBB->end());
31753  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
31754
31755  MBB->addSuccessor(checkSspMBB);
31756
31757  // Initialize a register with zero.
31758  Register ZReg = MRI.createVirtualRegister(PtrRC);
31759  unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
31760  BuildMI(checkSspMBB, DL, TII->get(XorRROpc))
31761      .addDef(ZReg)
31762      .addReg(ZReg, RegState::Undef)
31763      .addReg(ZReg, RegState::Undef);
31764
31765  // Read the current SSP register value into the zeroed register.
31766  Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
31767  unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
31768  BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
31769
31770  // Check whether the SSP register is zero (shadow stacks not in use) and, if
31771  // so, jump directly to the sink.
31772  unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
31773  BuildMI(checkSspMBB, DL, TII->get(TestRROpc))
31774      .addReg(SSPCopyReg)
31775      .addReg(SSPCopyReg);
31776  BuildMI(checkSspMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
31777  checkSspMBB->addSuccessor(sinkMBB);
31778  checkSspMBB->addSuccessor(fallMBB);
31779
31780  // Reload the previously saved SSP register value.
31781  Register PrevSSPReg = MRI.createVirtualRegister(PtrRC);
31782  unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
31783  const int64_t SSPOffset = 3 * PVT.getStoreSize();
31784  MachineInstrBuilder MIB =
31785      BuildMI(fallMBB, DL, TII->get(PtrLoadOpc), PrevSSPReg);
31786  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31787    const MachineOperand &MO = MI.getOperand(i);
31788    if (i == X86::AddrDisp)
31789      MIB.addDisp(MO, SSPOffset);
31790    else if (MO.isReg()) // Don't add the whole operand, we don't want to
31791                         // preserve kill flags.
31792      MIB.addReg(MO.getReg());
31793    else
31794      MIB.add(MO);
31795  }
31796  MIB.setMemRefs(MMOs);
31797
31798  // Subtract the current SSP from the previous SSP.
31799  Register SspSubReg = MRI.createVirtualRegister(PtrRC);
31800  unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
31801  BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg)
31802      .addReg(PrevSSPReg)
31803      .addReg(SSPCopyReg);
31804
31805  // Jump to the sink if PrevSSPReg <= SSPCopyReg.
31806  BuildMI(fallMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_BE);
31807  fallMBB->addSuccessor(sinkMBB);
31808  fallMBB->addSuccessor(fixShadowMBB);
31809
31810  // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
31811  unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
31812  unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
31813  Register SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
31814  BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg)
31815      .addReg(SspSubReg)
31816      .addImm(Offset);
31817
31818  // Advance the SSP using only the lower 8 bits of the delta.
31819  unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
31820  BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
31821
31822  // Shift out the lower 8 bits, which have just been handled.
31823  Register SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
31824  BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg)
31825      .addReg(SspFirstShrReg)
31826      .addImm(8);
31827
31828  // Jump if the result of the shift is zero.
31829  BuildMI(fixShadowMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
31830  fixShadowMBB->addSuccessor(sinkMBB);
31831  fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
31832
31833  // Do a single shift left.
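  // The count remaining after the shift right by 8 is in units of 256 slots;
  // each loop iteration below advances the SSP by 128 slots, so doubling the
  // count here compensates.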
31834  unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1;
31835  Register SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
31836  BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg)
31837      .addReg(SspSecondShrReg);
31838
31839  // Save the value 128 to a register (will be used next with incssp).
31840  Register Value128InReg = MRI.createVirtualRegister(PtrRC);
31841  unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
31842  BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg)
31843      .addImm(128);
31844  fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
31845
31846  // Since incssp only looks at the lower 8 bits, we might need to do several
31847  // iterations of incssp until we finish fixing the shadow stack.
31848  Register DecReg = MRI.createVirtualRegister(PtrRC);
31849  Register CounterReg = MRI.createVirtualRegister(PtrRC);
31850  BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg)
31851      .addReg(SspAfterShlReg)
31852      .addMBB(fixShadowLoopPrepareMBB)
31853      .addReg(DecReg)
31854      .addMBB(fixShadowLoopMBB);
31855
31856  // Every iteration we increase the SSP by 128.
31857  BuildMI(fixShadowLoopMBB, DL, TII->get(IncsspOpc)).addReg(Value128InReg);
31858
31859  // Every iteration we decrement the counter by 1.
31860  unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
31861  BuildMI(fixShadowLoopMBB, DL, TII->get(DecROpc), DecReg).addReg(CounterReg);
31862
31863  // Jump if the counter is not zero yet.
31864  BuildMI(fixShadowLoopMBB, DL, TII->get(X86::JCC_1)).addMBB(fixShadowLoopMBB).addImm(X86::COND_NE);
31865  fixShadowLoopMBB->addSuccessor(sinkMBB);
31866  fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);
31867
31868  return sinkMBB;
31869}
31870
31871MachineBasicBlock *
31872X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
31873                                     MachineBasicBlock *MBB) const {
31874  DebugLoc DL = MI.getDebugLoc();
31875  MachineFunction *MF = MBB->getParent();
31876  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31877  MachineRegisterInfo &MRI = MF->getRegInfo();
31878
31879  // Memory Reference
31880  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
31881                                           MI.memoperands_end());
31882
31883  MVT PVT = getPointerTy(MF->getDataLayout());
31884  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
31885         "Invalid Pointer Size!");
31886
31887  const TargetRegisterClass *RC =
31888    (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
31889  Register Tmp = MRI.createVirtualRegister(RC);
31890  // Since FP is only updated here but NOT referenced, it's treated as GPR.
31891  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
31892  unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
31893  Register SP = RegInfo->getStackRegister();
31894
31895  MachineInstrBuilder MIB;
31896
31897  const int64_t LabelOffset = 1 * PVT.getStoreSize();
31898  const int64_t SPOffset = 2 * PVT.getStoreSize();
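  // Jump-buffer layout used by this lowering (pointer-sized slots): slot 0
  // holds the frame pointer, slot 1 the resume IP, slot 2 the stack pointer,
  // and slot 3 the shadow stack pointer when CET is enabled.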
31899
31900  unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
31901  unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
31902
31903  MachineBasicBlock *thisMBB = MBB;
31904
31905  // When CET shadow stacks are enabled, we need to fix the shadow stack.
31906  if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
31907    thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
31908  }
31909
31910  // Reload FP
31911  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
31912  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31913    const MachineOperand &MO = MI.getOperand(i);
31914    if (MO.isReg()) // Don't add the whole operand, we don't want to
31915                    // preserve kill flags.
31916      MIB.addReg(MO.getReg());
31917    else
31918      MIB.add(MO);
31919  }
31920  MIB.setMemRefs(MMOs);
31921
31922  // Reload IP
31923  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
31924  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31925    const MachineOperand &MO = MI.getOperand(i);
31926    if (i == X86::AddrDisp)
31927      MIB.addDisp(MO, LabelOffset);
31928    else if (MO.isReg()) // Don't add the whole operand, we don't want to
31929                         // preserve kill flags.
31930      MIB.addReg(MO.getReg());
31931    else
31932      MIB.add(MO);
31933  }
31934  MIB.setMemRefs(MMOs);
31935
31936  // Reload SP
31937  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
31938  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31939    if (i == X86::AddrDisp)
31940      MIB.addDisp(MI.getOperand(i), SPOffset);
31941    else
31942      MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
31943                                 // the last instruction of the expansion.
31944  }
31945  MIB.setMemRefs(MMOs);
31946
31947  // Jump
31948  BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
31949
31950  MI.eraseFromParent();
31951  return thisMBB;
31952}
31953
31954void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
31955                                               MachineBasicBlock *MBB,
31956                                               MachineBasicBlock *DispatchBB,
31957                                               int FI) const {
31958  DebugLoc DL = MI.getDebugLoc();
31959  MachineFunction *MF = MBB->getParent();
31960  MachineRegisterInfo *MRI = &MF->getRegInfo();
31961  const X86InstrInfo *TII = Subtarget.getInstrInfo();
31962
31963  MVT PVT = getPointerTy(MF->getDataLayout());
31964  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
31965
31966  unsigned Op = 0;
31967  unsigned VR = 0;
31968
31969  bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
31970                     !isPositionIndependent();
31971
31972  if (UseImmLabel) {
31973    Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
31974  } else {
31975    const TargetRegisterClass *TRC =
31976        (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
31977    VR = MRI->createVirtualRegister(TRC);
31978    Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
31979
31980    if (Subtarget.is64Bit())
31981      BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
31982          .addReg(X86::RIP)
31983          .addImm(1)
31984          .addReg(0)
31985          .addMBB(DispatchBB)
31986          .addReg(0);
31987    else
31988      BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
31989          .addReg(0) /* TII->getGlobalBaseReg(MF) */
31990          .addImm(1)
31991          .addReg(0)
31992          .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
31993          .addReg(0);
31994  }
31995
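  // Store the dispatch block's address into the jump buffer of the SjLj
  // function context (frame index FI, offset 56 on 64-bit targets and 36 on
  // 32-bit ones); the slot is assumed to match the layout that SjLjEHPrepare
  // creates, and the unwinder resumes execution there.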
31996  MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
31997  addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
31998  if (UseImmLabel)
31999    MIB.addMBB(DispatchBB);
32000  else
32001    MIB.addReg(VR);
32002}
32003
32004MachineBasicBlock *
32005X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
32006                                         MachineBasicBlock *BB) const {
32007  DebugLoc DL = MI.getDebugLoc();
32008  MachineFunction *MF = BB->getParent();
32009  MachineRegisterInfo *MRI = &MF->getRegInfo();
32010  const X86InstrInfo *TII = Subtarget.getInstrInfo();
32011  int FI = MF->getFrameInfo().getFunctionContextIndex();
32012
32013  // Get a mapping of the call site numbers to all of the landing pads they're
32014  // associated with.
32015  DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
32016  unsigned MaxCSNum = 0;
32017  for (auto &MBB : *MF) {
32018    if (!MBB.isEHPad())
32019      continue;
32020
32021    MCSymbol *Sym = nullptr;
32022    for (const auto &MI : MBB) {
32023      if (MI.isDebugInstr())
32024        continue;
32025
32026      assert(MI.isEHLabel() && "expected EH_LABEL");
32027      Sym = MI.getOperand(0).getMCSymbol();
32028      break;
32029    }
32030
32031    if (!MF->hasCallSiteLandingPad(Sym))
32032      continue;
32033
32034    for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
32035      CallSiteNumToLPad[CSI].push_back(&MBB);
32036      MaxCSNum = std::max(MaxCSNum, CSI);
32037    }
32038  }
32039
32040  // Get an ordered list of the machine basic blocks for the jump table.
32041  std::vector<MachineBasicBlock *> LPadList;
32042  SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
32043  LPadList.reserve(CallSiteNumToLPad.size());
32044
32045  for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
32046    for (auto &LP : CallSiteNumToLPad[CSI]) {
32047      LPadList.push_back(LP);
32048      InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
32049    }
32050  }
32051
32052  assert(!LPadList.empty() &&
32053         "No landing pad destinations for the dispatch jump table!");
32054
32055  // Create the MBBs for the dispatch code.
32056
32057  // Shove the dispatch's address into the return slot in the function context.
32058  MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
32059  DispatchBB->setIsEHPad(true);
32060
32061  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
32062  BuildMI(TrapBB, DL, TII->get(X86::TRAP));
32063  DispatchBB->addSuccessor(TrapBB);
32064
32065  MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
32066  DispatchBB->addSuccessor(DispContBB);
32067
32068  // Insert MBBs.
32069  MF->push_back(DispatchBB);
32070  MF->push_back(DispContBB);
32071  MF->push_back(TrapBB);
32072
32073  // Insert code into the entry block that records the dispatch block's address
32074  // in the function context.
32075  SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
32076
32077  // Create the jump table and associated information
32078  unsigned JTE = getJumpTableEncoding();
32079  MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
32080  unsigned MJTI = JTI->createJumpTableIndex(LPadList);
32081
32082  const X86RegisterInfo &RI = TII->getRegisterInfo();
32083  // Add a register mask with no preserved registers.  This results in all
32084  // registers being marked as clobbered.
32085  if (RI.hasBasePointer(*MF)) {
32086    const bool FPIs64Bit =
32087        Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
32088    X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
32089    MFI->setRestoreBasePointer(MF);
32090
32091    Register FP = RI.getFrameRegister(*MF);
32092    Register BP = RI.getBaseRegister();
32093    unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
32094    addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
32095                 MFI->getRestoreBasePointerOffset())
32096        .addRegMask(RI.getNoPreservedMask());
32097  } else {
32098    BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
32099        .addRegMask(RI.getNoPreservedMask());
32100  }
32101
32102  // IReg is used as an index in a memory operand and therefore can't be SP
32103  Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
32104  addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
32105                    Subtarget.is64Bit() ? 8 : 4);
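  // The value loaded above is assumed to be the call-site number stored into
  // the function context before the corresponding invoke; bounds-check it
  // against the jump table and trap on anything out of range.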
32106  BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
32107      .addReg(IReg)
32108      .addImm(LPadList.size());
32109  BuildMI(DispatchBB, DL, TII->get(X86::JCC_1)).addMBB(TrapBB).addImm(X86::COND_AE);
32110
32111  if (Subtarget.is64Bit()) {
32112    Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
32113    Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
32114
32115    // leaq .LJTI0_0(%rip), BReg
32116    BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg)
32117        .addReg(X86::RIP)
32118        .addImm(1)
32119        .addReg(0)
32120        .addJumpTableIndex(MJTI)
32121        .addReg(0);
32122    // movzx IReg64, IReg
32123    BuildMI(DispContBB, DL, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
32124        .addImm(0)
32125        .addReg(IReg)
32126        .addImm(X86::sub_32bit);
32127
32128    switch (JTE) {
32129    case MachineJumpTableInfo::EK_BlockAddress:
32130      // jmpq *(BReg,IReg64,8)
32131      BuildMI(DispContBB, DL, TII->get(X86::JMP64m))
32132          .addReg(BReg)
32133          .addImm(8)
32134          .addReg(IReg64)
32135          .addImm(0)
32136          .addReg(0);
32137      break;
32138    case MachineJumpTableInfo::EK_LabelDifference32: {
32139      Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
32140      Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
32141      Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
32142
32143      // movl (BReg,IReg64,4), OReg
32144      BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg)
32145          .addReg(BReg)
32146          .addImm(4)
32147          .addReg(IReg64)
32148          .addImm(0)
32149          .addReg(0);
32150      // movsx OReg64, OReg
32151      BuildMI(DispContBB, DL, TII->get(X86::MOVSX64rr32), OReg64).addReg(OReg);
32152      // addq BReg, OReg64, TReg
32153      BuildMI(DispContBB, DL, TII->get(X86::ADD64rr), TReg)
32154          .addReg(OReg64)
32155          .addReg(BReg);
32156      // jmpq *TReg
32157      BuildMI(DispContBB, DL, TII->get(X86::JMP64r)).addReg(TReg);
32158      break;
32159    }
32160    default:
32161      llvm_unreachable("Unexpected jump table encoding");
32162    }
32163  } else {
32164    // jmpl *.LJTI0_0(,IReg,4)
32165    BuildMI(DispContBB, DL, TII->get(X86::JMP32m))
32166        .addReg(0)
32167        .addImm(4)
32168        .addReg(IReg)
32169        .addJumpTableIndex(MJTI)
32170        .addReg(0);
32171  }
32172
32173  // Add the jump table entries as successors to the MBB.
32174  SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
32175  for (auto &LP : LPadList)
32176    if (SeenMBBs.insert(LP).second)
32177      DispContBB->addSuccessor(LP);
32178
32179  // N.B. the order the invoke BBs are processed in doesn't matter here.
32180  SmallVector<MachineBasicBlock *, 64> MBBLPads;
32181  const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
32182  for (MachineBasicBlock *MBB : InvokeBBs) {
32183    // Remove the landing pad successor from the invoke block and replace it
32184    // with the new dispatch block.
32185    // Keep a copy of Successors since it's modified inside the loop.
32186    SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
32187                                                   MBB->succ_rend());
32188    // FIXME: Avoid quadratic complexity.
32189    for (auto MBBS : Successors) {
32190      if (MBBS->isEHPad()) {
32191        MBB->removeSuccessor(MBBS);
32192        MBBLPads.push_back(MBBS);
32193      }
32194    }
32195
32196    MBB->addSuccessor(DispatchBB);
32197
32198    // Find the invoke call and mark all of the callee-saved registers as
32199    // 'implicit defined' so that they're spilled.  This prevents later passes
32200    // from moving instructions to before the EH block, where they would never
32201    // be executed.
32202    for (auto &II : reverse(*MBB)) {
32203      if (!II.isCall())
32204        continue;
32205
32206      DenseMap<unsigned, bool> DefRegs;
32207      for (auto &MOp : II.operands())
32208        if (MOp.isReg())
32209          DefRegs[MOp.getReg()] = true;
32210
32211      MachineInstrBuilder MIB(*MF, &II);
32212      for (unsigned RegIdx = 0; SavedRegs[RegIdx]; ++RegIdx) {
32213        unsigned Reg = SavedRegs[RegIdx];
32214        if (!DefRegs[Reg])
32215          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
32216      }
32217
32218      break;
32219    }
32220  }
32221
32222  // Mark all former landing pads as non-landing pads.  The dispatch is the only
32223  // landing pad now.
32224  for (auto &LP : MBBLPads)
32225    LP->setIsEHPad(false);
32226
32227  // The instruction is gone now.
32228  MI.eraseFromParent();
32229  return BB;
32230}
32231
32232MachineBasicBlock *
32233X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
32234                                               MachineBasicBlock *BB) const {
32235  MachineFunction *MF = BB->getParent();
32236  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
32237  DebugLoc DL = MI.getDebugLoc();
32238
32239  switch (MI.getOpcode()) {
32240  default: llvm_unreachable("Unexpected instr type to insert");
32241  case X86::TLS_addr32:
32242  case X86::TLS_addr64:
32243  case X86::TLS_base_addr32:
32244  case X86::TLS_base_addr64:
32245    return EmitLoweredTLSAddr(MI, BB);
32246  case X86::INDIRECT_THUNK_CALL32:
32247  case X86::INDIRECT_THUNK_CALL64:
32248  case X86::INDIRECT_THUNK_TCRETURN32:
32249  case X86::INDIRECT_THUNK_TCRETURN64:
32250    return EmitLoweredIndirectThunk(MI, BB);
32251  case X86::CATCHRET:
32252    return EmitLoweredCatchRet(MI, BB);
32253  case X86::CATCHPAD:
32254    return EmitLoweredCatchPad(MI, BB);
32255  case X86::SEG_ALLOCA_32:
32256  case X86::SEG_ALLOCA_64:
32257    return EmitLoweredSegAlloca(MI, BB);
32258  case X86::TLSCall_32:
32259  case X86::TLSCall_64:
32260    return EmitLoweredTLSCall(MI, BB);
32261  case X86::CMOV_FR32:
32262  case X86::CMOV_FR32X:
32263  case X86::CMOV_FR64:
32264  case X86::CMOV_FR64X:
32265  case X86::CMOV_GR8:
32266  case X86::CMOV_GR16:
32267  case X86::CMOV_GR32:
32268  case X86::CMOV_RFP32:
32269  case X86::CMOV_RFP64:
32270  case X86::CMOV_RFP80:
32271  case X86::CMOV_VR128:
32272  case X86::CMOV_VR128X:
32273  case X86::CMOV_VR256:
32274  case X86::CMOV_VR256X:
32275  case X86::CMOV_VR512:
32276  case X86::CMOV_VK2:
32277  case X86::CMOV_VK4:
32278  case X86::CMOV_VK8:
32279  case X86::CMOV_VK16:
32280  case X86::CMOV_VK32:
32281  case X86::CMOV_VK64:
32282    return EmitLoweredSelect(MI, BB);
32283
32284  case X86::RDFLAGS32:
32285  case X86::RDFLAGS64: {
32286    unsigned PushF =
32287        MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
32288    unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
32289    MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
32290    // Permit reads of the EFLAGS and DF registers without them being defined.
32291    // This intrinsic exists to read external processor state in flags, such as
32292    // the trap flag, interrupt flag, and direction flag, none of which are
32293    // modeled by the backend.
32294    assert(Push->getOperand(2).getReg() == X86::EFLAGS &&
32295           "Unexpected register in operand!");
32296    Push->getOperand(2).setIsUndef();
32297    assert(Push->getOperand(3).getReg() == X86::DF &&
32298           "Unexpected register in operand!");
32299    Push->getOperand(3).setIsUndef();
32300    BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
32301
32302    MI.eraseFromParent(); // The pseudo is gone now.
32303    return BB;
32304  }
32305
32306  case X86::WRFLAGS32:
32307  case X86::WRFLAGS64: {
32308    unsigned Push =
32309        MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
32310    unsigned PopF =
32311        MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
32312    BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
32313    BuildMI(*BB, MI, DL, TII->get(PopF));
32314
32315    MI.eraseFromParent(); // The pseudo is gone now.
32316    return BB;
32317  }
32318
32319  case X86::FP32_TO_INT16_IN_MEM:
32320  case X86::FP32_TO_INT32_IN_MEM:
32321  case X86::FP32_TO_INT64_IN_MEM:
32322  case X86::FP64_TO_INT16_IN_MEM:
32323  case X86::FP64_TO_INT32_IN_MEM:
32324  case X86::FP64_TO_INT64_IN_MEM:
32325  case X86::FP80_TO_INT16_IN_MEM:
32326  case X86::FP80_TO_INT32_IN_MEM:
32327  case X86::FP80_TO_INT64_IN_MEM: {
32328    // Change the floating point control register to use "round towards zero"
32329    // mode when truncating to an integer value.
32330    int OrigCWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
32331    addFrameReference(BuildMI(*BB, MI, DL,
32332                              TII->get(X86::FNSTCW16m)), OrigCWFrameIdx);
32333
32334    // Load the old value of the control word...
32335    Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
32336    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW),
32337                      OrigCWFrameIdx);
32338
32339    // OR 0b11 into bits 10 and 11; 0b11 is the encoding for round toward zero.
32340    Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
32341    BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW)
32342      .addReg(OldCW, RegState::Kill).addImm(0xC00);
32343
32344    // Extract to 16 bits.
32345    Register NewCW16 =
32346        MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
32347    BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16)
32348      .addReg(NewCW, RegState::Kill, X86::sub_16bit);
32349
32350    // Prepare memory for FLDCW.
32351    int NewCWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
32352    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)),
32353                      NewCWFrameIdx)
32354      .addReg(NewCW16, RegState::Kill);
32355
32356    // Reload the modified control word now...
32357    addFrameReference(BuildMI(*BB, MI, DL,
32358                              TII->get(X86::FLDCW16m)), NewCWFrameIdx);
32359
32360    // Get the X86 opcode to use.
32361    unsigned Opc;
32362    switch (MI.getOpcode()) {
32363    default: llvm_unreachable("illegal opcode!");
32364    case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
32365    case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
32366    case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
32367    case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
32368    case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
32369    case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
32370    case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
32371    case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
32372    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
32373    }
32374
32375    X86AddressMode AM = getAddressFromInstr(&MI, 0);
32376    addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
32377        .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
32378
32379    // Reload the original control word now.
32380    addFrameReference(BuildMI(*BB, MI, DL,
32381                              TII->get(X86::FLDCW16m)), OrigCWFrameIdx);
32382
32383    MI.eraseFromParent(); // The pseudo instruction is gone now.
32384    return BB;
32385  }
32386
32387  // xbegin
32388  case X86::XBEGIN:
32389    return emitXBegin(MI, BB, Subtarget.getInstrInfo());
32390
32391  case X86::VASTART_SAVE_XMM_REGS:
32392    return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
32393
32394  case X86::VAARG_64:
32395    return EmitVAARG64WithCustomInserter(MI, BB);
32396
32397  case X86::EH_SjLj_SetJmp32:
32398  case X86::EH_SjLj_SetJmp64:
32399    return emitEHSjLjSetJmp(MI, BB);
32400
32401  case X86::EH_SjLj_LongJmp32:
32402  case X86::EH_SjLj_LongJmp64:
32403    return emitEHSjLjLongJmp(MI, BB);
32404
32405  case X86::Int_eh_sjlj_setup_dispatch:
32406    return EmitSjLjDispatchBlock(MI, BB);
32407
32408  case TargetOpcode::STATEPOINT:
32409    // As an implementation detail, STATEPOINT shares the STACKMAP format at
32410    // this point in the process.  We diverge later.
32411    return emitPatchPoint(MI, BB);
32412
32413  case TargetOpcode::STACKMAP:
32414  case TargetOpcode::PATCHPOINT:
32415    return emitPatchPoint(MI, BB);
32416
32417  case TargetOpcode::PATCHABLE_EVENT_CALL:
32418    return emitXRayCustomEvent(MI, BB);
32419
32420  case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
32421    return emitXRayTypedEvent(MI, BB);
32422
32423  case X86::LCMPXCHG8B: {
32424    const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
32425    // In addition to the four E[ABCD] registers implied by the encoding,
32426    // CMPXCHG8B requires a memory operand. If the current architecture is
32427    // i686 and the current function needs a base pointer - which is ESI on
32428    // i686 - the register allocator would not be able to allocate registers
32429    // for an address of the form X(%reg, %reg, Y): there would never be
32430    // enough unreserved registers during regalloc (without the need for a
32431    // base pointer the only option would be X(%edi, %esi, Y)).
32432    // We give the register allocator a hand by precomputing the address in
32433    // a new vreg using LEA.
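    // Illustrative transformation (sketch only, using the comment's notation):
    //   cmpxchg8b X(%esi, %reg, Y)  ->  leal X(%esi, %reg, Y), %vreg
    //                                   cmpxchg8b (%vreg)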
32434
32435    // Nothing to do here if this is not i686 or there is no base pointer.
32436    if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
32437      return BB;
32438
32439    // Even though this code does not necessarily need the base pointer to
32440    // be ESI, we check for that anyway. The reason: if this assert fails,
32441    // something has changed in the compiler's base pointer handling, and it
32442    // most probably has to be addressed here as well.
32443    assert(TRI->getBaseRegister() == X86::ESI &&
32444           "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
32445           "base pointer in mind");
32446
32447    MachineRegisterInfo &MRI = MF->getRegInfo();
32448    MVT SPTy = getPointerTy(MF->getDataLayout());
32449    const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
32450    Register computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
32451
32452    X86AddressMode AM = getAddressFromInstr(&MI, 0);
32453    // Regalloc does not need any help when the memory operand of CMPXCHG8B
32454    // does not use an index register.
32455    if (AM.IndexReg == X86::NoRegister)
32456      return BB;
32457
32458    // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
32459    // four operand definitions that are E[ABCD] registers. We skip them and
32460    // then insert the LEA.
32461    MachineBasicBlock::reverse_iterator RMBBI(MI.getReverseIterator());
32462    while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX) ||
32463                                   RMBBI->definesRegister(X86::EBX) ||
32464                                   RMBBI->definesRegister(X86::ECX) ||
32465                                   RMBBI->definesRegister(X86::EDX))) {
32466      ++RMBBI;
32467    }
32468    MachineBasicBlock::iterator MBBI(RMBBI);
32469    addFullAddress(
32470        BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM);
32471
32472    setDirectAddressInInstr(&MI, 0, computedAddrVReg);
32473
32474    return BB;
32475  }
32476  case X86::LCMPXCHG16B:
32477    return BB;
32478  case X86::LCMPXCHG8B_SAVE_EBX:
32479  case X86::LCMPXCHG16B_SAVE_RBX: {
32480    unsigned BasePtr =
32481        MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
32482    if (!BB->isLiveIn(BasePtr))
32483      BB->addLiveIn(BasePtr);
32484    return BB;
32485  }
32486  }
32487}
32488
32489//===----------------------------------------------------------------------===//
32490//                           X86 Optimization Hooks
32491//===----------------------------------------------------------------------===//
32492
32493bool
32494X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
32495                                                const APInt &Demanded,
32496                                                TargetLoweringOpt &TLO) const {
32497  // Only optimize Ands to prevent shrinking a constant that could be
32498  // matched by movzx.
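  // For example, if only the low 4 bits of (and X, 0xff) are demanded, we keep
  // the mask as 0xff (still selectable as a movzbl) rather than letting it be
  // shrunk to 0xf.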
32499  if (Op.getOpcode() != ISD::AND)
32500    return false;
32501
32502  EVT VT = Op.getValueType();
32503
32504  // Ignore vectors.
32505  if (VT.isVector())
32506    return false;
32507
32508  unsigned Size = VT.getSizeInBits();
32509
32510  // Make sure the RHS really is a constant.
32511  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
32512  if (!C)
32513    return false;
32514
32515  const APInt &Mask = C->getAPIntValue();
32516
32517  // Clear all non-demanded bits initially.
32518  APInt ShrunkMask = Mask & Demanded;
32519
32520  // Find the width of the shrunk mask.
32521  unsigned Width = ShrunkMask.getActiveBits();
32522
32523  // If the mask is all 0s there's nothing to do here.
32524  if (Width == 0)
32525    return false;
32526
32527  // Find the next power of 2 width, rounding up to a byte.
32528  Width = PowerOf2Ceil(std::max(Width, 8U));
32529  // Truncate the width to size to handle illegal types.
32530  Width = std::min(Width, Size);
32531
32532  // Calculate a possible zero extend mask for this constant.
32533  APInt ZeroExtendMask = APInt::getLowBitsSet(Size, Width);
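  // For example, Size == 32 and Width == 8 gives ZeroExtendMask == 0x000000ff.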
32534
32535  // If we aren't changing the mask, just return true to keep it and prevent
32536  // the caller from optimizing.
32537  if (ZeroExtendMask == Mask)
32538    return true;
32539
32540  // Make sure the new mask can be represented by a combination of mask bits
32541  // and non-demanded bits.
32542  if (!ZeroExtendMask.isSubsetOf(Mask | ~Demanded))
32543    return false;
32544
32545  // Replace the constant with the zero extend mask.
32546  SDLoc DL(Op);
32547  SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
32548  SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
32549  return TLO.CombineTo(Op, NewOp);
32550}
32551
32552void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
32553                                                      KnownBits &Known,
32554                                                      const APInt &DemandedElts,
32555                                                      const SelectionDAG &DAG,
32556                                                      unsigned Depth) const {
32557  unsigned BitWidth = Known.getBitWidth();
32558  unsigned Opc = Op.getOpcode();
32559  EVT VT = Op.getValueType();
32560  assert((Opc >= ISD::BUILTIN_OP_END ||
32561          Opc == ISD::INTRINSIC_WO_CHAIN ||
32562          Opc == ISD::INTRINSIC_W_CHAIN ||
32563          Opc == ISD::INTRINSIC_VOID) &&
32564         "Should use MaskedValueIsZero if you don't know whether Op"
32565         " is a target node!");
32566
32567  Known.resetAll();
32568  switch (Opc) {
32569  default: break;
32570  case X86ISD::SETCC:
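    // X86 SETCC produces a 0/1 value in an i8, so every bit above bit 0 is
    // known to be zero.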
32571    Known.Zero.setBitsFrom(1);
32572    break;
32573  case X86ISD::MOVMSK: {
32574    unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
32575    Known.Zero.setBitsFrom(NumLoBits);
32576    break;
32577  }
32578  case X86ISD::PEXTRB:
32579  case X86ISD::PEXTRW: {
32580    SDValue Src = Op.getOperand(0);
32581    EVT SrcVT = Src.getValueType();
32582    APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
32583                                            Op.getConstantOperandVal(1));
32584    Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
32585    Known = Known.zextOrTrunc(BitWidth, false);
32586    Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
32587    break;
32588  }
32589  case X86ISD::VSRAI:
32590  case X86ISD::VSHLI:
32591  case X86ISD::VSRLI: {
32592    unsigned ShAmt = Op.getConstantOperandVal(1);
32593    if (ShAmt >= VT.getScalarSizeInBits()) {
32594      Known.setAllZero();
32595      break;
32596    }
32597
32598    Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
32599    if (Opc == X86ISD::VSHLI) {
32600      Known.Zero <<= ShAmt;
32601      Known.One <<= ShAmt;
32602      // Low bits are known zero.
32603      Known.Zero.setLowBits(ShAmt);
32604    } else if (Opc == X86ISD::VSRLI) {
32605      Known.Zero.lshrInPlace(ShAmt);
32606      Known.One.lshrInPlace(ShAmt);
32607      // High bits are known zero.
32608      Known.Zero.setHighBits(ShAmt);
32609    } else {
32610      Known.Zero.ashrInPlace(ShAmt);
32611      Known.One.ashrInPlace(ShAmt);
32612    }
32613    break;
32614  }
32615  case X86ISD::PACKUS: {
32616    // PACKUS is just a truncation if the upper half is zero.
32617    APInt DemandedLHS, DemandedRHS;
32618    getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
32619
32620    Known.One = APInt::getAllOnesValue(BitWidth * 2);
32621    Known.Zero = APInt::getAllOnesValue(BitWidth * 2);
32622
32623    KnownBits Known2;
32624    if (!!DemandedLHS) {
32625      Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
32626      Known.One &= Known2.One;
32627      Known.Zero &= Known2.Zero;
32628    }
32629    if (!!DemandedRHS) {
32630      Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
32631      Known.One &= Known2.One;
32632      Known.Zero &= Known2.Zero;
32633    }
32634
32635    if (Known.countMinLeadingZeros() < BitWidth)
32636      Known.resetAll();
32637    Known = Known.trunc(BitWidth);
32638    break;
32639  }
32640  case X86ISD::ANDNP: {
32641    KnownBits Known2;
32642    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
32643    Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
32644
32645    // ANDNP = (~X & Y);
32646    Known.One &= Known2.Zero;
32647    Known.Zero |= Known2.One;
32648    break;
32649  }
32650  case X86ISD::FOR: {
32651    KnownBits Known2;
32652    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
32653    Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
32654
32655    // Output known-0 bits are only known if clear in both the LHS & RHS.
32656    Known.Zero &= Known2.Zero;
32657    // Output known-1 bits are known to be set if set in either the LHS | RHS.
32658    Known.One |= Known2.One;
32659    break;
32660  }
32661  case X86ISD::PSADBW: {
32662    assert(VT.getScalarType() == MVT::i64 &&
32663           Op.getOperand(0).getValueType().getScalarType() == MVT::i8 &&
32664           "Unexpected PSADBW types");
32665
32666    // PSADBW - fills low 16 bits and zeros upper 48 bits of each i64 result.
32667    Known.Zero.setBitsFrom(16);
32668    break;
32669  }
32670  case X86ISD::CMOV: {
32671    Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
32672    // If we don't know any bits, early out.
32673    if (Known.isUnknown())
32674      break;
32675    KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
32676
32677    // Only known if known in both the LHS and RHS.
32678    Known.One &= Known2.One;
32679    Known.Zero &= Known2.Zero;
32680    break;
32681  }
32682  }
32683
32684  // Handle target shuffles.
32685  // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
32686  if (isTargetShuffle(Opc)) {
32687    bool IsUnary;
32688    SmallVector<int, 64> Mask;
32689    SmallVector<SDValue, 2> Ops;
32690    if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
32691                             IsUnary)) {
32692      unsigned NumOps = Ops.size();
32693      unsigned NumElts = VT.getVectorNumElements();
32694      if (Mask.size() == NumElts) {
32695        SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
32696        Known.Zero.setAllBits(); Known.One.setAllBits();
32697        for (unsigned i = 0; i != NumElts; ++i) {
32698          if (!DemandedElts[i])
32699            continue;
32700          int M = Mask[i];
32701          if (M == SM_SentinelUndef) {
32702            // For UNDEF elements, we don't know anything about the common state
32703            // of the shuffle result.
32704            Known.resetAll();
32705            break;
32706          } else if (M == SM_SentinelZero) {
32707            Known.One.clearAllBits();
32708            continue;
32709          }
32710          assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
32711                 "Shuffle index out of range");
32712
32713          unsigned OpIdx = (unsigned)M / NumElts;
32714          unsigned EltIdx = (unsigned)M % NumElts;
32715          if (Ops[OpIdx].getValueType() != VT) {
32716            // TODO - handle target shuffle ops with different value types.
32717            Known.resetAll();
32718            break;
32719          }
32720          DemandedOps[OpIdx].setBit(EltIdx);
32721        }
32722        // Known bits are the values that are shared by every demanded element.
32723        for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
32724          if (!DemandedOps[i])
32725            continue;
32726          KnownBits Known2 =
32727              DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
32728          Known.One &= Known2.One;
32729          Known.Zero &= Known2.Zero;
32730        }
32731      }
32732    }
32733  }
32734}
32735
32736unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
32737    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
32738    unsigned Depth) const {
32739  EVT VT = Op.getValueType();
32740  unsigned VTBits = VT.getScalarSizeInBits();
32741  unsigned Opcode = Op.getOpcode();
32742  switch (Opcode) {
32743  case X86ISD::SETCC_CARRY:
32744    // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
32745    return VTBits;
32746
32747  case X86ISD::VTRUNC: {
32748    // TODO: Add DemandedElts support.
32749    SDValue Src = Op.getOperand(0);
32750    unsigned NumSrcBits = Src.getScalarValueSizeInBits();
32751    assert(VTBits < NumSrcBits && "Illegal truncation input type");
32752    unsigned Tmp = DAG.ComputeNumSignBits(Src, Depth + 1);
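    // Truncation removes (NumSrcBits - VTBits) high bits; any sign bits the
    // source has beyond that count survive into the narrower result.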
32753    if (Tmp > (NumSrcBits - VTBits))
32754      return Tmp - (NumSrcBits - VTBits);
32755    return 1;
32756  }
32757
32758  case X86ISD::PACKSS: {
32759    // PACKSS is just a truncation if the sign bits extend to the packed size.
32760    APInt DemandedLHS, DemandedRHS;
32761    getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
32762                        DemandedRHS);
32763
32764    unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
32765    unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
32766    if (!!DemandedLHS)
32767      Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
32768    if (!!DemandedRHS)
32769      Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
32770    unsigned Tmp = std::min(Tmp0, Tmp1);
32771    if (Tmp > (SrcBits - VTBits))
32772      return Tmp - (SrcBits - VTBits);
32773    return 1;
32774  }
32775
32776  case X86ISD::VSHLI: {
32777    SDValue Src = Op.getOperand(0);
32778    const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
32779    if (ShiftVal.uge(VTBits))
32780      return VTBits; // Shifted all bits out --> zero.
32781    unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
32782    if (ShiftVal.uge(Tmp))
32783      return 1; // Shifted all sign bits out --> unknown.
32784    return Tmp - ShiftVal.getZExtValue();
32785  }
32786
32787  case X86ISD::VSRAI: {
32788    SDValue Src = Op.getOperand(0);
32789    APInt ShiftVal = Op.getConstantOperandAPInt(1);
32790    if (ShiftVal.uge(VTBits - 1))
32791      return VTBits; // Sign splat.
32792    unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
32793    ShiftVal += Tmp;
32794    return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
32795  }
32796
32797  case X86ISD::PCMPGT:
32798  case X86ISD::PCMPEQ:
32799  case X86ISD::CMPP:
32800  case X86ISD::VPCOM:
32801  case X86ISD::VPCOMU:
32802    // Vector compares return zero/all-bits result values.
32803    return VTBits;
32804
32805  case X86ISD::ANDNP: {
32806    unsigned Tmp0 =
32807        DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
32808    if (Tmp0 == 1) return 1; // Early out.
32809    unsigned Tmp1 =
32810        DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
32811    return std::min(Tmp0, Tmp1);
32812  }
32813
32814  case X86ISD::CMOV: {
32815    unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
32816    if (Tmp0 == 1) return 1;  // Early out.
32817    unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
32818    return std::min(Tmp0, Tmp1);
32819  }
32820  }
32821
32822  // Handle target shuffles.
32823  // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
32824  if (isTargetShuffle(Opcode)) {
32825    bool IsUnary;
32826    SmallVector<int, 64> Mask;
32827    SmallVector<SDValue, 2> Ops;
32828    if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
32829                             IsUnary)) {
32830      unsigned NumOps = Ops.size();
32831      unsigned NumElts = VT.getVectorNumElements();
32832      if (Mask.size() == NumElts) {
32833        SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
32834        for (unsigned i = 0; i != NumElts; ++i) {
32835          if (!DemandedElts[i])
32836            continue;
32837          int M = Mask[i];
32838          if (M == SM_SentinelUndef) {
32839            // For UNDEF elements, we don't know anything about the common state
32840            // of the shuffle result.
32841            return 1;
32842          } else if (M == SM_SentinelZero) {
32843            // Zero = all sign bits.
32844            continue;
32845          }
32846          assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
32847                 "Shuffle index out of range");
32848
32849          unsigned OpIdx = (unsigned)M / NumElts;
32850          unsigned EltIdx = (unsigned)M % NumElts;
32851          if (Ops[OpIdx].getValueType() != VT) {
32852            // TODO - handle target shuffle ops with different value types.
32853            return 1;
32854          }
32855          DemandedOps[OpIdx].setBit(EltIdx);
32856        }
32857        unsigned Tmp0 = VTBits;
32858        for (unsigned i = 0; i != NumOps && Tmp0 > 1; ++i) {
32859          if (!DemandedOps[i])
32860            continue;
32861          unsigned Tmp1 =
32862              DAG.ComputeNumSignBits(Ops[i], DemandedOps[i], Depth + 1);
32863          Tmp0 = std::min(Tmp0, Tmp1);
32864        }
32865        return Tmp0;
32866      }
32867    }
32868  }
32869
32870  // Fallback case.
32871  return 1;
32872}
32873
32874SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
32875  if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
32876    return N->getOperand(0);
32877  return N;
32878}
32879
32880// Attempt to match a combined shuffle mask against supported unary shuffle
32881// instructions.
32882// TODO: Investigate sharing more of this with shuffle lowering.
32883static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
32884                              bool AllowFloatDomain, bool AllowIntDomain,
32885                              SDValue &V1, const SDLoc &DL, SelectionDAG &DAG,
32886                              const X86Subtarget &Subtarget, unsigned &Shuffle,
32887                              MVT &SrcVT, MVT &DstVT) {
32888  unsigned NumMaskElts = Mask.size();
32889  unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
32890
32891  // Match against a VZEXT_MOVL vXi32 zero-extending instruction.
32892  if (MaskEltSize == 32 && isUndefOrEqual(Mask[0], 0) &&
32893      isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) {
32894    Shuffle = X86ISD::VZEXT_MOVL;
32895    SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
32896    return true;
32897  }
32898
32899  // Match against an ANY/ZERO_EXTEND_VECTOR_INREG instruction.
32900  // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
32901  if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
32902                         (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
32903    unsigned MaxScale = 64 / MaskEltSize;
32904    for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
32905      bool MatchAny = true;
32906      bool MatchZero = true;
32907      unsigned NumDstElts = NumMaskElts / Scale;
32908      for (unsigned i = 0; i != NumDstElts && (MatchAny || MatchZero); ++i) {
32909        if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
32910          MatchAny = MatchZero = false;
32911          break;
32912        }
32913        MatchAny &= isUndefInRange(Mask, (i * Scale) + 1, Scale - 1);
32914        MatchZero &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
32915      }
32916      if (MatchAny || MatchZero) {
32917        assert(MatchZero && "Failed to match zext but matched aext?");
32918        unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
32919        MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() :
32920                                            MVT::getIntegerVT(MaskEltSize);
32921        SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
32922
32923        if (SrcVT.getSizeInBits() != MaskVT.getSizeInBits())
32924          V1 = extractSubVector(V1, 0, DAG, DL, SrcSize);
32925
32926        Shuffle = unsigned(MatchAny ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND);
32927        if (SrcVT.getVectorNumElements() != NumDstElts)
32928          Shuffle = getOpcode_EXTEND_VECTOR_INREG(Shuffle);
32929
32930        DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
32931        DstVT = MVT::getVectorVT(DstVT, NumDstElts);
32932        return true;
32933      }
32934    }
32935  }
32936
32937  // Match against a VZEXT_MOVL instruction; SSE1 only supports 32 bits (MOVSS).
32938  if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2())) &&
32939      isUndefOrEqual(Mask[0], 0) &&
32940      isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
32941    Shuffle = X86ISD::VZEXT_MOVL;
32942    SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
32943    return true;
32944  }
32945
32946  // Check if we have SSE3 which will let us use MOVDDUP etc. The
32947  // instructions are no slower than UNPCKLPD but has the option to
32948  // fold the input operand into even an unaligned memory load.
32949  if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
32950    if (isTargetShuffleEquivalent(Mask, {0, 0})) {
32951      Shuffle = X86ISD::MOVDDUP;
32952      SrcVT = DstVT = MVT::v2f64;
32953      return true;
32954    }
32955    if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
32956      Shuffle = X86ISD::MOVSLDUP;
32957      SrcVT = DstVT = MVT::v4f32;
32958      return true;
32959    }
32960    if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) {
32961      Shuffle = X86ISD::MOVSHDUP;
32962      SrcVT = DstVT = MVT::v4f32;
32963      return true;
32964    }
32965  }
32966
32967  if (MaskVT.is256BitVector() && AllowFloatDomain) {
32968    assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
32969    if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
32970      Shuffle = X86ISD::MOVDDUP;
32971      SrcVT = DstVT = MVT::v4f64;
32972      return true;
32973    }
32974    if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
32975      Shuffle = X86ISD::MOVSLDUP;
32976      SrcVT = DstVT = MVT::v8f32;
32977      return true;
32978    }
32979    if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) {
32980      Shuffle = X86ISD::MOVSHDUP;
32981      SrcVT = DstVT = MVT::v8f32;
32982      return true;
32983    }
32984  }
32985
32986  if (MaskVT.is512BitVector() && AllowFloatDomain) {
32987    assert(Subtarget.hasAVX512() &&
32988           "AVX512 required for 512-bit vector shuffles");
32989    if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
32990      Shuffle = X86ISD::MOVDDUP;
32991      SrcVT = DstVT = MVT::v8f64;
32992      return true;
32993    }
32994    if (isTargetShuffleEquivalent(
32995            Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) {
32996      Shuffle = X86ISD::MOVSLDUP;
32997      SrcVT = DstVT = MVT::v16f32;
32998      return true;
32999    }
33000    if (isTargetShuffleEquivalent(
33001            Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) {
33002      Shuffle = X86ISD::MOVSHDUP;
33003      SrcVT = DstVT = MVT::v16f32;
33004      return true;
33005    }
33006  }
33007
33008  return false;
33009}
33010
33011// Attempt to match a combined shuffle mask against supported unary immediate
33012// permute instructions.
33013// TODO: Investigate sharing more of this with shuffle lowering.
33014static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
33015                                     const APInt &Zeroable,
33016                                     bool AllowFloatDomain, bool AllowIntDomain,
33017                                     const X86Subtarget &Subtarget,
33018                                     unsigned &Shuffle, MVT &ShuffleVT,
33019                                     unsigned &PermuteImm) {
33020  unsigned NumMaskElts = Mask.size();
33021  unsigned InputSizeInBits = MaskVT.getSizeInBits();
33022  unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
33023  MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
33024
33025  bool ContainsZeros =
33026      llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
33027
33028  // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
33029  if (!ContainsZeros && MaskScalarSizeInBits == 64) {
33030    // Check for lane crossing permutes.
33031    if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
33032      // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
33033      if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
33034        Shuffle = X86ISD::VPERMI;
33035        ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
33036        PermuteImm = getV4X86ShuffleImm(Mask);
33037        return true;
33038      }
33039      if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
33040        SmallVector<int, 4> RepeatedMask;
33041        if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
33042          Shuffle = X86ISD::VPERMI;
33043          ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
33044          PermuteImm = getV4X86ShuffleImm(RepeatedMask);
33045          return true;
33046        }
33047      }
33048    } else if (AllowFloatDomain && Subtarget.hasAVX()) {
33049      // VPERMILPD can permute with a non-repeating shuffle.
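      // The VPERMILPD immediate uses one control bit per f64 element: bit i
      // selects the low (0) or high (1) element within element i's 128-bit
      // lane, which is exactly (M & 1) below.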
33050      Shuffle = X86ISD::VPERMILPI;
33051      ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
33052      PermuteImm = 0;
33053      for (int i = 0, e = Mask.size(); i != e; ++i) {
33054        int M = Mask[i];
33055        if (M == SM_SentinelUndef)
33056          continue;
33057        assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
33058        PermuteImm |= (M & 1) << i;
33059      }
33060      return true;
33061    }
33062  }
33063
33064  // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
33065  // AVX introduced the VPERMILPD/VPERMILPS float permutes; before then we
33066  // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
33067  if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
33068      !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
33069    SmallVector<int, 4> RepeatedMask;
33070    if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
33071      // Narrow the repeated mask to create 32-bit element permutes.
33072      SmallVector<int, 4> WordMask = RepeatedMask;
33073      if (MaskScalarSizeInBits == 64)
33074        scaleShuffleMask<int>(2, RepeatedMask, WordMask);
33075
33076      Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
33077      ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
33078      ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
33079      PermuteImm = getV4X86ShuffleImm(WordMask);
33080      return true;
33081    }
33082  }
33083
33084  // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
33085  if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16) {
33086    SmallVector<int, 4> RepeatedMask;
33087    if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
33088      ArrayRef<int> LoMask(RepeatedMask.data() + 0, 4);
33089      ArrayRef<int> HiMask(RepeatedMask.data() + 4, 4);
33090
33091      // PSHUFLW: permute lower 4 elements only.
33092      if (isUndefOrInRange(LoMask, 0, 4) &&
33093          isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
33094        Shuffle = X86ISD::PSHUFLW;
33095        ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
33096        PermuteImm = getV4X86ShuffleImm(LoMask);
33097        return true;
33098      }
33099
33100      // PSHUFHW: permute upper 4 elements only.
33101      if (isUndefOrInRange(HiMask, 4, 8) &&
33102          isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
33103        // Offset the HiMask so that we can create the shuffle immediate.
33104        int OffsetHiMask[4];
33105        for (int i = 0; i != 4; ++i)
33106          OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
33107
33108        Shuffle = X86ISD::PSHUFHW;
33109        ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
33110        PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
33111        return true;
33112      }
33113    }
33114  }
33115
33116  // Attempt to match against byte/bit shifts.
33117  // FIXME: Add 512-bit support.
33118  if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
33119                         (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
33120    int ShiftAmt = matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits,
33121                                       Mask, 0, Zeroable, Subtarget);
33122    if (0 < ShiftAmt) {
33123      PermuteImm = (unsigned)ShiftAmt;
33124      return true;
33125    }
33126  }
33127
33128  return false;
33129}
33130
33131// Attempt to match a combined unary shuffle mask against supported binary
33132// shuffle instructions.
33133// TODO: Investigate sharing more of this with shuffle lowering.
33134static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
33135                               bool AllowFloatDomain, bool AllowIntDomain,
33136                               SDValue &V1, SDValue &V2, const SDLoc &DL,
33137                               SelectionDAG &DAG, const X86Subtarget &Subtarget,
33138                               unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
33139                               bool IsUnary) {
33140  unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
33141
33142  if (MaskVT.is128BitVector()) {
33143    if (isTargetShuffleEquivalent(Mask, {0, 0}) && AllowFloatDomain) {
33144      V2 = V1;
33145      V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
33146      Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
33147      SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
33148      return true;
33149    }
33150    if (isTargetShuffleEquivalent(Mask, {1, 1}) && AllowFloatDomain) {
33151      V2 = V1;
33152      Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
33153      SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
33154      return true;
33155    }
33156    if (isTargetShuffleEquivalent(Mask, {0, 3}) && Subtarget.hasSSE2() &&
33157        (AllowFloatDomain || !Subtarget.hasSSE41())) {
33158      std::swap(V1, V2);
33159      Shuffle = X86ISD::MOVSD;
33160      SrcVT = DstVT = MVT::v2f64;
33161      return true;
33162    }
33163    if (isTargetShuffleEquivalent(Mask, {4, 1, 2, 3}) &&
33164        (AllowFloatDomain || !Subtarget.hasSSE41())) {
33165      Shuffle = X86ISD::MOVSS;
33166      SrcVT = DstVT = MVT::v4f32;
33167      return true;
33168    }
33169  }
33170
33171  // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
33172  if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
33173      ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
33174      ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
33175    if (matchShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
33176                             Subtarget)) {
33177      DstVT = MaskVT;
33178      return true;
33179    }
33180  }
33181
33182  // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
33183  if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
33184      (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
33185      (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
33186      (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
33187      (MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
33188    if (matchShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL, DAG,
33189                              Subtarget)) {
33190      SrcVT = DstVT = MaskVT;
33191      if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
33192        SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
33193      return true;
33194    }
33195  }
33196
33197  return false;
33198}
33199
33200static bool matchBinaryPermuteShuffle(
33201    MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
33202    bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
33203    const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
33204    unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
33205  unsigned NumMaskElts = Mask.size();
33206  unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
33207
33208  // Attempt to match against PALIGNR byte rotate.
33209  if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
33210                         (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
33211    int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
33212    if (0 < ByteRotation) {
33213      Shuffle = X86ISD::PALIGNR;
33214      ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
33215      PermuteImm = ByteRotation;
33216      return true;
33217    }
33218  }
33219
33220  // Attempt to combine to X86ISD::BLENDI.
33221  if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
33222                            (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
33223      (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
33224    uint64_t BlendMask = 0;
33225    bool ForceV1Zero = false, ForceV2Zero = false;
33226    SmallVector<int, 8> TargetMask(Mask.begin(), Mask.end());
33227    if (matchShuffleAsBlend(V1, V2, TargetMask, Zeroable, ForceV1Zero,
33228                            ForceV2Zero, BlendMask)) {
33229      if (MaskVT == MVT::v16i16) {
33230        // We can only use v16i16 PBLENDW if the lanes are repeated.
33231        SmallVector<int, 8> RepeatedMask;
33232        if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
33233                                        RepeatedMask)) {
33234          assert(RepeatedMask.size() == 8 &&
33235                 "Repeated mask size doesn't match!");
33236          PermuteImm = 0;
33237          for (int i = 0; i < 8; ++i)
33238            if (RepeatedMask[i] >= 8)
33239              PermuteImm |= 1 << i;
33240          V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
33241          V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
33242          Shuffle = X86ISD::BLENDI;
33243          ShuffleVT = MaskVT;
33244          return true;
33245        }
33246      } else {
33247        V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
33248        V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
33249        PermuteImm = (unsigned)BlendMask;
33250        Shuffle = X86ISD::BLENDI;
33251        ShuffleVT = MaskVT;
33252        return true;
33253      }
33254    }
33255  }
33256
33257  // Attempt to combine to INSERTPS, but only if it has elements that need to
33258  // be set to zero.
33259  if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
33260      MaskVT.is128BitVector() &&
33261      llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; }) &&
33262      matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
33263    Shuffle = X86ISD::INSERTPS;
33264    ShuffleVT = MVT::v4f32;
33265    return true;
33266  }
33267
33268  // Attempt to combine to SHUFPD.
33269  if (AllowFloatDomain && EltSizeInBits == 64 &&
33270      ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
33271       (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
33272       (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
33273    bool ForceV1Zero = false, ForceV2Zero = false;
33274    if (matchShuffleWithSHUFPD(MaskVT, V1, V2, ForceV1Zero, ForceV2Zero,
33275                               PermuteImm, Mask, Zeroable)) {
33276      V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
33277      V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
33278      Shuffle = X86ISD::SHUFP;
33279      ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
33280      return true;
33281    }
33282  }
33283
33284  // Attempt to combine to SHUFPS.
33285  if (AllowFloatDomain && EltSizeInBits == 32 &&
33286      ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
33287       (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
33288       (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
33289    SmallVector<int, 4> RepeatedMask;
33290    if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
33291      // Match each half of the repeated mask, to determine if its just
33292      // referencing one of the vectors, is zeroable or entirely undef.
33293      auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
33294        int M0 = RepeatedMask[Offset];
33295        int M1 = RepeatedMask[Offset + 1];
33296
33297        if (isUndefInRange(RepeatedMask, Offset, 2)) {
33298          return DAG.getUNDEF(MaskVT);
33299        } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
33300          S0 = (SM_SentinelUndef == M0 ? -1 : 0);
33301          S1 = (SM_SentinelUndef == M1 ? -1 : 1);
33302          return getZeroVector(MaskVT, Subtarget, DAG, DL);
33303        } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
33304          S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
33305          S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
33306          return V1;
33307        } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
33308          S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
33309          S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
33310          return V2;
33311        }
33312
33313        return SDValue();
33314      };
33315
33316      int ShufMask[4] = {-1, -1, -1, -1};
33317      SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
33318      SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
33319
33320      if (Lo && Hi) {
33321        V1 = Lo;
33322        V2 = Hi;
33323        Shuffle = X86ISD::SHUFP;
33324        ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
33325        PermuteImm = getV4X86ShuffleImm(ShufMask);
33326        return true;
33327      }
33328    }
33329  }
33330
33331  // Attempt to combine to INSERTPS more generally if X86ISD::SHUFP failed.
33332  if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
33333      MaskVT.is128BitVector() &&
33334      matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
33335    Shuffle = X86ISD::INSERTPS;
33336    ShuffleVT = MVT::v4f32;
33337    return true;
33338  }
33339
33340  return false;
33341}
33342
33343static SDValue combineX86ShuffleChainWithExtract(
33344    ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
33345    bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
33346    const X86Subtarget &Subtarget);
33347
33348/// Combine an arbitrary chain of shuffles into a single instruction if
33349/// possible.
33350///
33351/// This is the leaf of the recursive combine below. When we have found some
33352/// chain of single-use x86 shuffle instructions and accumulated the combined
33353/// shuffle mask represented by them, this will try to pattern match that mask
33354/// into either a single instruction if there is a special purpose instruction
33355/// for this operation, or into a PSHUFB instruction which is a fully general
33356/// instruction but should only be used to replace chains over a certain depth.
33357static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
33358                                      ArrayRef<int> BaseMask, int Depth,
33359                                      bool HasVariableMask,
33360                                      bool AllowVariableMask, SelectionDAG &DAG,
33361                                      const X86Subtarget &Subtarget) {
33362  assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
33363  assert((Inputs.size() == 1 || Inputs.size() == 2) &&
33364         "Unexpected number of shuffle inputs!");
33365
33366  // Find the inputs that enter the chain. Note that multiple uses are OK
33367  // here, we're not going to remove the operands we find.
33368  bool UnaryShuffle = (Inputs.size() == 1);
33369  SDValue V1 = peekThroughBitcasts(Inputs[0]);
33370  SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
33371                             : peekThroughBitcasts(Inputs[1]));
33372
33373  MVT VT1 = V1.getSimpleValueType();
33374  MVT VT2 = V2.getSimpleValueType();
33375  MVT RootVT = Root.getSimpleValueType();
33376  assert(VT1.getSizeInBits() == RootVT.getSizeInBits() &&
33377         VT2.getSizeInBits() == RootVT.getSizeInBits() &&
33378         "Vector size mismatch");
33379
33380  SDLoc DL(Root);
33381  SDValue Res;
33382
33383  unsigned NumBaseMaskElts = BaseMask.size();
33384  if (NumBaseMaskElts == 1) {
33385    assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
33386    return DAG.getBitcast(RootVT, V1);
33387  }
33388
33389  unsigned RootSizeInBits = RootVT.getSizeInBits();
33390  unsigned NumRootElts = RootVT.getVectorNumElements();
33391  unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
33392  bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
33393                     (RootVT.isFloatingPoint() && Depth >= 1) ||
33394                     (RootVT.is256BitVector() && !Subtarget.hasAVX2());
33395
33396  // Don't combine if we are a AVX512/EVEX target and the mask element size
33397  // is different from the root element size - this would prevent writemasks
33398  // from being reused.
33399  // TODO - this currently prevents all lane shuffles from occurring.
33400  // TODO - check for writemasks usage instead of always preventing combining.
33401  // TODO - attempt to narrow Mask back to writemask size.
33402  bool IsEVEXShuffle =
33403      RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128);
33404
33405  // Attempt to match a subvector broadcast.
33406  // shuffle(insert_subvector(undef, sub, 0), undef, 0, 0, 0, 0)
33407  if (UnaryShuffle &&
33408      (BaseMaskEltSizeInBits == 128 || BaseMaskEltSizeInBits == 256)) {
33409    SmallVector<int, 64> BroadcastMask(NumBaseMaskElts, 0);
33410    if (isTargetShuffleEquivalent(BaseMask, BroadcastMask)) {
33411      SDValue Src = Inputs[0];
33412      if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
33413          Src.getOperand(0).isUndef() &&
33414          Src.getOperand(1).getValueSizeInBits() == BaseMaskEltSizeInBits &&
33415          MayFoldLoad(Src.getOperand(1)) && isNullConstant(Src.getOperand(2))) {
33416        return DAG.getBitcast(RootVT, DAG.getNode(X86ISD::SUBV_BROADCAST, DL,
33417                                                  Src.getValueType(),
33418                                                  Src.getOperand(1)));
33419      }
33420    }
33421  }
33422
33423  // TODO - handle 128/256-bit lane shuffles of 512-bit vectors.
33424
33425  // Handle 128-bit lane shuffles of 256-bit vectors.
33426  // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
33427  // we need to use the zeroing feature.
33428  // TODO - this should support binary shuffles.
33429  if (UnaryShuffle && RootVT.is256BitVector() && NumBaseMaskElts == 2 &&
33430      !(Subtarget.hasAVX2() && BaseMask[0] >= -1 && BaseMask[1] >= -1) &&
33431      !isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0)) {
33432    if (Depth == 0 && Root.getOpcode() == X86ISD::VPERM2X128)
33433      return SDValue(); // Nothing to do!
33434    MVT ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64);
33435    unsigned PermMask = 0;
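    // VPERM2X128 immediate: bits [1:0] select the 128-bit source lane for the
    // low half and bits [5:4] for the high half; setting bit 3 or bit 7 (the
    // 0x8 below) zeroes the corresponding half instead.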
33436    PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0);
33437    PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4);
33438
33439    Res = DAG.getBitcast(ShuffleVT, V1);
33440    Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res,
33441                      DAG.getUNDEF(ShuffleVT),
33442                      DAG.getTargetConstant(PermMask, DL, MVT::i8));
33443    return DAG.getBitcast(RootVT, Res);
33444  }
33445
33446  // For masks that have been widened to 128-bit elements or more,
33447  // narrow back down to 64-bit elements.
33448  SmallVector<int, 64> Mask;
33449  if (BaseMaskEltSizeInBits > 64) {
33450    assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
33451    int MaskScale = BaseMaskEltSizeInBits / 64;
33452    scaleShuffleMask<int>(MaskScale, BaseMask, Mask);
33453  } else {
33454    Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end());
33455  }
33456
33457  unsigned NumMaskElts = Mask.size();
33458  unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
33459
33460  // Determine the effective mask value type.
33461  FloatDomain &= (32 <= MaskEltSizeInBits);
33462  MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
33463                           : MVT::getIntegerVT(MaskEltSizeInBits);
33464  MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
33465
33466  // Only allow legal mask types.
33467  if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
33468    return SDValue();
33469
33470  // Attempt to match the mask against known shuffle patterns.
33471  MVT ShuffleSrcVT, ShuffleVT;
33472  unsigned Shuffle, PermuteImm;
33473
33474  // Which shuffle domains are permitted?
33475  // Permit domain crossing at higher combine depths.
33476  // TODO: Should we indicate which domain is preferred if both are allowed?
33477  bool AllowFloatDomain = FloatDomain || (Depth >= 3);
33478  bool AllowIntDomain = (!FloatDomain || (Depth >= 3)) && Subtarget.hasSSE2() &&
33479                        (!MaskVT.is256BitVector() || Subtarget.hasAVX2());
33480
33481  // Determine zeroable mask elements.
33482  APInt KnownUndef, KnownZero;
33483  resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
33484  APInt Zeroable = KnownUndef | KnownZero;
33485
33486  if (UnaryShuffle) {
33487    // If we are shuffling an X86ISD::VZEXT_LOAD then we can use the load
33488    // directly if we don't shuffle the lower element and we shuffle the upper
33489    // (zero) elements within themselves.
33490    if (V1.getOpcode() == X86ISD::VZEXT_LOAD &&
33491        (cast<MemIntrinsicSDNode>(V1)->getMemoryVT().getScalarSizeInBits() %
33492         MaskEltSizeInBits) == 0) {
33493      unsigned Scale =
33494          cast<MemIntrinsicSDNode>(V1)->getMemoryVT().getScalarSizeInBits() /
33495          MaskEltSizeInBits;
33496      ArrayRef<int> HiMask(Mask.data() + Scale, NumMaskElts - Scale);
33497      if (isSequentialOrUndefInRange(Mask, 0, Scale, 0) &&
33498          isUndefOrZeroOrInRange(HiMask, Scale, NumMaskElts)) {
33499        return DAG.getBitcast(RootVT, V1);
33500      }
33501    }
33502
33503    // Attempt to match against broadcast-from-vector.
33504    // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
33505    if ((Subtarget.hasAVX2() || (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits))
33506        && (!IsEVEXShuffle || NumRootElts == NumMaskElts)) {
33507      SmallVector<int, 64> BroadcastMask(NumMaskElts, 0);
33508      if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
33509        if (V1.getValueType() == MaskVT &&
33510            V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
33511            MayFoldLoad(V1.getOperand(0))) {
33512          if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
33513            return SDValue(); // Nothing to do!
33514          Res = V1.getOperand(0);
33515          Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
33516          return DAG.getBitcast(RootVT, Res);
33517        }
33518        if (Subtarget.hasAVX2()) {
33519          if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
33520            return SDValue(); // Nothing to do!
33521          Res = DAG.getBitcast(MaskVT, V1);
33522          Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
33523          return DAG.getBitcast(RootVT, Res);
33524        }
33525      }
33526    }
33527
33528    SDValue NewV1 = V1; // Save operand in case early exit happens.
33529    if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
33530                          DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
33531                          ShuffleVT) &&
33532        (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
33533      if (Depth == 0 && Root.getOpcode() == Shuffle)
33534        return SDValue(); // Nothing to do!
33535      Res = DAG.getBitcast(ShuffleSrcVT, NewV1);
33536      Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
33537      return DAG.getBitcast(RootVT, Res);
33538    }
33539
33540    if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
33541                                 AllowIntDomain, Subtarget, Shuffle, ShuffleVT,
33542                                 PermuteImm) &&
33543        (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
33544      if (Depth == 0 && Root.getOpcode() == Shuffle)
33545        return SDValue(); // Nothing to do!
33546      Res = DAG.getBitcast(ShuffleVT, V1);
33547      Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
33548                        DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
33549      return DAG.getBitcast(RootVT, Res);
33550    }
33551  }
33552
33553  SDValue NewV1 = V1; // Save operands in case early exit happens.
33554  SDValue NewV2 = V2;
33555  if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
33556                         NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
33557                         ShuffleVT, UnaryShuffle) &&
33558      (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
33559    if (Depth == 0 && Root.getOpcode() == Shuffle)
33560      return SDValue(); // Nothing to do!
33561    NewV1 = DAG.getBitcast(ShuffleSrcVT, NewV1);
33562    NewV2 = DAG.getBitcast(ShuffleSrcVT, NewV2);
33563    Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
33564    return DAG.getBitcast(RootVT, Res);
33565  }
33566
33567  NewV1 = V1; // Save operands in case early exit happens.
33568  NewV2 = V2;
33569  if (matchBinaryPermuteShuffle(
33570          MaskVT, Mask, Zeroable, AllowFloatDomain, AllowIntDomain, NewV1,
33571          NewV2, DL, DAG, Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
33572      (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
33573    if (Depth == 0 && Root.getOpcode() == Shuffle)
33574      return SDValue(); // Nothing to do!
33575    NewV1 = DAG.getBitcast(ShuffleVT, NewV1);
33576    NewV2 = DAG.getBitcast(ShuffleVT, NewV2);
33577    Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
33578                      DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
33579    return DAG.getBitcast(RootVT, Res);
33580  }
33581
33582  // Typically from here on, we need an integer version of MaskVT.
33583  MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
33584  IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
33585
33586  // Annoyingly, SSE4A instructions don't map into the above match helpers.
33587  if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
33588    uint64_t BitLen, BitIdx;
33589    if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
33590                            Zeroable)) {
33591      if (Depth == 0 && Root.getOpcode() == X86ISD::EXTRQI)
33592        return SDValue(); // Nothing to do!
33593      V1 = DAG.getBitcast(IntMaskVT, V1);
33594      Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
33595                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
33596                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
33597      return DAG.getBitcast(RootVT, Res);
33598    }
33599
33600    if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
33601      if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTQI)
33602        return SDValue(); // Nothing to do!
33603      V1 = DAG.getBitcast(IntMaskVT, V1);
33604      V2 = DAG.getBitcast(IntMaskVT, V2);
33605      Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
33606                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
33607                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
33608      return DAG.getBitcast(RootVT, Res);
33609    }
33610  }
33611
33612  // Don't try to re-form single instruction chains under any circumstances now
33613  // that we've done encoding canonicalization for them.
33614  if (Depth < 1)
33615    return SDValue();
33616
33617  // Depth threshold above which we can efficiently use variable mask shuffles.
33618  int VariableShuffleDepth = Subtarget.hasFastVariableShuffle() ? 1 : 2;
33619  AllowVariableMask &= (Depth >= VariableShuffleDepth) || HasVariableMask;
33620
33621  bool MaskContainsZeros =
33622      any_of(Mask, [](int M) { return M == SM_SentinelZero; });
33623
33624  if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
33625    // If we have a single input lane-crossing shuffle then lower to VPERMV.
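    // For instance (illustrative, not from the original source), a unary
    // v8f32 mask <0,4,1,5,2,6,3,7> pulls elements across 128-bit lanes, so on
    // AVX2 it is lowered here as VPERMV with the constant index vector
    // <0,4,1,5,2,6,3,7> built by getConstVector below.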
33626    if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
33627        ((Subtarget.hasAVX2() &&
33628          (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
33629         (Subtarget.hasAVX512() &&
33630          (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
33631           MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
33632         (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
33633         (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
33634         (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
33635         (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
33636      SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
33637      Res = DAG.getBitcast(MaskVT, V1);
33638      Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
33639      return DAG.getBitcast(RootVT, Res);
33640    }
33641
33642    // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
33643    // vector as the second source.
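    // Rough sketch of the rewrite performed below: a v8i32 mask
    // <0,Z,2,Z,4,Z,6,Z> (Z == SM_SentinelZero) becomes <0,9,2,11,4,13,6,15>,
    // where indices 8..15 select lanes of the all-zeros second source.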
33644    if (UnaryShuffle && AllowVariableMask &&
33645        ((Subtarget.hasAVX512() &&
33646          (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
33647           MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
33648         (Subtarget.hasVLX() &&
33649          (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
33650           MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
33651         (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
33652         (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
33653         (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
33654         (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
33655      // Adjust shuffle mask - replace SM_SentinelZero with second source index.
33656      for (unsigned i = 0; i != NumMaskElts; ++i)
33657        if (Mask[i] == SM_SentinelZero)
33658          Mask[i] = NumMaskElts + i;
33659
33660      SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
33661      Res = DAG.getBitcast(MaskVT, V1);
33662      SDValue Zero = getZeroVector(MaskVT, Subtarget, DAG, DL);
33663      Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, Res, VPermMask, Zero);
33664      return DAG.getBitcast(RootVT, Res);
33665    }
33666
33667    // If that failed and either input is extracted then try to combine as a
33668    // shuffle with the larger type.
33669    if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
33670            Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
33671            DAG, Subtarget))
33672      return WideShuffle;
33673
33674    // If we have a dual input lane-crossing shuffle then lower to VPERMV3.
33675    if (AllowVariableMask && !MaskContainsZeros &&
33676        ((Subtarget.hasAVX512() &&
33677          (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
33678           MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
33679         (Subtarget.hasVLX() &&
33680          (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
33681           MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
33682         (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
33683         (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
33684         (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
33685         (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
33686      SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
33687      V1 = DAG.getBitcast(MaskVT, V1);
33688      V2 = DAG.getBitcast(MaskVT, V2);
33689      Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
33690      return DAG.getBitcast(RootVT, Res);
33691    }
33692    return SDValue();
33693  }
33694
33695  // See if we can combine a single input shuffle with zeros to a bit-mask,
33696  // which is much simpler than any shuffle.
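  // Illustrative example: a v4i32 mask <0,Z,2,U> keeps elements 0 and 2 in
  // place and zeros element 1, so it folds to an AND with the constant
  // <-1, 0, -1, undef> built below.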
33697  if (UnaryShuffle && MaskContainsZeros && AllowVariableMask &&
33698      isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
33699      DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
33700    APInt Zero = APInt::getNullValue(MaskEltSizeInBits);
33701    APInt AllOnes = APInt::getAllOnesValue(MaskEltSizeInBits);
33702    APInt UndefElts(NumMaskElts, 0);
33703    SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
33704    for (unsigned i = 0; i != NumMaskElts; ++i) {
33705      int M = Mask[i];
33706      if (M == SM_SentinelUndef) {
33707        UndefElts.setBit(i);
33708        continue;
33709      }
33710      if (M == SM_SentinelZero)
33711        continue;
33712      EltBits[i] = AllOnes;
33713    }
33714    SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
33715    Res = DAG.getBitcast(MaskVT, V1);
33716    unsigned AndOpcode =
33717        FloatDomain ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
33718    Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
33719    return DAG.getBitcast(RootVT, Res);
33720  }
33721
33722  // If we have a single input shuffle with different shuffle patterns in the
33723  // 128-bit lanes, use a variable shuffle mask with VPERMILPS.
33724  // TODO: Combine other mask types at higher depths.
33725  if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
33726      ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
33727       (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
33728    SmallVector<SDValue, 16> VPermIdx;
33729    for (int M : Mask) {
33730      SDValue Idx =
33731          M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
33732      VPermIdx.push_back(Idx);
33733    }
33734    SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
33735    Res = DAG.getBitcast(MaskVT, V1);
33736    Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
33737    return DAG.getBitcast(RootVT, Res);
33738  }
33739
33740  // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
33741  // to VPERMIL2PD/VPERMIL2PS.
33742  if (AllowVariableMask && Subtarget.hasXOP() &&
33743      (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
33744       MaskVT == MVT::v8f32)) {
33745    // VPERMIL2 Operation.
33746    // Bits[3] - Match Bit.
33747    // Bits[2:1] - (Per Lane) PD Shuffle Mask.
33748    // Bits[2:0] - (Per Lane) PS Shuffle Mask.
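    // Worked example (for exposition only): with MaskVT == v4f32 and mask
    // <0,5,Z,7>, the loop below produces VPerm2Idx == {0,5,8,7} and
    // M2ZImm == 2, i.e. element 2 is zeroed via the match bit.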
33749    unsigned NumLanes = MaskVT.getSizeInBits() / 128;
33750    unsigned NumEltsPerLane = NumMaskElts / NumLanes;
33751    SmallVector<int, 8> VPerm2Idx;
33752    unsigned M2ZImm = 0;
33753    for (int M : Mask) {
33754      if (M == SM_SentinelUndef) {
33755        VPerm2Idx.push_back(-1);
33756        continue;
33757      }
33758      if (M == SM_SentinelZero) {
33759        M2ZImm = 2;
33760        VPerm2Idx.push_back(8);
33761        continue;
33762      }
33763      int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
33764      Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
33765      VPerm2Idx.push_back(Index);
33766    }
33767    V1 = DAG.getBitcast(MaskVT, V1);
33768    V2 = DAG.getBitcast(MaskVT, V2);
33769    SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
33770    Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
33771                      DAG.getTargetConstant(M2ZImm, DL, MVT::i8));
33772    return DAG.getBitcast(RootVT, Res);
33773  }
33774
33775  // If we have 3 or more shuffle instructions or a chain involving a variable
33776  // mask, we can replace them with a single PSHUFB instruction profitably.
33777  // Intel's manuals suggest only using PSHUFB if doing so replaces 5
33778  // instructions, but in practice PSHUFB tends to be *very* fast so we're
33779  // more aggressive.
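  // As a rough example, a 128-bit unary v4i32-style mask <2,Z,U,0> expands
  // (Ratio == 4) to the byte mask {8,9,10,11, 0xFF x4, undef x4, 0,1,2,3},
  // where 0xFF zeros the byte and undef bytes are left unconstrained.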
33780  if (UnaryShuffle && AllowVariableMask &&
33781      ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
33782       (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
33783       (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
33784    SmallVector<SDValue, 16> PSHUFBMask;
33785    int NumBytes = RootVT.getSizeInBits() / 8;
33786    int Ratio = NumBytes / NumMaskElts;
33787    for (int i = 0; i < NumBytes; ++i) {
33788      int M = Mask[i / Ratio];
33789      if (M == SM_SentinelUndef) {
33790        PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
33791        continue;
33792      }
33793      if (M == SM_SentinelZero) {
33794        PSHUFBMask.push_back(DAG.getConstant(255, DL, MVT::i8));
33795        continue;
33796      }
33797      M = Ratio * M + i % Ratio;
33798      assert((M / 16) == (i / 16) && "Lane crossing detected");
33799      PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
33800    }
33801    MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
33802    Res = DAG.getBitcast(ByteVT, V1);
33803    SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
33804    Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
33805    return DAG.getBitcast(RootVT, Res);
33806  }
33807
33808  // With XOP, if we have a 128-bit binary input shuffle we can always combine
33809  // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
33810  // slower than PSHUFB on targets that support both.
33811  if (AllowVariableMask && RootVT.is128BitVector() && Subtarget.hasXOP()) {
33812    // VPPERM Mask Operation
33813    // Bits[4:0] - Byte Index (0 - 31)
33814    // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
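    // For example, with a v8i16-style mask (Ratio == 2) an element index of
    // 10 (word 2 of V2) becomes byte selectors 20 and 21, while
    // SM_SentinelZero becomes the selector 128 (zero operation).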
33815    SmallVector<SDValue, 16> VPPERMMask;
33816    int NumBytes = 16;
33817    int Ratio = NumBytes / NumMaskElts;
33818    for (int i = 0; i < NumBytes; ++i) {
33819      int M = Mask[i / Ratio];
33820      if (M == SM_SentinelUndef) {
33821        VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
33822        continue;
33823      }
33824      if (M == SM_SentinelZero) {
33825        VPPERMMask.push_back(DAG.getConstant(128, DL, MVT::i8));
33826        continue;
33827      }
33828      M = Ratio * M + i % Ratio;
33829      VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
33830    }
33831    MVT ByteVT = MVT::v16i8;
33832    V1 = DAG.getBitcast(ByteVT, V1);
33833    V2 = DAG.getBitcast(ByteVT, V2);
33834    SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
33835    Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
33836    return DAG.getBitcast(RootVT, Res);
33837  }
33838
33839  // If that failed and either input is extracted then try to combine as a
33840  // shuffle with the larger type.
33841  if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
33842          Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
33843          DAG, Subtarget))
33844    return WideShuffle;
33845
33846  // If we have a dual input shuffle then lower to VPERMV3.
33847  if (!UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
33848      ((Subtarget.hasAVX512() &&
33849        (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
33850         MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
33851       (Subtarget.hasVLX() &&
33852        (MaskVT == MVT::v2f64 || MaskVT == MVT::v2i64 || MaskVT == MVT::v4f64 ||
33853         MaskVT == MVT::v4i64 || MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 ||
33854         MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
33855       (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
33856       (Subtarget.hasBWI() && Subtarget.hasVLX() &&
33857        (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16)) ||
33858       (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
33859       (Subtarget.hasVBMI() && Subtarget.hasVLX() &&
33860        (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8)))) {
33861    SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
33862    V1 = DAG.getBitcast(MaskVT, V1);
33863    V2 = DAG.getBitcast(MaskVT, V2);
33864    Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
33865    return DAG.getBitcast(RootVT, Res);
33866  }
33867
33868  // Failed to find any combines.
33869  return SDValue();
33870}
33871
33872// Combine an arbitrary chain of shuffles + extract_subvectors into a single
33873// instruction if possible.
33874//
33875// Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
33876// type size to attempt to combine:
33877// shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
33878// -->
33879// extract_subvector(shuffle(x,y,m2),0)
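//
// Illustrative example (not from the original source): for two v4f32 inputs
// extract_subvector(X:v8f32, 4) and extract_subvector(Y:v8f32, 4) with mask
// <0,1,4,5>, the widened form is a v8f32 shuffle of X and Y with mask
// <4,5,12,13,u,u,u,u>, followed by extracting the low 128 bits.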
33880static SDValue combineX86ShuffleChainWithExtract(
33881    ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
33882    bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
33883    const X86Subtarget &Subtarget) {
33884  unsigned NumMaskElts = BaseMask.size();
33885  unsigned NumInputs = Inputs.size();
33886  if (NumInputs == 0)
33887    return SDValue();
33888
33889  SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
33890  SmallVector<unsigned, 4> Offsets(NumInputs, 0);
33891
33892  // Peek through subvectors.
33893  // TODO: Support inter-mixed EXTRACT_SUBVECTORs + BITCASTs?
33894  unsigned WideSizeInBits = WideInputs[0].getValueSizeInBits();
33895  for (unsigned i = 0; i != NumInputs; ++i) {
33896    SDValue &Src = WideInputs[i];
33897    unsigned &Offset = Offsets[i];
33898    Src = peekThroughBitcasts(Src);
33899    EVT BaseVT = Src.getValueType();
33900    while (Src.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
33901           isa<ConstantSDNode>(Src.getOperand(1))) {
33902      Offset += Src.getConstantOperandVal(1);
33903      Src = Src.getOperand(0);
33904    }
33905    WideSizeInBits = std::max(WideSizeInBits,
33906                              (unsigned)Src.getValueSizeInBits());
33907    assert((Offset % BaseVT.getVectorNumElements()) == 0 &&
33908           "Unexpected subvector extraction");
33909    Offset /= BaseVT.getVectorNumElements();
33910    Offset *= NumMaskElts;
33911  }
33912
33913  // Bail if we're always extracting from the lowest subvectors, as
33914  // combineX86ShuffleChain should match this for the current width.
33915  if (llvm::all_of(Offsets, [](unsigned Offset) { return Offset == 0; }))
33916    return SDValue();
33917
33918  EVT RootVT = Root.getValueType();
33919  unsigned RootSizeInBits = RootVT.getSizeInBits();
33920  unsigned Scale = WideSizeInBits / RootSizeInBits;
33921  assert((WideSizeInBits % RootSizeInBits) == 0 &&
33922         "Unexpected subvector extraction");
33923
33924  // If the src vector types aren't the same, see if we can extend
33925  // them to match each other.
33926  // TODO: Support different scalar types?
33927  EVT WideSVT = WideInputs[0].getValueType().getScalarType();
33928  if (llvm::any_of(WideInputs, [&WideSVT, &DAG](SDValue Op) {
33929        return !DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()) ||
33930               Op.getValueType().getScalarType() != WideSVT;
33931      }))
33932    return SDValue();
33933
33934  for (SDValue &NewInput : WideInputs) {
33935    assert((WideSizeInBits % NewInput.getValueSizeInBits()) == 0 &&
33936           "Shuffle vector size mismatch");
33937    if (WideSizeInBits > NewInput.getValueSizeInBits())
33938      NewInput = widenSubVector(NewInput, false, Subtarget, DAG,
33939                                SDLoc(NewInput), WideSizeInBits);
33940    assert(WideSizeInBits == NewInput.getValueSizeInBits() &&
33941           "Unexpected subvector extraction");
33942  }
33943
33944  // Create new mask for larger type.
33945  for (unsigned i = 1; i != NumInputs; ++i)
33946    Offsets[i] += i * Scale * NumMaskElts;
33947
33948  SmallVector<int, 64> WideMask(BaseMask.begin(), BaseMask.end());
33949  for (int &M : WideMask) {
33950    if (M < 0)
33951      continue;
33952    M = (M % NumMaskElts) + Offsets[M / NumMaskElts];
33953  }
33954  WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);
33955
33956  // Remove unused/repeated shuffle source ops.
33957  resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
33958  assert(!WideInputs.empty() && "Shuffle with no inputs detected");
33959
33960  if (WideInputs.size() > 2)
33961    return SDValue();
33962
33963  // Increase depth for every upper subvector we've peeked through.
33964  Depth += count_if(Offsets, [](unsigned Offset) { return Offset > 0; });
33965
33966  // Attempt to combine wider chain.
33967  // TODO: Can we use a better Root?
33968  SDValue WideRoot = WideInputs[0];
33969  if (SDValue WideShuffle = combineX86ShuffleChain(
33970          WideInputs, WideRoot, WideMask, Depth, HasVariableMask,
33971          AllowVariableMask, DAG, Subtarget)) {
33972    WideShuffle =
33973        extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
33974    return DAG.getBitcast(RootVT, WideShuffle);
33975  }
33976  return SDValue();
33977}
33978
33979// Attempt to constant fold all of the constant source ops.
33980// Returns true if the entire shuffle is folded to a constant.
33981// TODO: Extend this to merge multiple constant Ops and update the mask.
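// For example (illustrative only), shuffling constant v4i32 vectors <1,2,3,4>
// and <5,6,7,8> with mask <0,5,Z,U> folds to the constant <1,6,0,undef>.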
33982static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
33983                                           ArrayRef<int> Mask, SDValue Root,
33984                                           bool HasVariableMask,
33985                                           SelectionDAG &DAG,
33986                                           const X86Subtarget &Subtarget) {
33987  MVT VT = Root.getSimpleValueType();
33988
33989  unsigned SizeInBits = VT.getSizeInBits();
33990  unsigned NumMaskElts = Mask.size();
33991  unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
33992  unsigned NumOps = Ops.size();
33993
33994  // Extract constant bits from each source op.
33995  bool OneUseConstantOp = false;
33996  SmallVector<APInt, 16> UndefEltsOps(NumOps);
33997  SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
33998  for (unsigned i = 0; i != NumOps; ++i) {
33999    SDValue SrcOp = Ops[i];
34000    OneUseConstantOp |= SrcOp.hasOneUse();
34001    if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
34002                                       RawBitsOps[i]))
34003      return SDValue();
34004  }
34005
34006  // Only fold if at least one of the constants is only used once or
34007  // the combined shuffle has included a variable mask shuffle; this
34008  // is to avoid constant pool bloat.
34009  if (!OneUseConstantOp && !HasVariableMask)
34010    return SDValue();
34011
34012  // Shuffle the constant bits according to the mask.
34013  SDLoc DL(Root);
34014  APInt UndefElts(NumMaskElts, 0);
34015  APInt ZeroElts(NumMaskElts, 0);
34016  APInt ConstantElts(NumMaskElts, 0);
34017  SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
34018                                        APInt::getNullValue(MaskSizeInBits));
34019  for (unsigned i = 0; i != NumMaskElts; ++i) {
34020    int M = Mask[i];
34021    if (M == SM_SentinelUndef) {
34022      UndefElts.setBit(i);
34023      continue;
34024    } else if (M == SM_SentinelZero) {
34025      ZeroElts.setBit(i);
34026      continue;
34027    }
34028    assert(0 <= M && M < (int)(NumMaskElts * NumOps));
34029
34030    unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
34031    unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
34032
34033    auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
34034    if (SrcUndefElts[SrcMaskIdx]) {
34035      UndefElts.setBit(i);
34036      continue;
34037    }
34038
34039    auto &SrcEltBits = RawBitsOps[SrcOpIdx];
34040    APInt &Bits = SrcEltBits[SrcMaskIdx];
34041    if (!Bits) {
34042      ZeroElts.setBit(i);
34043      continue;
34044    }
34045
34046    ConstantElts.setBit(i);
34047    ConstantBitData[i] = Bits;
34048  }
34049  assert((UndefElts | ZeroElts | ConstantElts).isAllOnesValue());
34050
34051  // Attempt to create a zero vector.
34052  if ((UndefElts | ZeroElts).isAllOnesValue())
34053    return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG, DL);
34054
34055  // Create the constant data.
34056  MVT MaskSVT;
34057  if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
34058    MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
34059  else
34060    MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
34061
34062  MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
34063  if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
34064    return SDValue();
34065
34066  SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
34067  return DAG.getBitcast(VT, CstOp);
34068}
34069
34070/// Fully generic combining of x86 shuffle instructions.
34071///
34072/// This should be the last combine run over the x86 shuffle instructions. Once
34073/// they have been fully optimized, this will recursively consider all chains
34074/// of single-use shuffle instructions, build a generic model of the cumulative
34075/// shuffle operation, and check for simpler instructions which implement this
34076/// operation. We use this primarily for two purposes:
34077///
34078/// 1) Collapse generic shuffles to specialized single instructions when
34079///    equivalent. In most cases, this is just an encoding size win, but
34080///    sometimes we will collapse multiple generic shuffles into a single
34081///    special-purpose shuffle.
34082/// 2) Look for sequences of shuffle instructions with 3 or more total
34083///    instructions, and replace them with the slightly more expensive SSSE3
34084///    PSHUFB instruction if available. We do this as the last combining step
34085///    to ensure we avoid using PSHUFB if we can implement the shuffle with
34086///    a suitable short sequence of other instructions. The PSHUFB will either
34087///    use a register or have to read from memory and so is slightly (but only
34088///    slightly) more expensive than the other shuffle instructions.
34089///
34090/// Because this is inherently a quadratic operation (for each shuffle in
34091/// a chain, we recurse up the chain), the depth is limited to 8 instructions.
34092/// This should never be an issue in practice as the shuffle lowering doesn't
34093/// produce sequences of more than 8 instructions.
34094///
34095/// FIXME: We will currently miss some cases where the redundant shuffling
34096/// would simplify under the threshold for PSHUFB formation because of
34097/// combine-ordering. To fix this, we should do the redundant instruction
34098/// combining in this recursive walk.
34099static SDValue combineX86ShufflesRecursively(
34100    ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
34101    ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
34102    bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
34103    const X86Subtarget &Subtarget) {
34104  assert(RootMask.size() > 0 &&
34105         (RootMask.size() > 1 || (RootMask[0] == 0 && SrcOpIndex == 0)) &&
34106         "Illegal shuffle root mask");
34107
34108  // Bound the depth of our recursive combine because this is ultimately
34109  // quadratic in nature.
34110  const unsigned MaxRecursionDepth = 8;
34111  if (Depth >= MaxRecursionDepth)
34112    return SDValue();
34113
34114  // Directly rip through bitcasts to find the underlying operand.
34115  SDValue Op = SrcOps[SrcOpIndex];
34116  Op = peekThroughOneUseBitcasts(Op);
34117
34118  MVT VT = Op.getSimpleValueType();
34119  if (!VT.isVector())
34120    return SDValue(); // Bail if we hit a non-vector.
34121
34122  assert(Root.getSimpleValueType().isVector() &&
34123         "Shuffles operate on vector types!");
34124  assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
34125         "Can only combine shuffles of the same vector register size.");
34126
34127  // Extract target shuffle mask and resolve sentinels and inputs.
34128  // TODO - determine Op's demanded elts from RootMask.
34129  SmallVector<int, 64> OpMask;
34130  SmallVector<SDValue, 2> OpInputs;
34131  APInt OpUndef, OpZero;
34132  APInt OpDemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
34133  bool IsOpVariableMask = isTargetShuffleVariableMask(Op.getOpcode());
34134  if (!getTargetShuffleInputs(Op, OpDemandedElts, OpInputs, OpMask, OpUndef,
34135                              OpZero, DAG, Depth, false))
34136    return SDValue();
34137
34138  SmallVector<int, 64> Mask;
34139  SmallVector<SDValue, 16> Ops;
34140
34141  // We don't need to merge masks if the root is empty.
34142  bool EmptyRoot = (Depth == 0) && (RootMask.size() == 1);
34143  if (EmptyRoot) {
34144    // Only resolve zeros if it will remove an input; otherwise we might end
34145    // up in an infinite loop.
34146    bool ResolveKnownZeros = true;
34147    if (!OpZero.isNullValue()) {
34148      APInt UsedInputs = APInt::getNullValue(OpInputs.size());
34149      for (int i = 0, e = OpMask.size(); i != e; ++i) {
34150        int M = OpMask[i];
34151        if (OpUndef[i] || OpZero[i] || isUndefOrZero(M))
34152          continue;
34153        UsedInputs.setBit(M / OpMask.size());
34154        if (UsedInputs.isAllOnesValue()) {
34155          ResolveKnownZeros = false;
34156          break;
34157        }
34158      }
34159    }
34160    resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero,
34161                                      ResolveKnownZeros);
34162
34163    Mask = OpMask;
34164    Ops.append(OpInputs.begin(), OpInputs.end());
34165  } else {
34166    resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero);
34167
34168    // Add the inputs to the Ops list, avoiding duplicates.
34169    Ops.append(SrcOps.begin(), SrcOps.end());
34170
34171    auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
34172      // Attempt to find an existing match.
34173      SDValue InputBC = peekThroughBitcasts(Input);
34174      for (int i = 0, e = Ops.size(); i < e; ++i)
34175        if (InputBC == peekThroughBitcasts(Ops[i]))
34176          return i;
34177      // Match failed - should we replace an existing Op?
34178      if (InsertionPoint >= 0) {
34179        Ops[InsertionPoint] = Input;
34180        return InsertionPoint;
34181      }
34182      // Add to the end of the Ops list.
34183      Ops.push_back(Input);
34184      return Ops.size() - 1;
34185    };
34186
34187    SmallVector<int, 2> OpInputIdx;
34188    for (SDValue OpInput : OpInputs)
34189      OpInputIdx.push_back(
34190          AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));
34191
34192    assert(((RootMask.size() > OpMask.size() &&
34193             RootMask.size() % OpMask.size() == 0) ||
34194            (OpMask.size() > RootMask.size() &&
34195             OpMask.size() % RootMask.size() == 0) ||
34196            OpMask.size() == RootMask.size()) &&
34197           "The smaller number of elements must divide the larger.");
34198
34199    // This function can be performance-critical, so we rely on the power-of-2
34200    // knowledge that we have about the mask sizes to replace div/rem ops with
34201    // bit-masks and shifts.
34202    assert(isPowerOf2_32(RootMask.size()) &&
34203           "Non-power-of-2 shuffle mask sizes");
34204    assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes");
34205    unsigned RootMaskSizeLog2 = countTrailingZeros(RootMask.size());
34206    unsigned OpMaskSizeLog2 = countTrailingZeros(OpMask.size());
34207
34208    unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
34209    unsigned RootRatio =
34210        std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
34211    unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
34212    assert((RootRatio == 1 || OpRatio == 1) &&
34213           "Must not have a ratio for both incoming and op masks!");
34214
34215    assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
34216    assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
34217    assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
34218    unsigned RootRatioLog2 = countTrailingZeros(RootRatio);
34219    unsigned OpRatioLog2 = countTrailingZeros(OpRatio);
34220
34221    Mask.resize(MaskWidth, SM_SentinelUndef);
34222
34223    // Merge this shuffle operation's mask into our accumulated mask. Note that
34224    // this shuffle's mask will be the first applied to the input, followed by
34225    // the root mask to get us all the way to the root value arrangement. The
34226    // reason for this order is that we are recursing up the operation chain.
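    // Small worked example (for exposition): with RootMask == <2,3,0,1>
    // (v4i32-style) and OpMask == <1,0,3,2,4,5,6,7> (v8i16-style) on a single
    // input, MaskWidth == 8, RootRatio == 2, OpRatio == 1 and the merged mask
    // comes out as <4,5,6,7,1,0,3,2>.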
34227    for (unsigned i = 0; i < MaskWidth; ++i) {
34228      unsigned RootIdx = i >> RootRatioLog2;
34229      if (RootMask[RootIdx] < 0) {
34230        // This is a zero or undef lane, we're done.
34231        Mask[i] = RootMask[RootIdx];
34232        continue;
34233      }
34234
34235      unsigned RootMaskedIdx =
34236          RootRatio == 1
34237              ? RootMask[RootIdx]
34238              : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
34239
34240      // Just insert the scaled root mask value if it references an input other
34241      // than the SrcOp we're currently inserting.
34242      if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
34243          (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
34244        Mask[i] = RootMaskedIdx;
34245        continue;
34246      }
34247
34248      RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
34249      unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
34250      if (OpMask[OpIdx] < 0) {
34251        // The incoming lanes are zero or undef, it doesn't matter which ones we
34252        // The incoming lanes are zero or undef, so it doesn't matter which ones
34253        // we are using.
34254        continue;
34255      }
34256
34257      // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
34258      unsigned OpMaskedIdx = OpRatio == 1 ? OpMask[OpIdx]
34259                                          : (OpMask[OpIdx] << OpRatioLog2) +
34260                                                (RootMaskedIdx & (OpRatio - 1));
34261
34262      OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
34263      int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
34264      assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
34265      OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;
34266
34267      Mask[i] = OpMaskedIdx;
34268    }
34269  }
34270
34271  // Remove unused/repeated shuffle source ops.
34272  resolveTargetShuffleInputsAndMask(Ops, Mask);
34273
34274  // Handle the all undef/zero cases early.
34275  if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
34276    return DAG.getUNDEF(Root.getValueType());
34277
34278  // TODO - should we handle the mixed zero/undef case as well? Just returning
34279  // a zero mask will lose information on undef elements, possibly reducing
34280  // future combine possibilities.
34281  if (all_of(Mask, [](int Idx) { return Idx < 0; }))
34282    return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG,
34283                         SDLoc(Root));
34284
34285  assert(!Ops.empty() && "Shuffle with no inputs detected");
34286  HasVariableMask |= IsOpVariableMask;
34287
34288  // Update the list of shuffle nodes that have been combined so far.
34289  SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
34290                                                SrcNodes.end());
34291  CombinedNodes.push_back(Op.getNode());
34292
34293  // See if we can recurse into each shuffle source op (if it's a target
34294  // shuffle). The source op should generally only be combined if it either has
34295  // a single use (i.e. current Op) or all its users have already been combined.
34296  // If not, we can still combine but should prevent generation of variable
34297  // shuffles to avoid constant pool bloat.
34298  // Don't recurse if we already have more source ops than we can combine in
34299  // the remaining recursion depth.
34300  if (Ops.size() < (MaxRecursionDepth - Depth)) {
34301    for (int i = 0, e = Ops.size(); i < e; ++i) {
34302      // For empty roots, we need to resolve zeroable elements before combining
34303      // them with other shuffles.
34304      SmallVector<int, 64> ResolvedMask = Mask;
34305      if (EmptyRoot)
34306        resolveTargetShuffleFromZeroables(ResolvedMask, OpUndef, OpZero);
34307      bool AllowVar = false;
34308      if (Ops[i].getNode()->hasOneUse() ||
34309          SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode()))
34310        AllowVar = AllowVariableMask;
34311      if (SDValue Res = combineX86ShufflesRecursively(
34312              Ops, i, Root, ResolvedMask, CombinedNodes, Depth + 1,
34313              HasVariableMask, AllowVar, DAG, Subtarget))
34314        return Res;
34315    }
34316  }
34317
34318  // Attempt to constant fold all of the constant source ops.
34319  if (SDValue Cst = combineX86ShufflesConstants(
34320          Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
34321    return Cst;
34322
34323  // We can only combine unary and binary shuffle mask cases.
34324  if (Ops.size() <= 2) {
34325    // Minor canonicalization of the accumulated shuffle mask to make it easier
34326    // to match below. All this does is detect masks with sequential pairs of
34327    // elements, and shrink them to the half-width mask. It does this in a loop
34328    // so it will reduce the size of the mask to the minimal width mask which
34329    // performs an equivalent shuffle.
34330    SmallVector<int, 64> WidenedMask;
34331    while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
34332      Mask = std::move(WidenedMask);
34333    }
34334
34335    // Canonicalization of binary shuffle masks to improve pattern matching by
34336    // commuting the inputs.
34337    if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
34338      ShuffleVectorSDNode::commuteMask(Mask);
34339      std::swap(Ops[0], Ops[1]);
34340    }
34341
34342    // Finally, try to combine into a single shuffle instruction.
34343    return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask,
34344                                  AllowVariableMask, DAG, Subtarget);
34345  }
34346
34347  // If that failed and any input is extracted then try to combine as a
34348  // shuffle with the larger type.
34349  return combineX86ShuffleChainWithExtract(Ops, Root, Mask, Depth,
34350                                           HasVariableMask, AllowVariableMask,
34351                                           DAG, Subtarget);
34352}
34353
34354/// Helper entry wrapper to combineX86ShufflesRecursively.
34355static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
34356                                             const X86Subtarget &Subtarget) {
34357  return combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 0,
34358                                       /*HasVarMask*/ false,
34359                                       /*AllowVarMask*/ true, DAG, Subtarget);
34360}
34361
34362/// Get the PSHUF-style mask from PSHUF node.
34363///
34364/// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
34365/// PSHUF-style masks that can be reused with such instructions.
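/// For example (illustrative), a v8i16 PSHUFHW with full word mask
/// <0,1,2,3,7,6,5,4> is returned here as the rebased 4-element mask <3,2,1,0>.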
34366static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
34367  MVT VT = N.getSimpleValueType();
34368  SmallVector<int, 4> Mask;
34369  SmallVector<SDValue, 2> Ops;
34370  bool IsUnary;
34371  bool HaveMask =
34372      getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary);
34373  (void)HaveMask;
34374  assert(HaveMask);
34375
34376  // If we have more than 128-bits, only the low 128-bits of shuffle mask
34377  // If we have more than 128 bits, only the low 128 bits of the shuffle mask
34378  if (VT.getSizeInBits() > 128) {
34379    int LaneElts = 128 / VT.getScalarSizeInBits();
34380#ifndef NDEBUG
34381    for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
34382      for (int j = 0; j < LaneElts; ++j)
34383        assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
34384               "Mask doesn't repeat in high 128-bit lanes!");
34385#endif
34386    Mask.resize(LaneElts);
34387  }
34388
34389  switch (N.getOpcode()) {
34390  case X86ISD::PSHUFD:
34391    return Mask;
34392  case X86ISD::PSHUFLW:
34393    Mask.resize(4);
34394    return Mask;
34395  case X86ISD::PSHUFHW:
34396    Mask.erase(Mask.begin(), Mask.begin() + 4);
34397    for (int &M : Mask)
34398      M -= 4;
34399    return Mask;
34400  default:
34401    llvm_unreachable("No valid shuffle instruction found!");
34402  }
34403}
34404
34405/// Search for a combinable shuffle across a chain ending in pshufd.
34406///
34407/// We walk up the chain and look for a combinable shuffle, skipping over
34408/// shuffles that we could hoist this shuffle's transformation past without
34409/// altering anything.
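/// As a simple illustration, PSHUFD<1,0,3,2>(PSHUFD<2,3,0,1>(x)) is merged
/// here into the single shuffle PSHUFD<3,2,1,0>(x).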
34410static SDValue
34411combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
34412                             SelectionDAG &DAG) {
34413  assert(N.getOpcode() == X86ISD::PSHUFD &&
34414         "Called with something other than an x86 128-bit half shuffle!");
34415  SDLoc DL(N);
34416
34417  // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
34418  // of the shuffles in the chain so that we can form a fresh chain to replace
34419  // this one.
34420  SmallVector<SDValue, 8> Chain;
34421  SDValue V = N.getOperand(0);
34422  for (; V.hasOneUse(); V = V.getOperand(0)) {
34423    switch (V.getOpcode()) {
34424    default:
34425      return SDValue(); // Nothing combined!
34426
34427    case ISD::BITCAST:
34428      // Skip bitcasts as we always know the type for the target specific
34429      // instructions.
34430      continue;
34431
34432    case X86ISD::PSHUFD:
34433      // Found another dword shuffle.
34434      break;
34435
34436    case X86ISD::PSHUFLW:
34437      // Check that the low words (being shuffled) are the identity in the
34438      // dword shuffle, and the high words are self-contained.
34439      if (Mask[0] != 0 || Mask[1] != 1 ||
34440          !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
34441        return SDValue();
34442
34443      Chain.push_back(V);
34444      continue;
34445
34446    case X86ISD::PSHUFHW:
34447      // Check that the high words (being shuffled) are the identity in the
34448      // dword shuffle, and the low words are self-contained.
34449      if (Mask[2] != 2 || Mask[3] != 3 ||
34450          !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
34451        return SDValue();
34452
34453      Chain.push_back(V);
34454      continue;
34455
34456    case X86ISD::UNPCKL:
34457    case X86ISD::UNPCKH:
34458      // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
34459      // shuffle into a preceding word shuffle.
34460      if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
34461          V.getSimpleValueType().getVectorElementType() != MVT::i16)
34462        return SDValue();
34463
34464      // Search for a half-shuffle which we can combine with.
34465      unsigned CombineOp =
34466          V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
34467      if (V.getOperand(0) != V.getOperand(1) ||
34468          !V->isOnlyUserOf(V.getOperand(0).getNode()))
34469        return SDValue();
34470      Chain.push_back(V);
34471      V = V.getOperand(0);
34472      do {
34473        switch (V.getOpcode()) {
34474        default:
34475          return SDValue(); // Nothing to combine.
34476
34477        case X86ISD::PSHUFLW:
34478        case X86ISD::PSHUFHW:
34479          if (V.getOpcode() == CombineOp)
34480            break;
34481
34482          Chain.push_back(V);
34483
34484          LLVM_FALLTHROUGH;
34485        case ISD::BITCAST:
34486          V = V.getOperand(0);
34487          continue;
34488        }
34489        break;
34490      } while (V.hasOneUse());
34491      break;
34492    }
34493    // Break out of the loop if we break out of the switch.
34494    break;
34495  }
34496
34497  if (!V.hasOneUse())
34498    // We fell out of the loop without finding a viable combining instruction.
34499    return SDValue();
34500
34501  // Merge this node's mask and our incoming mask.
34502  SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
34503  for (int &M : Mask)
34504    M = VMask[M];
34505  V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
34506                  getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
34507
34508  // Rebuild the chain around this new shuffle.
34509  while (!Chain.empty()) {
34510    SDValue W = Chain.pop_back_val();
34511
34512    if (V.getValueType() != W.getOperand(0).getValueType())
34513      V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
34514
34515    switch (W.getOpcode()) {
34516    default:
34517      llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
34518
34519    case X86ISD::UNPCKL:
34520    case X86ISD::UNPCKH:
34521      V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
34522      break;
34523
34524    case X86ISD::PSHUFD:
34525    case X86ISD::PSHUFLW:
34526    case X86ISD::PSHUFHW:
34527      V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
34528      break;
34529    }
34530  }
34531  if (V.getValueType() != N.getValueType())
34532    V = DAG.getBitcast(N.getValueType(), V);
34533
34534  // Return the new chain to replace N.
34535  return V;
34536}
34537
34538/// Try to combine x86 target specific shuffles.
34539static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
34540                                    TargetLowering::DAGCombinerInfo &DCI,
34541                                    const X86Subtarget &Subtarget) {
34542  SDLoc DL(N);
34543  MVT VT = N.getSimpleValueType();
34544  SmallVector<int, 4> Mask;
34545  unsigned Opcode = N.getOpcode();
34546
34547  // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
34548  // single instruction.
34549  if (VT.getScalarSizeInBits() == 64 &&
34550      (Opcode == X86ISD::MOVSD || Opcode == X86ISD::UNPCKH ||
34551       Opcode == X86ISD::UNPCKL)) {
34552    auto BC0 = peekThroughBitcasts(N.getOperand(0));
34553    auto BC1 = peekThroughBitcasts(N.getOperand(1));
34554    EVT VT0 = BC0.getValueType();
34555    EVT VT1 = BC1.getValueType();
34556    unsigned Opcode0 = BC0.getOpcode();
34557    unsigned Opcode1 = BC1.getOpcode();
34558    if (Opcode0 == Opcode1 && VT0 == VT1 &&
34559        (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
34560         Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB ||
34561         Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS)) {
34562      SDValue Lo, Hi;
34563      if (Opcode == X86ISD::MOVSD) {
34564        Lo = BC1.getOperand(0);
34565        Hi = BC0.getOperand(1);
34566      } else {
34567        Lo = BC0.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
34568        Hi = BC1.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
34569      }
34570      SDValue Horiz = DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
34571      return DAG.getBitcast(VT, Horiz);
34572    }
34573  }
34574
34575  switch (Opcode) {
34576  case X86ISD::VBROADCAST: {
34577    SDValue Src = N.getOperand(0);
34578    SDValue BC = peekThroughBitcasts(Src);
34579    EVT SrcVT = Src.getValueType();
34580    EVT BCVT = BC.getValueType();
34581
34582    // If broadcasting from another shuffle, attempt to simplify it.
34583    // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
34584    if (isTargetShuffle(BC.getOpcode()) &&
34585        VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
34586      unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
34587      SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
34588                                        SM_SentinelUndef);
34589      for (unsigned i = 0; i != Scale; ++i)
34590        DemandedMask[i] = i;
34591      if (SDValue Res = combineX86ShufflesRecursively(
34592              {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 0,
34593              /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
34594        return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
34595                           DAG.getBitcast(SrcVT, Res));
34596    }
34597
34598    // broadcast(bitcast(src)) -> bitcast(broadcast(src))
34599    // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
34600    if (Src.getOpcode() == ISD::BITCAST &&
34601        SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits()) {
34602      EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
34603                                   VT.getVectorNumElements());
34604      return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
34605    }
34606
34607    // Reduce broadcast source vector to lowest 128-bits.
34608    if (SrcVT.getSizeInBits() > 128)
34609      return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
34610                         extract128BitVector(Src, 0, DAG, DL));
34611
34612    // broadcast(scalar_to_vector(x)) -> broadcast(x).
34613    if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
34614      return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
34615
34616    // Share broadcast with the longest vector and extract low subvector (free).
34617    for (SDNode *User : Src->uses())
34618      if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
34619          User->getValueSizeInBits(0) > VT.getSizeInBits()) {
34620        return extractSubVector(SDValue(User, 0), 0, DAG, DL,
34621                                VT.getSizeInBits());
34622      }
34623
34624    // vbroadcast(scalarload X) -> vbroadcast_load X
34625    // For float loads, extract other uses of the scalar from the broadcast.
34626    if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
34627        ISD::isNormalLoad(Src.getNode())) {
34628      LoadSDNode *LN = cast<LoadSDNode>(Src);
34629      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
34630      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
34631      SDValue BcastLd =
34632          DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
34633                                  LN->getMemoryVT(), LN->getMemOperand());
34634      // If the load value is used only by N, replace it via CombineTo N.
34635      bool NoReplaceExtract = Src.hasOneUse();
34636      DCI.CombineTo(N.getNode(), BcastLd);
34637      if (NoReplaceExtract) {
34638        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
34639        DCI.recursivelyDeleteUnusedNodes(LN);
34640      } else {
34641        SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
34642                                  DAG.getIntPtrConstant(0, DL));
34643        DCI.CombineTo(LN, Scl, BcastLd.getValue(1));
34644      }
34645      return N; // Return N so it doesn't get rechecked!
34646    }
34647
34648    return SDValue();
34649  }
34650  case X86ISD::BLENDI: {
34651    SDValue N0 = N.getOperand(0);
34652    SDValue N1 = N.getOperand(1);
34653
34654    // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
34655    // TODO: Handle MVT::v16i16 repeated blend mask.
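    // E.g. (illustrative, assuming the usual bit-duplication performed by
    // scaleVectorShuffleBlendMask) a v4i64 BLENDI with immediate 0b0101 over
    // bitcasts of v8i32 operands becomes a v8i32 BLENDI with immediate
    // 0b00110011, wrapped in a bitcast back to v4i64.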
34656    if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
34657        N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
34658      MVT SrcVT = N0.getOperand(0).getSimpleValueType();
34659      if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
34660          SrcVT.getScalarSizeInBits() >= 32) {
34661        unsigned BlendMask = N.getConstantOperandVal(2);
34662        unsigned Size = VT.getVectorNumElements();
34663        unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
34664        BlendMask = scaleVectorShuffleBlendMask(BlendMask, Size, Scale);
34665        return DAG.getBitcast(
34666            VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
34667                            N1.getOperand(0),
34668                            DAG.getTargetConstant(BlendMask, DL, MVT::i8)));
34669      }
34670    }
34671    return SDValue();
34672  }
34673  case X86ISD::VPERMI: {
34674    // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
34675    // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
34676    SDValue N0 = N.getOperand(0);
34677    SDValue N1 = N.getOperand(1);
34678    unsigned EltSizeInBits = VT.getScalarSizeInBits();
34679    if (N0.getOpcode() == ISD::BITCAST &&
34680        N0.getOperand(0).getScalarValueSizeInBits() == EltSizeInBits) {
34681      SDValue Src = N0.getOperand(0);
34682      EVT SrcVT = Src.getValueType();
34683      SDValue Res = DAG.getNode(X86ISD::VPERMI, DL, SrcVT, Src, N1);
34684      return DAG.getBitcast(VT, Res);
34685    }
34686    return SDValue();
34687  }
34688  case X86ISD::PSHUFD:
34689  case X86ISD::PSHUFLW:
34690  case X86ISD::PSHUFHW:
34691    Mask = getPSHUFShuffleMask(N);
34692    assert(Mask.size() == 4);
34693    break;
34694  case X86ISD::MOVSD:
34695  case X86ISD::MOVSS: {
34696    SDValue N0 = N.getOperand(0);
34697    SDValue N1 = N.getOperand(1);
34698
34699    // Canonicalize scalar FPOps:
34700    // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
34701    // If commutable, allow OP(N1[0], N0[0]).
34702    unsigned Opcode1 = N1.getOpcode();
34703    if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
34704        Opcode1 == ISD::FDIV) {
34705      SDValue N10 = N1.getOperand(0);
34706      SDValue N11 = N1.getOperand(1);
34707      if (N10 == N0 ||
34708          (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
34709        if (N10 != N0)
34710          std::swap(N10, N11);
34711        MVT SVT = VT.getVectorElementType();
34712        SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
34713        N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
34714        N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
34715        SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
34716        SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
34717        return DAG.getNode(Opcode, DL, VT, N0, SclVec);
34718      }
34719    }
34720
34721    return SDValue();
34722  }
34723  case X86ISD::INSERTPS: {
34724    assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
34725    SDValue Op0 = N.getOperand(0);
34726    SDValue Op1 = N.getOperand(1);
34727    SDValue Op2 = N.getOperand(2);
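    // The INSERTPS immediate encodes bits [7:6] = source element (CountS),
    // bits [5:4] = destination element (CountD) and bits [3:0] = zero mask,
    // which is what the fields below extract.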
34728    unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
34729    unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
34730    unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
34731    unsigned ZeroMask = InsertPSMask & 0xF;
34732
34733    // If we zero out all elements from Op0 then we don't need to reference it.
34734    if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
34735      return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
34736                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
34737
34738    // If we zero out the element from Op1 then we don't need to reference it.
34739    if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
34740      return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
34741                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
34742
34743    // Attempt to merge insertps Op1 with an inner target shuffle node.
34744    SmallVector<int, 8> TargetMask1;
34745    SmallVector<SDValue, 2> Ops1;
34746    APInt KnownUndef1, KnownZero1;
34747    if (getTargetShuffleAndZeroables(Op1, TargetMask1, Ops1, KnownUndef1,
34748                                     KnownZero1)) {
34749      if (KnownUndef1[SrcIdx] || KnownZero1[SrcIdx]) {
34750        // Zero/UNDEF insertion - zero out element and remove dependency.
34751        InsertPSMask |= (1u << DstIdx);
34752        return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
34753                           DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
34754      }
34755      // Update insertps mask srcidx and reference the source input directly.
34756      int M = TargetMask1[SrcIdx];
34757      assert(0 <= M && M < 8 && "Shuffle index out of range");
34758      InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
34759      Op1 = Ops1[M < 4 ? 0 : 1];
34760      return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
34761                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
34762    }
34763
34764    // Attempt to merge insertps Op0 with an inner target shuffle node.
34765    SmallVector<int, 8> TargetMask0;
34766    SmallVector<SDValue, 2> Ops0;
34767    APInt KnownUndef0, KnownZero0;
34768    if (getTargetShuffleAndZeroables(Op0, TargetMask0, Ops0, KnownUndef0,
34769                                     KnownZero0)) {
34770      bool Updated = false;
34771      bool UseInput00 = false;
34772      bool UseInput01 = false;
34773      for (int i = 0; i != 4; ++i) {
34774        if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
34775          // No change if element is already zero or the inserted element.
34776          continue;
34777        } else if (KnownUndef0[i] || KnownZero0[i]) {
34778          // If the target mask is undef/zero then we must zero the element.
34779          InsertPSMask |= (1u << i);
34780          Updated = true;
34781          continue;
34782        }
34783
34784        // The input vector element must be inline.
34785        int M = TargetMask0[i];
34786        if (M != i && M != (i + 4))
34787          return SDValue();
34788
34789        // Determine which inputs of the target shuffle we're using.
34790        UseInput00 |= (0 <= M && M < 4);
34791        UseInput01 |= (4 <= M);
34792      }
34793
34794      // If we're not using both inputs of the target shuffle then use the
34795      // referenced input directly.
34796      if (UseInput00 && !UseInput01) {
34797        Updated = true;
34798        Op0 = Ops0[0];
34799      } else if (!UseInput00 && UseInput01) {
34800        Updated = true;
34801        Op0 = Ops0[1];
34802      }
34803
34804      if (Updated)
34805        return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
34806                           DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
34807    }
34808
34809    // If we're inserting an element from a vbroadcast load, fold the
34810    // load into the X86insertps instruction. We need to convert the scalar
34811    // load to a vector and clear the source lane of the INSERTPS control.
34812    if (Op1.getOpcode() == X86ISD::VBROADCAST_LOAD && Op1.hasOneUse()) {
34813      auto *MemIntr = cast<MemIntrinsicSDNode>(Op1);
34814      if (MemIntr->getMemoryVT().getScalarSizeInBits() == 32) {
34815        SDValue Load = DAG.getLoad(MVT::f32, DL, MemIntr->getChain(),
34816                                   MemIntr->getBasePtr(),
34817                                   MemIntr->getMemOperand());
34818        SDValue Insert = DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0,
34819                           DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT,
34820                                       Load),
34821                           DAG.getTargetConstant(InsertPSMask & 0x3f, DL, MVT::i8));
34822        DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
34823        return Insert;
34824      }
34825    }
34826
34827    return SDValue();
34828  }
34829  default:
34830    return SDValue();
34831  }
34832
34833  // Nuke no-op shuffles that show up after combining.
34834  if (isNoopShuffleMask(Mask))
34835    return N.getOperand(0);
34836
34837  // Look for simplifications involving one or two shuffle instructions.
34838  SDValue V = N.getOperand(0);
34839  switch (N.getOpcode()) {
34840  default:
34841    break;
34842  case X86ISD::PSHUFLW:
34843  case X86ISD::PSHUFHW:
34844    assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
34845
34846    // See if this reduces to a PSHUFD which is no more expensive and can
34847    // combine with more operations. Note that it has to at least flip the
34848    // dwords as otherwise it would have been removed as a no-op.
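    // E.g. a PSHUFLW with word mask <2,3,0,1> just swaps the two low dwords,
    // so it is rebuilt below as a PSHUFD with dword mask <1,0,2,3>.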
34849    if (makeArrayRef(Mask).equals({2, 3, 0, 1})) {
34850      int DMask[] = {0, 1, 2, 3};
34851      int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
34852      DMask[DOffset + 0] = DOffset + 1;
34853      DMask[DOffset + 1] = DOffset + 0;
34854      MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
34855      V = DAG.getBitcast(DVT, V);
34856      V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
34857                      getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
34858      return DAG.getBitcast(VT, V);
34859    }
34860
34861    // Look for shuffle patterns which can be implemented as a single unpack.
34862    // FIXME: This doesn't handle the location of the PSHUFD generically, and
34863    // only works when we have a PSHUFD followed by two half-shuffles.
34864    if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
34865        (V.getOpcode() == X86ISD::PSHUFLW ||
34866         V.getOpcode() == X86ISD::PSHUFHW) &&
34867        V.getOpcode() != N.getOpcode() &&
34868        V.hasOneUse()) {
34869      SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
34870      if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
34871        SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
34872        SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
34873        int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
34874        int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
34875        int WordMask[8];
34876        for (int i = 0; i < 4; ++i) {
34877          WordMask[i + NOffset] = Mask[i] + NOffset;
34878          WordMask[i + VOffset] = VMask[i] + VOffset;
34879        }
34880        // Map the word mask through the DWord mask.
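        // e.g. (illustrative) WordMask[i] == 5 refers to dword 2 of the PSHUFD
        // result; if DMask == {0,2,1,3} that dword came from input dword 1, so
        // the word maps to index 2*1 + 5%2 == 3 in the PSHUFD input.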
34881        int MappedMask[8];
34882        for (int i = 0; i < 8; ++i)
34883          MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
34884        if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
34885            makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
34886          // We can replace all three shuffles with an unpack.
34887          V = DAG.getBitcast(VT, D.getOperand(0));
34888          return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
34889                                                : X86ISD::UNPCKH,
34890                             DL, VT, V, V);
34891        }
34892      }
34893    }
34894
34895    break;
34896
34897  case X86ISD::PSHUFD:
34898    if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
34899      return NewN;
34900
34901    break;
34902  }
34903
34904  return SDValue();
34905}
34906
34907/// Checks if the shuffle mask takes its elements alternately from two
34908/// vectors, with each element coming from the matching lane of its source.
34909/// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
34910static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
34911
34912  int ParitySrc[2] = {-1, -1};
34913  unsigned Size = Mask.size();
34914  for (unsigned i = 0; i != Size; ++i) {
34915    int M = Mask[i];
34916    if (M < 0)
34917      continue;
34918
34919    // Make sure we are using the matching element from the input.
34920    if ((M % Size) != i)
34921      return false;
34922
34923    // Make sure we use the same input for all elements of the same parity.
34924    int Src = M / Size;
34925    if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
34926      return false;
34927    ParitySrc[i % 2] = Src;
34928  }
34929
34930  // Make sure each input is used.
34931  if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
34932    return false;
34933
34934  Op0Even = ParitySrc[0] == 0;
34935  return true;
34936}
34937
34938/// Returns true iff the shuffle node \p N can be replaced with an ADDSUB(SUBADD)
34939/// operation. If true is returned, the operands of the ADDSUB(SUBADD) operation
34940/// are written to the parameters \p Opnd0 and \p Opnd1.
34941///
34942/// We combine shuffles to ADDSUB(SUBADD) directly on the abstract vector
34943/// shuffle nodes so they are easier to match generically. We also insert
34944/// dummy vector shuffle nodes for the operands which explicitly discard the
34945/// lanes which are unused by this operation, so that the fact that they are
34946/// unused flows through the rest of the combiner.
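///
/// For example (illustrative operands a/b):
///   shuffle (fsub a, b), (fadd a, b), <0, 5, 2, 7>
/// takes its even lanes from the FSUB and its odd lanes from the FADD, which
/// is exactly ADDSUB(a, b); Opnd0/Opnd1 are set to a/b and IsSubAdd is false.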
34947static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
34948                             SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
34949                             bool &IsSubAdd) {
34950
34951  EVT VT = N->getValueType(0);
34952  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
34953  if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
34954      !VT.getSimpleVT().isFloatingPoint())
34955    return false;
34956
34957  // We only handle target-independent shuffles.
34958  // FIXME: It would be easy and harmless to use the target shuffle mask
34959  // extraction tool to support more.
34960  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
34961    return false;
34962
34963  SDValue V1 = N->getOperand(0);
34964  SDValue V2 = N->getOperand(1);
34965
34966  // Make sure we have an FADD and an FSUB.
34967  if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
34968      (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
34969      V1.getOpcode() == V2.getOpcode())
34970    return false;
34971
34972  // If there are other uses of these operations we can't fold them.
34973  if (!V1->hasOneUse() || !V2->hasOneUse())
34974    return false;
34975
34976  // Ensure that both operations have the same operands. Note that we can
34977  // commute the FADD operands.
34978  SDValue LHS, RHS;
34979  if (V1.getOpcode() == ISD::FSUB) {
34980    LHS = V1->getOperand(0); RHS = V1->getOperand(1);
34981    if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
34982        (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
34983      return false;
34984  } else {
34985    assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
34986    LHS = V2->getOperand(0); RHS = V2->getOperand(1);
34987    if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
34988        (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
34989      return false;
34990  }
34991
34992  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
34993  bool Op0Even;
34994  if (!isAddSubOrSubAddMask(Mask, Op0Even))
34995    return false;
34996
34997  // It's a subadd if the operand providing the even lanes is an FADD.
34998  IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
34999                     : V2->getOpcode() == ISD::FADD;
35000
35001  Opnd0 = LHS;
35002  Opnd1 = RHS;
35003  return true;
35004}
35005
35006/// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
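/// For example (illustrative): shuffle (fma a, b, c), (X86Fmsub a, b, c),
/// <4, 1, 6, 3> takes its even lanes from the FMSUB and its odd lanes from the
/// FMA, which matches FMADDSUB(a, b, c).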
35007static SDValue combineShuffleToFMAddSub(SDNode *N,
35008                                        const X86Subtarget &Subtarget,
35009                                        SelectionDAG &DAG) {
35010  // We only handle target-independent shuffles.
35011  // FIXME: It would be easy and harmless to use the target shuffle mask
35012  // extraction tool to support more.
35013  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
35014    return SDValue();
35015
35016  MVT VT = N->getSimpleValueType(0);
35017  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35018  if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
35019    return SDValue();
35020
35021  // We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
35022  SDValue Op0 = N->getOperand(0);
35023  SDValue Op1 = N->getOperand(1);
35024  SDValue FMAdd = Op0, FMSub = Op1;
35025  if (FMSub.getOpcode() != X86ISD::FMSUB)
35026    std::swap(FMAdd, FMSub);
35027
35028  if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
35029      FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
35030      FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
35031      FMAdd.getOperand(2) != FMSub.getOperand(2))
35032    return SDValue();
35033
35034  // Check for correct shuffle mask.
35035  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
35036  bool Op0Even;
35037  if (!isAddSubOrSubAddMask(Mask, Op0Even))
35038    return SDValue();
35039
35040  // FMADDSUB takes its even result lanes from the FMSUB node.
35041  SDLoc DL(N);
35042  bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
35043  unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
35044  return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
35045                     FMAdd.getOperand(2));
35046}
35047
35048/// Try to combine a shuffle into a target-specific add-sub or
35049/// mul-add-sub node.
35050static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
35051                                                const X86Subtarget &Subtarget,
35052                                                SelectionDAG &DAG) {
35053  if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
35054    return V;
35055
35056  SDValue Opnd0, Opnd1;
35057  bool IsSubAdd;
35058  if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
35059    return SDValue();
35060
35061  MVT VT = N->getSimpleValueType(0);
35062  SDLoc DL(N);
35063
35064  // Try to generate X86ISD::FMADDSUB node here.
35065  SDValue Opnd2;
35066  if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
35067    unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
35068    return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
35069  }
35070
35071  if (IsSubAdd)
35072    return SDValue();
35073
35074  // Do not generate X86ISD::ADDSUB node for 512-bit types even though
35075  // the ADDSUB idiom has been successfully recognized. There are no known
35076  // X86 targets with 512-bit ADDSUB instructions!
35077  if (VT.is512BitVector())
35078    return SDValue();
35079
35080  return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
35081}
35082
35083// We are looking for a shuffle where both sources are concatenated with undef
35084// and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
35085// if we can express this as a single-source shuffle, that's preferable.
35086static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
35087                                           const X86Subtarget &Subtarget) {
35088  if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
35089    return SDValue();
35090
35091  EVT VT = N->getValueType(0);
35092
35093  // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
35094  if (!VT.is128BitVector() && !VT.is256BitVector())
35095    return SDValue();
35096
35097  if (VT.getVectorElementType() != MVT::i32 &&
35098      VT.getVectorElementType() != MVT::i64 &&
35099      VT.getVectorElementType() != MVT::f32 &&
35100      VT.getVectorElementType() != MVT::f64)
35101    return SDValue();
35102
35103  SDValue N0 = N->getOperand(0);
35104  SDValue N1 = N->getOperand(1);
35105
35106  // Check that both sources are concats with undef.
35107  if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
35108      N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
35109      N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
35110      !N1.getOperand(1).isUndef())
35111    return SDValue();
35112
35113  // Construct the new shuffle mask. Elements from the first source retain their
35114  // index, but elements from the second source no longer need to skip an undef.
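  // e.g. (illustrative) for a v8i32 shuffle, old mask element 9 refers to
  // t2[1]; in concat(t1, t2) that element sits at index 9 - 8/2 == 5.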
35115  SmallVector<int, 8> Mask;
35116  int NumElts = VT.getVectorNumElements();
35117
35118  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
35119  for (int Elt : SVOp->getMask())
35120    Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
35121
35122  SDLoc DL(N);
35123  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
35124                               N1.getOperand(0));
35125  return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
35126}
35127
35128/// Eliminate a redundant shuffle of a horizontal math op.
35129static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
35130  unsigned Opcode = N->getOpcode();
35131  if (Opcode != X86ISD::MOVDDUP && Opcode != X86ISD::VBROADCAST)
35132    if (Opcode != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef())
35133      return SDValue();
35134
35135  // For a broadcast, peek through an extract element of index 0 to find the
35136  // horizontal op: broadcast (ext_vec_elt HOp, 0)
35137  EVT VT = N->getValueType(0);
35138  if (Opcode == X86ISD::VBROADCAST) {
35139    SDValue SrcOp = N->getOperand(0);
35140    if (SrcOp.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
35141        SrcOp.getValueType() == MVT::f64 &&
35142        SrcOp.getOperand(0).getValueType() == VT &&
35143        isNullConstant(SrcOp.getOperand(1)))
35144      N = SrcOp.getNode();
35145  }
35146
35147  SDValue HOp = N->getOperand(0);
35148  if (HOp.getOpcode() != X86ISD::HADD && HOp.getOpcode() != X86ISD::FHADD &&
35149      HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
35150    return SDValue();
35151
35152  // 128-bit horizontal math instructions are defined to operate on adjacent
35153  // lanes of each operand as:
35154  // v4X32: A[0] + A[1] , A[2] + A[3] , B[0] + B[1] , B[2] + B[3]
35155  // ...similarly for v2f64 and v8i16.
35156  if (!HOp.getOperand(0).isUndef() && !HOp.getOperand(1).isUndef() &&
35157      HOp.getOperand(0) != HOp.getOperand(1))
35158    return SDValue();
35159
35160  // The shuffle that we are eliminating may have allowed the horizontal op to
35161  // have an undemanded (undefined) operand. Duplicate the other (defined)
35162  // operand to ensure that the results are defined across all lanes without the
35163  // shuffle.
35164  auto updateHOp = [](SDValue HorizOp, SelectionDAG &DAG) {
35165    SDValue X;
35166    if (HorizOp.getOperand(0).isUndef()) {
35167      assert(!HorizOp.getOperand(1).isUndef() && "Not expecting foldable h-op");
35168      X = HorizOp.getOperand(1);
35169    } else if (HorizOp.getOperand(1).isUndef()) {
35170      assert(!HorizOp.getOperand(0).isUndef() && "Not expecting foldable h-op");
35171      X = HorizOp.getOperand(0);
35172    } else {
35173      return HorizOp;
35174    }
35175    return DAG.getNode(HorizOp.getOpcode(), SDLoc(HorizOp),
35176                       HorizOp.getValueType(), X, X);
35177  };
35178
35179  // When the operands of a horizontal math op are identical, the low half of
35180  // the result is the same as the high half. If a target shuffle is also
35181  // replicating low and high halves (and without changing the type/length of
35182  // the vector), we don't need the shuffle.
35183  if (Opcode == X86ISD::MOVDDUP || Opcode == X86ISD::VBROADCAST) {
35184    if (HOp.getScalarValueSizeInBits() == 64 && HOp.getValueType() == VT) {
35185      // movddup (hadd X, X) --> hadd X, X
35186      // broadcast (extract_vec_elt (hadd X, X), 0) --> hadd X, X
35187      assert((HOp.getValueType() == MVT::v2f64 ||
35188              HOp.getValueType() == MVT::v4f64) && "Unexpected type for h-op");
35189      return updateHOp(HOp, DAG);
35190    }
35191    return SDValue();
35192  }
35193
35194  // shuffle (hadd X, X), undef, [low half...high half] --> hadd X, X
35195  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
35196  // TODO: Other mask possibilities like {1,1} and {1,0} could be added here,
35197  // but this should be tied to whatever horizontal op matching and shuffle
35198  // canonicalization are producing.
35199  if (HOp.getValueSizeInBits() == 128 &&
35200      (isTargetShuffleEquivalent(Mask, {0, 0}) ||
35201       isTargetShuffleEquivalent(Mask, {0, 1, 0, 1}) ||
35202       isTargetShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3})))
35203    return updateHOp(HOp, DAG);
35204
35205  if (HOp.getValueSizeInBits() == 256 &&
35206      (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2}) ||
35207       isTargetShuffleEquivalent(Mask, {0, 1, 0, 1, 4, 5, 4, 5}) ||
35208       isTargetShuffleEquivalent(
35209           Mask, {0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 8, 9, 10, 11})))
35210    return updateHOp(HOp, DAG);
35211
35212  return SDValue();
35213}
35214
35215/// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
35216/// low half of each source vector and does not set any high half elements in
35217/// the destination vector, narrow the shuffle to half its original size.
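/// For example (illustrative): a v8f32 shuffle with mask <0,8,1,9,u,u,u,u>
/// reads only the low v4f32 half of each source and leaves the high half of
/// the result undef, so it can be performed as a v4f32 shuffle with mask
/// <0,4,1,5> whose result is then concatenated with undef.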
35218static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
35219  if (!Shuf->getValueType(0).isSimple())
35220    return SDValue();
35221  MVT VT = Shuf->getSimpleValueType(0);
35222  if (!VT.is256BitVector() && !VT.is512BitVector())
35223    return SDValue();
35224
35225  // See if we can ignore all of the high elements of the shuffle.
35226  ArrayRef<int> Mask = Shuf->getMask();
35227  if (!isUndefUpperHalf(Mask))
35228    return SDValue();
35229
35230  // Check if the shuffle mask accesses only the low half of each input vector
35231  // (half-index output is 0 or 2).
35232  int HalfIdx1, HalfIdx2;
35233  SmallVector<int, 8> HalfMask(Mask.size() / 2);
35234  if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
35235      (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
35236    return SDValue();
35237
35238  // Create a half-width shuffle to replace the unnecessarily wide shuffle.
35239  // The trick is knowing that all of the insert/extract are actually free
35240  // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
35241  // of narrow inputs into a narrow output, and that is always cheaper than
35242  // the wide shuffle that we started with.
35243  return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
35244                               Shuf->getOperand(1), HalfMask, HalfIdx1,
35245                               HalfIdx2, false, DAG, /*UseConcat*/true);
35246}
35247
35248static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
35249                              TargetLowering::DAGCombinerInfo &DCI,
35250                              const X86Subtarget &Subtarget) {
35251  if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
35252    if (SDValue V = narrowShuffle(Shuf, DAG))
35253      return V;
35254
35255  // If we have legalized the vector types, look for blends of FADD and FSUB
35256  // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
35257  SDLoc dl(N);
35258  EVT VT = N->getValueType(0);
35259  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35260  if (TLI.isTypeLegal(VT)) {
35261    if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
35262      return AddSub;
35263
35264    if (SDValue HAddSub = foldShuffleOfHorizOp(N, DAG))
35265      return HAddSub;
35266  }
35267
35268  // Attempt to combine into a vector load/broadcast.
35269  if (SDValue LD = combineToConsecutiveLoads(VT, N, dl, DAG, Subtarget, true))
35270    return LD;
35271
35272  // For AVX2, we sometimes want to combine
35273  // (vector_shuffle <mask> (concat_vectors t1, undef)
35274  //                        (concat_vectors t2, undef))
35275  // Into:
35276  // (vector_shuffle <mask> (concat_vectors t1, t2), undef)
35277  // Since the latter can be efficiently lowered with VPERMD/VPERMQ
35278  if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
35279    return ShufConcat;
35280
35281  if (isTargetShuffle(N->getOpcode())) {
35282    SDValue Op(N, 0);
35283    if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
35284      return Shuffle;
35285
35286    // Try recursively combining arbitrary sequences of x86 shuffle
35287    // instructions into higher-order shuffles. We do this after combining
35288    // specific PSHUF instruction sequences into their minimal form so that we
35289    // can evaluate how many specialized shuffle instructions are involved in
35290    // a particular chain.
35291    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
35292      return Res;
35293
35294    // Simplify source operands based on shuffle mask.
35295    // TODO - merge this into combineX86ShufflesRecursively.
35296    APInt KnownUndef, KnownZero;
35297    APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
35298    if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, DCI))
35299      return SDValue(N, 0);
35300  }
35301
35302  // Look for a v2i64/v2f64 VZEXT_MOVL of a node that already produces zeros
35303  // in the upper 64 bits.
35304  // TODO: Can we generalize this using computeKnownBits?
35305  if (N->getOpcode() == X86ISD::VZEXT_MOVL &&
35306      (VT == MVT::v2f64 || VT == MVT::v2i64) &&
35307      N->getOperand(0).getOpcode() == ISD::BITCAST &&
35308      (N->getOperand(0).getOperand(0).getValueType() == MVT::v4f32 ||
35309       N->getOperand(0).getOperand(0).getValueType() == MVT::v4i32)) {
35310    SDValue In = N->getOperand(0).getOperand(0);
35311    switch (In.getOpcode()) {
35312    default:
35313      break;
35314    case X86ISD::CVTP2SI:   case X86ISD::CVTP2UI:
35315    case X86ISD::MCVTP2SI:  case X86ISD::MCVTP2UI:
35316    case X86ISD::CVTTP2SI:  case X86ISD::CVTTP2UI:
35317    case X86ISD::MCVTTP2SI: case X86ISD::MCVTTP2UI:
35318    case X86ISD::CVTSI2P:   case X86ISD::CVTUI2P:
35319    case X86ISD::MCVTSI2P:  case X86ISD::MCVTUI2P:
35320    case X86ISD::VFPROUND:  case X86ISD::VMFPROUND:
35321      if (In.getOperand(0).getValueType() == MVT::v2f64 ||
35322          In.getOperand(0).getValueType() == MVT::v2i64)
35323        return N->getOperand(0); // return the bitcast
35324      break;
35325    case X86ISD::STRICT_CVTTP2SI:
35326    case X86ISD::STRICT_CVTTP2UI:
35327    case X86ISD::STRICT_CVTSI2P:
35328    case X86ISD::STRICT_CVTUI2P:
35329    case X86ISD::STRICT_VFPROUND:
35330      if (In.getOperand(1).getValueType() == MVT::v2f64 ||
35331          In.getOperand(1).getValueType() == MVT::v2i64)
35332        return N->getOperand(0);
35333      break;
35334    }
35335  }
35336
35337  // Pull subvector inserts into undef through VZEXT_MOVL by making it an
35338  // insert into a zero vector. This helps get VZEXT_MOVL closer to
35339  // scalar_to_vectors where 256/512 are canonicalized to an insert and a
35340  // 128-bit scalar_to_vector. This reduces the number of isel patterns.
35341  if (N->getOpcode() == X86ISD::VZEXT_MOVL && !DCI.isBeforeLegalizeOps() &&
35342      N->getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR &&
35343      N->getOperand(0).hasOneUse() &&
35344      N->getOperand(0).getOperand(0).isUndef() &&
35345      isNullConstant(N->getOperand(0).getOperand(2))) {
35346    SDValue In = N->getOperand(0).getOperand(1);
35347    SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, dl, In.getValueType(), In);
35348    return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT,
35349                       getZeroVector(VT.getSimpleVT(), Subtarget, DAG, dl),
35350                       Movl, N->getOperand(0).getOperand(2));
35351  }
35352
35353  // If this is a vzmovl of a full vector load, replace it with a vzload, unless
35354  // the load is volatile.
35355  if (N->getOpcode() == X86ISD::VZEXT_MOVL && N->getOperand(0).hasOneUse() &&
35356      ISD::isNormalLoad(N->getOperand(0).getNode())) {
35357    LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
35358    if (LN->isSimple()) {
35359      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
35360      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
35361      SDValue VZLoad =
35362          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
35363                                  VT.getVectorElementType(),
35364                                  LN->getPointerInfo(),
35365                                  LN->getAlignment(),
35366                                  MachineMemOperand::MOLoad);
35367      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
35368      return VZLoad;
35369    }
35370  }
35371
35372  return SDValue();
35373}
35374
35375bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
35376    SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
35377    TargetLoweringOpt &TLO, unsigned Depth) const {
35378  int NumElts = DemandedElts.getBitWidth();
35379  unsigned Opc = Op.getOpcode();
35380  EVT VT = Op.getValueType();
35381
35382  // Handle special case opcodes.
35383  switch (Opc) {
35384  case X86ISD::PMULDQ:
35385  case X86ISD::PMULUDQ: {
35386    APInt LHSUndef, LHSZero;
35387    APInt RHSUndef, RHSZero;
35388    SDValue LHS = Op.getOperand(0);
35389    SDValue RHS = Op.getOperand(1);
35390    if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
35391                                   Depth + 1))
35392      return true;
35393    if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
35394                                   Depth + 1))
35395      return true;
35396    // Multiply by zero.
35397    KnownZero = LHSZero | RHSZero;
35398    break;
35399  }
35400  case X86ISD::VSHL:
35401  case X86ISD::VSRL:
35402  case X86ISD::VSRA: {
35403    // We only need the bottom 64-bits of the (128-bit) shift amount.
35404    SDValue Amt = Op.getOperand(1);
35405    MVT AmtVT = Amt.getSimpleValueType();
35406    assert(AmtVT.is128BitVector() && "Unexpected value type");
35407
35408    // If the shift amount is only ever used as an SSE shift amount then we know
35409    // that only the bottom 64-bits are ever used.
35410    bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
35411      unsigned UseOpc = Use->getOpcode();
35412      return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL ||
35413              UseOpc == X86ISD::VSRA) &&
35414             Use->getOperand(0) != Amt;
35415    });
35416
35417    APInt AmtUndef, AmtZero;
35418    unsigned NumAmtElts = AmtVT.getVectorNumElements();
35419    APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
35420    if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
35421                                   Depth + 1, AssumeSingleUse))
35422      return true;
35423    LLVM_FALLTHROUGH;
35424  }
35425  case X86ISD::VSHLI:
35426  case X86ISD::VSRLI:
35427  case X86ISD::VSRAI: {
35428    SDValue Src = Op.getOperand(0);
35429    APInt SrcUndef;
35430    if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
35431                                   Depth + 1))
35432      return true;
35433    // TODO convert SrcUndef to KnownUndef.
35434    break;
35435  }
35436  case X86ISD::KSHIFTL: {
35437    SDValue Src = Op.getOperand(0);
35438    auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
35439    assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
35440    unsigned ShiftAmt = Amt->getZExtValue();
35441
35442    if (ShiftAmt == 0)
35443      return TLO.CombineTo(Op, Src);
35444
35445    // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
35446    // single shift.  We can do this if the bottom elements (which are shifted
35447    // out) are never demanded.
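    // e.g. (illustrative) (kshiftl (kshiftr X, 2), 3) becomes (kshiftl X, 1)
    // when the low 3 elements of the result are not demanded.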
35448    if (Src.getOpcode() == X86ISD::KSHIFTR) {
35449      if (!DemandedElts.intersects(APInt::getLowBitsSet(NumElts, ShiftAmt))) {
35450        unsigned C1 = Src.getConstantOperandVal(1);
35451        unsigned NewOpc = X86ISD::KSHIFTL;
35452        int Diff = ShiftAmt - C1;
35453        if (Diff < 0) {
35454          Diff = -Diff;
35455          NewOpc = X86ISD::KSHIFTR;
35456        }
35457
35458        SDLoc dl(Op);
35459        SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
35460        return TLO.CombineTo(
35461            Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
35462      }
35463    }
35464
35465    APInt DemandedSrc = DemandedElts.lshr(ShiftAmt);
35466    if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
35467                                   Depth + 1))
35468      return true;
35469
35470    KnownUndef <<= ShiftAmt;
35471    KnownZero <<= ShiftAmt;
35472    KnownZero.setLowBits(ShiftAmt);
35473    break;
35474  }
35475  case X86ISD::KSHIFTR: {
35476    SDValue Src = Op.getOperand(0);
35477    auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
35478    assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
35479    unsigned ShiftAmt = Amt->getZExtValue();
35480
35481    if (ShiftAmt == 0)
35482      return TLO.CombineTo(Op, Src);
35483
35484    // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
35485    // single shift.  We can do this if the top elements (which are shifted
35486    // out) are never demanded.
35487    if (Src.getOpcode() == X86ISD::KSHIFTL) {
35488      if (!DemandedElts.intersects(APInt::getHighBitsSet(NumElts, ShiftAmt))) {
35489        unsigned C1 = Src.getConstantOperandVal(1);
35490        unsigned NewOpc = X86ISD::KSHIFTR;
35491        int Diff = ShiftAmt - C1;
35492        if (Diff < 0) {
35493          Diff = -Diff;
35494          NewOpc = X86ISD::KSHIFTL;
35495        }
35496
35497        SDLoc dl(Op);
35498        SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
35499        return TLO.CombineTo(
35500            Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
35501      }
35502    }
35503
35504    APInt DemandedSrc = DemandedElts.shl(ShiftAmt);
35505    if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
35506                                   Depth + 1))
35507      return true;
35508
35509    KnownUndef.lshrInPlace(ShiftAmt);
35510    KnownZero.lshrInPlace(ShiftAmt);
35511    KnownZero.setHighBits(ShiftAmt);
35512    break;
35513  }
35514  case X86ISD::CVTSI2P:
35515  case X86ISD::CVTUI2P: {
35516    SDValue Src = Op.getOperand(0);
35517    MVT SrcVT = Src.getSimpleValueType();
35518    APInt SrcUndef, SrcZero;
35519    APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
35520    if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
35521                                   Depth + 1))
35522      return true;
35523    break;
35524  }
35525  case X86ISD::PACKSS:
35526  case X86ISD::PACKUS: {
35527    SDValue N0 = Op.getOperand(0);
35528    SDValue N1 = Op.getOperand(1);
35529
35530    APInt DemandedLHS, DemandedRHS;
35531    getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
35532
35533    APInt SrcUndef, SrcZero;
35534    if (SimplifyDemandedVectorElts(N0, DemandedLHS, SrcUndef, SrcZero, TLO,
35535                                   Depth + 1))
35536      return true;
35537    if (SimplifyDemandedVectorElts(N1, DemandedRHS, SrcUndef, SrcZero, TLO,
35538                                   Depth + 1))
35539      return true;
35540
35541    // Aggressively peek through ops to get at the demanded elts.
35542    // TODO - we should do this for all target/faux shuffle ops.
35543    if (!DemandedElts.isAllOnesValue()) {
35544      APInt DemandedSrcBits =
35545          APInt::getAllOnesValue(N0.getScalarValueSizeInBits());
35546      SDValue NewN0 = SimplifyMultipleUseDemandedBits(
35547          N0, DemandedSrcBits, DemandedLHS, TLO.DAG, Depth + 1);
35548      SDValue NewN1 = SimplifyMultipleUseDemandedBits(
35549          N1, DemandedSrcBits, DemandedRHS, TLO.DAG, Depth + 1);
35550      if (NewN0 || NewN1) {
35551        NewN0 = NewN0 ? NewN0 : N0;
35552        NewN1 = NewN1 ? NewN1 : N1;
35553        return TLO.CombineTo(Op,
35554                             TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
35555      }
35556    }
35557    break;
35558  }
35559  case X86ISD::HADD:
35560  case X86ISD::HSUB:
35561  case X86ISD::FHADD:
35562  case X86ISD::FHSUB: {
35563    APInt DemandedLHS, DemandedRHS;
35564    getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
35565
35566    APInt LHSUndef, LHSZero;
35567    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, LHSUndef,
35568                                   LHSZero, TLO, Depth + 1))
35569      return true;
35570    APInt RHSUndef, RHSZero;
35571    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, RHSUndef,
35572                                   RHSZero, TLO, Depth + 1))
35573      return true;
35574    break;
35575  }
35576  case X86ISD::VTRUNC:
35577  case X86ISD::VTRUNCS:
35578  case X86ISD::VTRUNCUS: {
35579    SDValue Src = Op.getOperand(0);
35580    MVT SrcVT = Src.getSimpleValueType();
35581    APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
35582    APInt SrcUndef, SrcZero;
35583    if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
35584                                   Depth + 1))
35585      return true;
35586    KnownZero = SrcZero.zextOrTrunc(NumElts);
35587    KnownUndef = SrcUndef.zextOrTrunc(NumElts);
35588    break;
35589  }
35590  case X86ISD::BLENDV: {
35591    APInt SelUndef, SelZero;
35592    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
35593                                   SelZero, TLO, Depth + 1))
35594      return true;
35595
35596    // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
35597    APInt LHSUndef, LHSZero;
35598    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
35599                                   LHSZero, TLO, Depth + 1))
35600      return true;
35601
35602    APInt RHSUndef, RHSZero;
35603    if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
35604                                   RHSZero, TLO, Depth + 1))
35605      return true;
35606
35607    KnownZero = LHSZero & RHSZero;
35608    KnownUndef = LHSUndef & RHSUndef;
35609    break;
35610  }
35611  case X86ISD::VBROADCAST: {
35612    SDValue Src = Op.getOperand(0);
35613    MVT SrcVT = Src.getSimpleValueType();
35614    if (!SrcVT.isVector())
35615      return false;
35616    // Don't bother broadcasting if we just need the 0'th element.
35617    if (DemandedElts == 1) {
35618      if (Src.getValueType() != VT)
35619        Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
35620                             SDLoc(Op));
35621      return TLO.CombineTo(Op, Src);
35622    }
35623    APInt SrcUndef, SrcZero;
35624    APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
35625    if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
35626                                   Depth + 1))
35627      return true;
35628    break;
35629  }
35630  case X86ISD::VPERMV: {
35631    SDValue Mask = Op.getOperand(0);
35632    APInt MaskUndef, MaskZero;
35633    if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
35634                                   Depth + 1))
35635      return true;
35636    break;
35637  }
35638  case X86ISD::PSHUFB:
35639  case X86ISD::VPERMV3:
35640  case X86ISD::VPERMILPV: {
35641    SDValue Mask = Op.getOperand(1);
35642    APInt MaskUndef, MaskZero;
35643    if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
35644                                   Depth + 1))
35645      return true;
35646    break;
35647  }
35648  case X86ISD::VPPERM:
35649  case X86ISD::VPERMIL2: {
35650    SDValue Mask = Op.getOperand(2);
35651    APInt MaskUndef, MaskZero;
35652    if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
35653                                   Depth + 1))
35654      return true;
35655    break;
35656  }
35657  }
35658
35659  // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
35660  // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
35661  // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
35662  if ((VT.is256BitVector() || VT.is512BitVector()) &&
35663      DemandedElts.lshr(NumElts / 2) == 0) {
35664    unsigned SizeInBits = VT.getSizeInBits();
35665    unsigned ExtSizeInBits = SizeInBits / 2;
35666
35667    // See if 512-bit ops only use the bottom 128-bits.
35668    if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
35669      ExtSizeInBits = SizeInBits / 4;
35670
35671    switch (Opc) {
35672      // Zero upper elements.
35673    case X86ISD::VZEXT_MOVL: {
35674      SDLoc DL(Op);
35675      SDValue Ext0 =
35676          extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
35677      SDValue ExtOp =
35678          TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0);
35679      SDValue UndefVec = TLO.DAG.getUNDEF(VT);
35680      SDValue Insert =
35681          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
35682      return TLO.CombineTo(Op, Insert);
35683    }
35684      // Subvector broadcast.
35685    case X86ISD::SUBV_BROADCAST: {
35686      SDLoc DL(Op);
35687      SDValue Src = Op.getOperand(0);
35688      if (Src.getValueSizeInBits() > ExtSizeInBits)
35689        Src = extractSubVector(Src, 0, TLO.DAG, DL, ExtSizeInBits);
35690      else if (Src.getValueSizeInBits() < ExtSizeInBits) {
35691        MVT SrcSVT = Src.getSimpleValueType().getScalarType();
35692        MVT SrcVT =
35693            MVT::getVectorVT(SrcSVT, ExtSizeInBits / SrcSVT.getSizeInBits());
35694        Src = TLO.DAG.getNode(X86ISD::SUBV_BROADCAST, DL, SrcVT, Src);
35695      }
35696      return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Src, 0,
35697                                               TLO.DAG, DL, ExtSizeInBits));
35698    }
35699      // Byte shifts by immediate.
35700    case X86ISD::VSHLDQ:
35701    case X86ISD::VSRLDQ:
35702      // Shift by uniform.
35703    case X86ISD::VSHL:
35704    case X86ISD::VSRL:
35705    case X86ISD::VSRA:
35706      // Shift by immediate.
35707    case X86ISD::VSHLI:
35708    case X86ISD::VSRLI:
35709    case X86ISD::VSRAI: {
35710      SDLoc DL(Op);
35711      SDValue Ext0 =
35712          extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
35713      SDValue ExtOp =
35714          TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
35715      SDValue UndefVec = TLO.DAG.getUNDEF(VT);
35716      SDValue Insert =
35717          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
35718      return TLO.CombineTo(Op, Insert);
35719    }
35720    case X86ISD::VPERMI: {
35721      // Simplify PERMPD/PERMQ to extract_subvector.
35722      // TODO: This should be done in shuffle combining.
35723      if (VT == MVT::v4f64 || VT == MVT::v4i64) {
35724        SmallVector<int, 4> Mask;
35725        DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
35726        if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
35727          SDLoc DL(Op);
35728          SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
35729          SDValue UndefVec = TLO.DAG.getUNDEF(VT);
35730          SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
35731          return TLO.CombineTo(Op, Insert);
35732        }
35733      }
35734      break;
35735    }
35736      // Target Shuffles.
35737    case X86ISD::PSHUFB:
35738    case X86ISD::UNPCKL:
35739    case X86ISD::UNPCKH:
35740      // Saturated Packs.
35741    case X86ISD::PACKSS:
35742    case X86ISD::PACKUS:
35743      // Horizontal Ops.
35744    case X86ISD::HADD:
35745    case X86ISD::HSUB:
35746    case X86ISD::FHADD:
35747    case X86ISD::FHSUB: {
35748      SDLoc DL(Op);
35749      MVT ExtVT = VT.getSimpleVT();
35750      ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
35751                               ExtSizeInBits / ExtVT.getScalarSizeInBits());
35752      SDValue Ext0 =
35753          extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
35754      SDValue Ext1 =
35755          extractSubVector(Op.getOperand(1), 0, TLO.DAG, DL, ExtSizeInBits);
35756      SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ext0, Ext1);
35757      SDValue UndefVec = TLO.DAG.getUNDEF(VT);
35758      SDValue Insert =
35759          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
35760      return TLO.CombineTo(Op, Insert);
35761    }
35762    }
35763  }
35764
35765  // Get target/faux shuffle mask.
35766  APInt OpUndef, OpZero;
35767  SmallVector<int, 64> OpMask;
35768  SmallVector<SDValue, 2> OpInputs;
35769  if (!getTargetShuffleInputs(Op, DemandedElts, OpInputs, OpMask, OpUndef,
35770                              OpZero, TLO.DAG, Depth, false))
35771    return false;
35772
35773  // Shuffle inputs must be the same size as the result.
35774  if (OpMask.size() != (unsigned)NumElts ||
35775      llvm::any_of(OpInputs, [VT](SDValue V) {
35776        return VT.getSizeInBits() != V.getValueSizeInBits() ||
35777               !V.getValueType().isVector();
35778      }))
35779    return false;
35780
35781  KnownZero = OpZero;
35782  KnownUndef = OpUndef;
35783
35784  // Check if shuffle mask can be simplified to undef/zero/identity.
35785  int NumSrcs = OpInputs.size();
35786  for (int i = 0; i != NumElts; ++i)
35787    if (!DemandedElts[i])
35788      OpMask[i] = SM_SentinelUndef;
35789
35790  if (isUndefInRange(OpMask, 0, NumElts)) {
35791    KnownUndef.setAllBits();
35792    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
35793  }
35794  if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
35795    KnownZero.setAllBits();
35796    return TLO.CombineTo(
35797        Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
35798  }
35799  for (int Src = 0; Src != NumSrcs; ++Src)
35800    if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
35801      return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, OpInputs[Src]));
35802
35803  // Attempt to simplify inputs.
35804  for (int Src = 0; Src != NumSrcs; ++Src) {
35805    // TODO: Support inputs of different types.
35806    if (OpInputs[Src].getValueType() != VT)
35807      continue;
35808
35809    int Lo = Src * NumElts;
35810    APInt SrcElts = APInt::getNullValue(NumElts);
35811    for (int i = 0; i != NumElts; ++i)
35812      if (DemandedElts[i]) {
35813        int M = OpMask[i] - Lo;
35814        if (0 <= M && M < NumElts)
35815          SrcElts.setBit(M);
35816      }
35817
35818    // TODO - Propagate input undef/zero elts.
35819    APInt SrcUndef, SrcZero;
35820    if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
35821                                   TLO, Depth + 1))
35822      return true;
35823  }
35824
35825  // If we don't demand all elements, then attempt to combine to a simpler
35826  // shuffle.
35827  // TODO: Handle other depths, but first we need to handle the fact that
35828  // it might combine to the same shuffle.
35829  if (!DemandedElts.isAllOnesValue() && Depth == 0) {
35830    SmallVector<int, 64> DemandedMask(NumElts, SM_SentinelUndef);
35831    for (int i = 0; i != NumElts; ++i)
35832      if (DemandedElts[i])
35833        DemandedMask[i] = i;
35834
35835    SDValue NewShuffle = combineX86ShufflesRecursively(
35836        {Op}, 0, Op, DemandedMask, {}, Depth, /*HasVarMask*/ false,
35837        /*AllowVarMask*/ true, TLO.DAG, Subtarget);
35838    if (NewShuffle)
35839      return TLO.CombineTo(Op, NewShuffle);
35840  }
35841
35842  return false;
35843}
35844
35845bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
35846    SDValue Op, const APInt &OriginalDemandedBits,
35847    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
35848    unsigned Depth) const {
35849  EVT VT = Op.getValueType();
35850  unsigned BitWidth = OriginalDemandedBits.getBitWidth();
35851  unsigned Opc = Op.getOpcode();
35852  switch(Opc) {
35853  case X86ISD::PMULDQ:
35854  case X86ISD::PMULUDQ: {
35855    // PMULDQ/PMULUDQ only use the lower 32 bits from each vector element.
35856    KnownBits KnownOp;
35857    SDValue LHS = Op.getOperand(0);
35858    SDValue RHS = Op.getOperand(1);
35859    // FIXME: Can we bound this better?
35860    APInt DemandedMask = APInt::getLowBitsSet(64, 32);
35861    if (SimplifyDemandedBits(LHS, DemandedMask, OriginalDemandedElts, KnownOp,
35862                             TLO, Depth + 1))
35863      return true;
35864    if (SimplifyDemandedBits(RHS, DemandedMask, OriginalDemandedElts, KnownOp,
35865                             TLO, Depth + 1))
35866      return true;
35867
35868    // Aggressively peek through ops to get at the demanded low bits.
35869    SDValue DemandedLHS = SimplifyMultipleUseDemandedBits(
35870        LHS, DemandedMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
35871    SDValue DemandedRHS = SimplifyMultipleUseDemandedBits(
35872        RHS, DemandedMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
35873    if (DemandedLHS || DemandedRHS) {
35874      DemandedLHS = DemandedLHS ? DemandedLHS : LHS;
35875      DemandedRHS = DemandedRHS ? DemandedRHS : RHS;
35876      return TLO.CombineTo(
35877          Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, DemandedLHS, DemandedRHS));
35878    }
35879    break;
35880  }
35881  case X86ISD::VSHLI: {
35882    SDValue Op0 = Op.getOperand(0);
35883
35884    unsigned ShAmt = Op.getConstantOperandVal(1);
35885    if (ShAmt >= BitWidth)
35886      break;
35887
35888    APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
35889
35890    // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
35891    // single shift.  We can do this if the bottom bits (which are shifted
35892    // out) are never demanded.
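    // e.g. (illustrative) (vshli (vsrli X, 4), 6) becomes (vshli X, 2) when
    // none of the low 6 bits of the result are demanded.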
35893    if (Op0.getOpcode() == X86ISD::VSRLI &&
35894        OriginalDemandedBits.countTrailingZeros() >= ShAmt) {
35895      unsigned Shift2Amt = Op0.getConstantOperandVal(1);
35896      if (Shift2Amt < BitWidth) {
35897        int Diff = ShAmt - Shift2Amt;
35898        if (Diff == 0)
35899          return TLO.CombineTo(Op, Op0.getOperand(0));
35900
35901        unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
35902        SDValue NewShift = TLO.DAG.getNode(
35903            NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
35904            TLO.DAG.getTargetConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
35905        return TLO.CombineTo(Op, NewShift);
35906      }
35907    }
35908
35909    if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
35910                             TLO, Depth + 1))
35911      return true;
35912
35913    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
35914    Known.Zero <<= ShAmt;
35915    Known.One <<= ShAmt;
35916
35917    // Low bits known zero.
35918    Known.Zero.setLowBits(ShAmt);
35919    break;
35920  }
35921  case X86ISD::VSRLI: {
35922    unsigned ShAmt = Op.getConstantOperandVal(1);
35923    if (ShAmt >= BitWidth)
35924      break;
35925
35926    APInt DemandedMask = OriginalDemandedBits << ShAmt;
35927
35928    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
35929                             OriginalDemandedElts, Known, TLO, Depth + 1))
35930      return true;
35931
35932    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
35933    Known.Zero.lshrInPlace(ShAmt);
35934    Known.One.lshrInPlace(ShAmt);
35935
35936    // High bits known zero.
35937    Known.Zero.setHighBits(ShAmt);
35938    break;
35939  }
35940  case X86ISD::VSRAI: {
35941    SDValue Op0 = Op.getOperand(0);
35942    SDValue Op1 = Op.getOperand(1);
35943
35944    unsigned ShAmt = cast<ConstantSDNode>(Op1)->getZExtValue();
35945    if (ShAmt >= BitWidth)
35946      break;
35947
35948    APInt DemandedMask = OriginalDemandedBits << ShAmt;
35949
35950    // If we just want the sign bit then we don't need to shift it.
35951    if (OriginalDemandedBits.isSignMask())
35952      return TLO.CombineTo(Op, Op0);
35953
35954    // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
35955    if (Op0.getOpcode() == X86ISD::VSHLI &&
35956        Op.getOperand(1) == Op0.getOperand(1)) {
35957      SDValue Op00 = Op0.getOperand(0);
35958      unsigned NumSignBits =
35959          TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
35960      if (ShAmt < NumSignBits)
35961        return TLO.CombineTo(Op, Op00);
35962    }
35963
35964    // If any of the demanded bits are produced by the sign extension, we also
35965    // demand the input sign bit.
35966    if (OriginalDemandedBits.countLeadingZeros() < ShAmt)
35967      DemandedMask.setSignBit();
35968
35969    if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
35970                             TLO, Depth + 1))
35971      return true;
35972
35973    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
35974    Known.Zero.lshrInPlace(ShAmt);
35975    Known.One.lshrInPlace(ShAmt);
35976
35977    // If the input sign bit is known to be zero, or if none of the top bits
35978    // are demanded, turn this into an unsigned shift right.
35979    if (Known.Zero[BitWidth - ShAmt - 1] ||
35980        OriginalDemandedBits.countLeadingZeros() >= ShAmt)
35981      return TLO.CombineTo(
35982          Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));
35983
35984    // High bits are known one.
35985    if (Known.One[BitWidth - ShAmt - 1])
35986      Known.One.setHighBits(ShAmt);
35987    break;
35988  }
35989  case X86ISD::PEXTRB:
35990  case X86ISD::PEXTRW: {
35991    SDValue Vec = Op.getOperand(0);
35992    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
35993    MVT VecVT = Vec.getSimpleValueType();
35994    unsigned NumVecElts = VecVT.getVectorNumElements();
35995
35996    if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
35997      unsigned Idx = CIdx->getZExtValue();
35998      unsigned VecBitWidth = VecVT.getScalarSizeInBits();
35999
36000      // If we demand no bits from the vector then we must have demanded
36001      // bits from the implicit zext - simplify to zero.
36002      APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
36003      if (DemandedVecBits == 0)
36004        return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
36005
36006      APInt KnownUndef, KnownZero;
36007      APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
36008      if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
36009                                     KnownZero, TLO, Depth + 1))
36010        return true;
36011
36012      KnownBits KnownVec;
36013      if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
36014                               KnownVec, TLO, Depth + 1))
36015        return true;
36016
36017      if (SDValue V = SimplifyMultipleUseDemandedBits(
36018              Vec, DemandedVecBits, DemandedVecElts, TLO.DAG, Depth + 1))
36019        return TLO.CombineTo(
36020            Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, V, Op.getOperand(1)));
36021
36022      Known = KnownVec.zext(BitWidth, true);
36023      return false;
36024    }
36025    break;
36026  }
36027  case X86ISD::PINSRB:
36028  case X86ISD::PINSRW: {
36029    SDValue Vec = Op.getOperand(0);
36030    SDValue Scl = Op.getOperand(1);
36031    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
36032    MVT VecVT = Vec.getSimpleValueType();
36033
36034    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
36035      unsigned Idx = CIdx->getZExtValue();
36036      if (!OriginalDemandedElts[Idx])
36037        return TLO.CombineTo(Op, Vec);
36038
36039      KnownBits KnownVec;
36040      APInt DemandedVecElts(OriginalDemandedElts);
36041      DemandedVecElts.clearBit(Idx);
36042      if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
36043                               KnownVec, TLO, Depth + 1))
36044        return true;
36045
36046      KnownBits KnownScl;
36047      unsigned NumSclBits = Scl.getScalarValueSizeInBits();
36048      APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
36049      if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
36050        return true;
36051
36052      KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
36053      Known.One = KnownVec.One & KnownScl.One;
36054      Known.Zero = KnownVec.Zero & KnownScl.Zero;
36055      return false;
36056    }
36057    break;
36058  }
36059  case X86ISD::PACKSS:
36060    // PACKSS saturates to MIN/MAX integer values. So if we just want the
36061    // sign bit then we can just ask for the source operands' sign bits.
36062    // TODO - add known bits handling.
36063    if (OriginalDemandedBits.isSignMask()) {
36064      APInt DemandedLHS, DemandedRHS;
36065      getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);
36066
36067      KnownBits KnownLHS, KnownRHS;
36068      APInt SignMask = APInt::getSignMask(BitWidth * 2);
36069      if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
36070                               KnownLHS, TLO, Depth + 1))
36071        return true;
36072      if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
36073                               KnownRHS, TLO, Depth + 1))
36074        return true;
36075    }
36076    // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
36077    break;
36078  case X86ISD::PCMPGT:
36079    // icmp sgt(0, R) == ashr(R, BitWidth-1).
36080    // If we only need the sign bit then we can use R directly.
36081    if (OriginalDemandedBits.isSignMask() &&
36082        ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
36083      return TLO.CombineTo(Op, Op.getOperand(1));
36084    break;
36085  case X86ISD::MOVMSK: {
36086    SDValue Src = Op.getOperand(0);
36087    MVT SrcVT = Src.getSimpleValueType();
36088    unsigned SrcBits = SrcVT.getScalarSizeInBits();
36089    unsigned NumElts = SrcVT.getVectorNumElements();
36090
36091    // If we don't need the sign bits at all just return zero.
36092    if (OriginalDemandedBits.countTrailingZeros() >= NumElts)
36093      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
36094
36095    // Only demand the vector elements of the sign bits we need.
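    // e.g. (illustrative) for a v4f32 MOVMSK only the low 4 bits of the i32
    // result can be set; if just bit 2 is demanded then only element 2 of the
    // source needs its sign bit.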
36096    APInt KnownUndef, KnownZero;
36097    APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
36098    if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
36099                                   TLO, Depth + 1))
36100      return true;
36101
36102    Known.Zero = KnownZero.zextOrSelf(BitWidth);
36103    Known.Zero.setHighBits(BitWidth - NumElts);
36104
36105    // MOVMSK only uses the MSB from each vector element.
36106    KnownBits KnownSrc;
36107    if (SimplifyDemandedBits(Src, APInt::getSignMask(SrcBits), DemandedElts,
36108                             KnownSrc, TLO, Depth + 1))
36109      return true;
36110
36111    if (KnownSrc.One[SrcBits - 1])
36112      Known.One.setLowBits(NumElts);
36113    else if (KnownSrc.Zero[SrcBits - 1])
36114      Known.Zero.setLowBits(NumElts);
36115    return false;
36116  }
36117  }
36118
36119  return TargetLowering::SimplifyDemandedBitsForTargetNode(
36120      Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
36121}
36122
36123SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
36124    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
36125    SelectionDAG &DAG, unsigned Depth) const {
36126  int NumElts = DemandedElts.getBitWidth();
36127  unsigned Opc = Op.getOpcode();
36128  EVT VT = Op.getValueType();
36129
36130  switch (Opc) {
36131  case X86ISD::PINSRB:
36132  case X86ISD::PINSRW: {
36133    // If we don't demand the inserted element, return the base vector.
36134    SDValue Vec = Op.getOperand(0);
36135    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
36136    MVT VecVT = Vec.getSimpleValueType();
36137    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
36138        !DemandedElts[CIdx->getZExtValue()])
36139      return Vec;
36140    break;
36141  }
36142  case X86ISD::PCMPGT:
36143    // icmp sgt(0, R) == ashr(R, BitWidth-1).
36144    // If we only need the sign bit then we can use R directly.
36145    if (DemandedBits.isSignMask() &&
36146        ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
36147      return Op.getOperand(1);
36148    break;
36149  }
36150
36151  APInt ShuffleUndef, ShuffleZero;
36152  SmallVector<int, 16> ShuffleMask;
36153  SmallVector<SDValue, 2> ShuffleOps;
36154  if (getTargetShuffleInputs(Op, DemandedElts, ShuffleOps, ShuffleMask,
36155                             ShuffleUndef, ShuffleZero, DAG, Depth, false)) {
36156    // If all the demanded elts are from one operand and are inline,
36157    // then we can use the operand directly.
36158    int NumOps = ShuffleOps.size();
36159    if (ShuffleMask.size() == (unsigned)NumElts &&
36160        llvm::all_of(ShuffleOps, [VT](SDValue V) {
36161          return VT.getSizeInBits() == V.getValueSizeInBits();
36162        })) {
36163
36164      if (DemandedElts.isSubsetOf(ShuffleUndef))
36165        return DAG.getUNDEF(VT);
36166      if (DemandedElts.isSubsetOf(ShuffleUndef | ShuffleZero))
36167        return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));
36168
36169      // Bitmask that indicates which ops have only been accessed 'inline'.
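      // e.g. (illustrative) with two 4-element ops and mask <0,1,6,7>, lanes
      // 0/1 read op 0 in place but lanes 2/3 read op 1 in place, so IdentityOp
      // ends up empty and no single operand can be returned directly.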
36170      APInt IdentityOp = APInt::getAllOnesValue(NumOps);
36171      for (int i = 0; i != NumElts; ++i) {
36172        int M = ShuffleMask[i];
36173        if (!DemandedElts[i] || ShuffleUndef[i])
36174          continue;
36175        int Op = M / NumElts;
36176        int Index = M % NumElts;
36177        if (M < 0 || Index != i) {
36178          IdentityOp.clearAllBits();
36179          break;
36180        }
36181        IdentityOp &= APInt::getOneBitSet(NumOps, OpIdx);
36182        if (IdentityOp == 0)
36183          break;
36184      }
36185      assert((IdentityOp == 0 || IdentityOp.countPopulation() == 1) &&
36186             "Multiple identity shuffles detected");
36187
36188      if (IdentityOp != 0)
36189        return DAG.getBitcast(VT, ShuffleOps[IdentityOp.countTrailingZeros()]);
36190    }
36191  }
36192
36193  return TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
36194      Op, DemandedBits, DemandedElts, DAG, Depth);
36195}
36196
36197// Helper to peek through bitops/setcc to determine size of source vector.
36198// Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
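// e.g. for Size == 256, (xor (setcc (v4i64 a, b)), (setcc (v4i64 c, d)))
// matches, since both setcc operands are 256-bit vectors.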
36199static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size) {
36200  switch (Src.getOpcode()) {
36201  case ISD::SETCC:
36202    return Src.getOperand(0).getValueSizeInBits() == Size;
36203  case ISD::AND:
36204  case ISD::XOR:
36205  case ISD::OR:
36206    return checkBitcastSrcVectorSize(Src.getOperand(0), Size) &&
36207           checkBitcastSrcVectorSize(Src.getOperand(1), Size);
36208  }
36209  return false;
36210}
36211
36212// Helper to push sign extension of vXi1 SETCC result through bitops.
36213static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT,
36214                                          SDValue Src, const SDLoc &DL) {
36215  switch (Src.getOpcode()) {
36216  case ISD::SETCC:
36217    return DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
36218  case ISD::AND:
36219  case ISD::XOR:
36220  case ISD::OR:
36221    return DAG.getNode(
36222        Src.getOpcode(), DL, SExtVT,
36223        signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(0), DL),
36224        signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL));
36225  }
36226  llvm_unreachable("Unexpected node type for vXi1 sign extension");
36227}
36228
36229// Try to match patterns such as
36230// (i16 bitcast (v16i1 x))
36231// ->
36232// (i16 movmsk (16i8 sext (v16i1 x)))
36233// before the illegal vector is scalarized on subtargets that don't have legal
36234// vxi1 types.
36235static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
36236                                  const SDLoc &DL,
36237                                  const X86Subtarget &Subtarget) {
36238  EVT SrcVT = Src.getValueType();
36239  if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
36240    return SDValue();
36241
36242  // If the input is a truncate from v16i8, v32i8 or v64i8, go ahead and use a
36243  // movmskb even with avx512. This will be better than truncating to vXi1 and
36244  // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
36245  // vpcmpeqb/vpcmpgtb.
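  // e.g. for (i16 bitcast (v16i1 trunc (v16i8 pcmpeqb X, Y))) we sign-extend
  // the i1 vector back to v16i8 and emit a PMOVMSKB of the bytes instead of
  // forming a mask register with KMOV.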
36246  bool IsTruncated = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
36247                     (Src.getOperand(0).getValueType() == MVT::v16i8 ||
36248                      Src.getOperand(0).getValueType() == MVT::v32i8 ||
36249                      Src.getOperand(0).getValueType() == MVT::v64i8);
36250
36251  // With AVX512 vxi1 types are legal and we prefer using k-regs.
36252  // MOVMSK is supported in SSE2 or later.
36253  if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !IsTruncated))
36254    return SDValue();
36255
36256  // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v4f64 and
36257  // v8f64. So all legal 128-bit and 256-bit vectors are covered except for
36258  // v8i16 and v16i16.
36259  // For these two cases, we can shuffle the upper element bytes to a
36260  // consecutive sequence at the start of the vector and treat the results as
36261  // v16i8 or v32i8; for the v8i16 case this is the preferable solution.
36262  // However, for v16i16 this is not the case: the cross-lane shuffle is
36263  // expensive, so we avoid sign-extending to this type entirely.
36264  // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
36265  // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
36266  MVT SExtVT;
36267  bool PropagateSExt = false;
36268  switch (SrcVT.getSimpleVT().SimpleTy) {
36269  default:
36270    return SDValue();
36271  case MVT::v2i1:
36272    SExtVT = MVT::v2i64;
36273    break;
36274  case MVT::v4i1:
36275    SExtVT = MVT::v4i32;
36276    // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
36277    // sign-extend to a 256-bit operation to avoid truncation.
36278    if (Subtarget.hasAVX() && checkBitcastSrcVectorSize(Src, 256)) {
36279      SExtVT = MVT::v4i64;
36280      PropagateSExt = true;
36281    }
36282    break;
36283  case MVT::v8i1:
36284    SExtVT = MVT::v8i16;
36285    // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
36286    // sign-extend to a 256-bit operation to match the compare.
36287    // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
36288    // 256-bit because the shuffle is cheaper than sign extending the result of
36289    // the compare.
36290    if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256) ||
36291                               checkBitcastSrcVectorSize(Src, 512))) {
36292      SExtVT = MVT::v8i32;
36293      PropagateSExt = true;
36294    }
36295    break;
36296  case MVT::v16i1:
36297    SExtVT = MVT::v16i8;
36298    // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
36299    // it is not profitable to sign-extend to 256-bit because this will
36300    // require an extra cross-lane shuffle which is more expensive than
36301    // truncating the result of the compare to 128-bits.
36302    break;
36303  case MVT::v32i1:
36304    SExtVT = MVT::v32i8;
36305    break;
36306  case MVT::v64i1:
36307    // If we have AVX512F but not AVX512BW, and the input is a truncate from
36308    // v64i8 (checked earlier), then split the input and emit two PMOVMSKBs.
36309    if (Subtarget.hasAVX512() && !Subtarget.hasBWI()) {
36310      SExtVT = MVT::v64i8;
36311      break;
36312    }
36313    return SDValue();
36314  }
36315
36316  SDValue V = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
36317                            : DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
36318
36319  if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8 || SExtVT == MVT::v64i8) {
36320    V = getPMOVMSKB(DL, V, DAG, Subtarget);
36321  } else {
36322    if (SExtVT == MVT::v8i16)
36323      V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
36324                      DAG.getUNDEF(MVT::v8i16));
36325    V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
36326  }
36327
36328  EVT IntVT =
36329      EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
36330  V = DAG.getZExtOrTrunc(V, DL, IntVT);
36331  return DAG.getBitcast(VT, V);
36332}
36333
36334// Convert a vXi1 constant build vector to the same width scalar integer.
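// e.g. (v4i1 <1,0,1,1>) becomes the i4 constant 0b1101; undef elements are
// treated as zero.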
36335static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
36336  EVT SrcVT = Op.getValueType();
36337  assert(SrcVT.getVectorElementType() == MVT::i1 &&
36338         "Expected a vXi1 vector");
36339  assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
36340         "Expected a constant build vector");
36341
36342  APInt Imm(SrcVT.getVectorNumElements(), 0);
36343  for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
36344    SDValue In = Op.getOperand(Idx);
36345    if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
36346      Imm.setBit(Idx);
36347  }
36348  EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
36349  return DAG.getConstant(Imm, SDLoc(Op), IntVT);
36350}
36351
36352static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
36353                                           TargetLowering::DAGCombinerInfo &DCI,
36354                                           const X86Subtarget &Subtarget) {
36355  assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
36356
36357  if (!DCI.isBeforeLegalizeOps())
36358    return SDValue();
36359
36360  // Only do this if we have k-registers.
36361  if (!Subtarget.hasAVX512())
36362    return SDValue();
36363
36364  EVT DstVT = N->getValueType(0);
36365  SDValue Op = N->getOperand(0);
36366  EVT SrcVT = Op.getValueType();
36367
36368  if (!Op.hasOneUse())
36369    return SDValue();
36370
36371  // Look for logic ops.
36372  if (Op.getOpcode() != ISD::AND &&
36373      Op.getOpcode() != ISD::OR &&
36374      Op.getOpcode() != ISD::XOR)
36375    return SDValue();
36376
36377  // Make sure we have a bitcast between mask registers and a scalar type.
36378  if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
36379        DstVT.isScalarInteger()) &&
36380      !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
36381        SrcVT.isScalarInteger()))
36382    return SDValue();
36383
36384  SDValue LHS = Op.getOperand(0);
36385  SDValue RHS = Op.getOperand(1);
36386
36387  if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
36388      LHS.getOperand(0).getValueType() == DstVT)
36389    return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
36390                       DAG.getBitcast(DstVT, RHS));
36391
36392  if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
36393      RHS.getOperand(0).getValueType() == DstVT)
36394    return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
36395                       DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
36396
36397  // If the RHS is a vXi1 build vector, this is a good reason to flip too.
36398  // Most of these have to move a constant from the scalar domain anyway.
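  // e.g. (i16 bitcast (and (v16i1 X), (v16i1 build_vector C)))
  //   -> (and (i16 bitcast X), (i16 C')), where C' is the mask constant
  // converted to a scalar immediate.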
36399  if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
36400    RHS = combinevXi1ConstantToInteger(RHS, DAG);
36401    return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
36402                       DAG.getBitcast(DstVT, LHS), RHS);
36403  }
36404
36405  return SDValue();
36406}
36407
36408static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
36409                                    const X86Subtarget &Subtarget) {
36410  SDLoc DL(BV);
36411  unsigned NumElts = BV->getNumOperands();
36412  SDValue Splat = BV->getSplatValue();
36413
36414  // Build MMX element from integer GPR or SSE float values.
36415  auto CreateMMXElement = [&](SDValue V) {
36416    if (V.isUndef())
36417      return DAG.getUNDEF(MVT::x86mmx);
36418    if (V.getValueType().isFloatingPoint()) {
36419      if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
36420        V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
36421        V = DAG.getBitcast(MVT::v2i64, V);
36422        return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
36423      }
36424      V = DAG.getBitcast(MVT::i32, V);
36425    } else {
36426      V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
36427    }
36428    return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
36429  };
36430
36431  // Convert build vector ops to MMX data in the bottom elements.
36432  SmallVector<SDValue, 8> Ops;
36433
36434  // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
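  // e.g. splatting a v8i8 element: MOVW2D places it in the low lane,
  // PUNPCKLBW repeats the byte into the low 16 bits, and PSHUFW with an
  // all-zero selector broadcasts that 16-bit element across the register.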
36435  if (Splat) {
36436    if (Splat.isUndef())
36437      return DAG.getUNDEF(MVT::x86mmx);
36438
36439    Splat = CreateMMXElement(Splat);
36440
36441    if (Subtarget.hasSSE1()) {
36442      // Unpack v8i8 to splat i8 elements to lowest 16-bits.
36443      if (NumElts == 8)
36444        Splat = DAG.getNode(
36445            ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
36446            DAG.getConstant(Intrinsic::x86_mmx_punpcklbw, DL, MVT::i32), Splat,
36447            Splat);
36448
36449      // Use PSHUFW to repeat 16-bit elements.
36450      unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
36451      return DAG.getNode(
36452          ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
36453          DAG.getTargetConstant(Intrinsic::x86_sse_pshuf_w, DL, MVT::i32),
36454          Splat, DAG.getTargetConstant(ShufMask, DL, MVT::i8));
36455    }
36456    Ops.append(NumElts, Splat);
36457  } else {
36458    for (unsigned i = 0; i != NumElts; ++i)
36459      Ops.push_back(CreateMMXElement(BV->getOperand(i)));
36460  }
36461
36462  // Use tree of PUNPCKLs to build up general MMX vector.
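  // e.g. for v4i16 <a,b,c,d>: punpcklwd(a,b) and punpcklwd(c,d) first, then
  // punpckldq of those two results assembles the full 64-bit value.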
36463  while (Ops.size() > 1) {
36464    unsigned NumOps = Ops.size();
36465    unsigned IntrinOp =
36466        (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
36467                     : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
36468                                    : Intrinsic::x86_mmx_punpcklbw));
36469    SDValue Intrin = DAG.getConstant(IntrinOp, DL, MVT::i32);
36470    for (unsigned i = 0; i != NumOps; i += 2)
36471      Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
36472                               Ops[i], Ops[i + 1]);
36473    Ops.resize(NumOps / 2);
36474  }
36475
36476  return Ops[0];
36477}
36478
36479static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
36480                              TargetLowering::DAGCombinerInfo &DCI,
36481                              const X86Subtarget &Subtarget) {
36482  SDValue N0 = N->getOperand(0);
36483  EVT VT = N->getValueType(0);
36484  EVT SrcVT = N0.getValueType();
36485
36486  // Try to match patterns such as
36487  // (i16 bitcast (v16i1 x))
36488  // ->
36489  // (i16 movmsk (16i8 sext (v16i1 x)))
36490  // before the setcc result is scalarized on subtargets that don't have legal
36491  // vxi1 types.
36492  if (DCI.isBeforeLegalize()) {
36493    SDLoc dl(N);
36494    if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
36495      return V;
36496
36497    // Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
36498    // legalization destroys the v4i32 type.
36499    if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && SrcVT == MVT::v4i1 &&
36500        VT.isScalarInteger() && N0.getOpcode() == ISD::SETCC &&
36501        N0.getOperand(0).getValueType() == MVT::v4i32 &&
36502        ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode()) &&
36503        cast<CondCodeSDNode>(N0.getOperand(2))->get() == ISD::SETLT) {
36504      SDValue N00 = N0.getOperand(0);
36505      // Only do this if we can avoid scalarizing the input.
36506      if (ISD::isNormalLoad(N00.getNode()) ||
36507          (N00.getOpcode() == ISD::BITCAST &&
36508           N00.getOperand(0).getValueType() == MVT::v4f32)) {
36509        SDValue V = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32,
36510                                DAG.getBitcast(MVT::v4f32, N00));
36511        return DAG.getZExtOrTrunc(V, dl, VT);
36512      }
36513    }
36514
36515    // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
36516    // type, widen both sides to avoid a trip through memory.
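    // e.g. (v4i1 bitcast (i4 X))
    //   -> (extract_subvector (v8i1 bitcast (i8 anyext X)), 0)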
36517    if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
36518        Subtarget.hasAVX512()) {
36519      N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
36520      N0 = DAG.getBitcast(MVT::v8i1, N0);
36521      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
36522                         DAG.getIntPtrConstant(0, dl));
36523    }
36524
36525    // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
36526    // type, widen both sides to avoid a trip through memory.
36527    if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
36528        Subtarget.hasAVX512()) {
36529      // Use zeros for the widening if we already have some zeroes. This can
36530      // allow SimplifyDemandedBits to remove scalar ANDs that may be down
36531      // stream of this.
36532      // FIXME: It might make sense to detect a concat_vectors with a mix of
36533      // zeroes and undef and turn it into insert_subvector for i1 vectors as
36534      // a separate combine. What we can't do is canonicalize the operands of
36535      // such a concat or we'll get into a loop with SimplifyDemandedBits.
36536      if (N0.getOpcode() == ISD::CONCAT_VECTORS) {
36537        SDValue LastOp = N0.getOperand(N0.getNumOperands() - 1);
36538        if (ISD::isBuildVectorAllZeros(LastOp.getNode())) {
36539          SrcVT = LastOp.getValueType();
36540          unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
36541          SmallVector<SDValue, 4> Ops(N0->op_begin(), N0->op_end());
36542          Ops.resize(NumConcats, DAG.getConstant(0, dl, SrcVT));
36543          N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
36544          N0 = DAG.getBitcast(MVT::i8, N0);
36545          return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
36546        }
36547      }
36548
36549      unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
36550      SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
36551      Ops[0] = N0;
36552      N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
36553      N0 = DAG.getBitcast(MVT::i8, N0);
36554      return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
36555    }
36556  }
36557
36558  // Look for (i8 (bitcast (v8i1 (extract_subvector (v16i1 X), 0)))) and
36559  // replace with (i8 (trunc (i16 (bitcast (v16i1 X))))). This can occur
36560  // due to insert_subvector legalization on KNL. By promoting the copy to i16
36561  // we can help with known bits propagation from the vXi1 domain to the
36562  // scalar domain.
36563  if (VT == MVT::i8 && SrcVT == MVT::v8i1 && Subtarget.hasAVX512() &&
36564      !Subtarget.hasDQI() && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
36565      N0.getOperand(0).getValueType() == MVT::v16i1 &&
36566      isNullConstant(N0.getOperand(1)))
36567    return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT,
36568                       DAG.getBitcast(MVT::i16, N0.getOperand(0)));
36569
36570  // Combine (bitcast (vbroadcast_load)) -> (vbroadcast_load). The memory VT
36571  // determines the number of bits loaded. Remaining bits are zero.
36572  if (N0.getOpcode() == X86ISD::VBROADCAST_LOAD && N0.hasOneUse() &&
36573      VT.getScalarSizeInBits() == SrcVT.getScalarSizeInBits()) {
36574    auto *BCast = cast<MemIntrinsicSDNode>(N0);
36575    SDVTList Tys = DAG.getVTList(VT, MVT::Other);
36576    SDValue Ops[] = { BCast->getChain(), BCast->getBasePtr() };
36577    SDValue ResNode =
36578        DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
36579                                VT.getVectorElementType(),
36580                                BCast->getMemOperand());
36581    DAG.ReplaceAllUsesOfValueWith(SDValue(BCast, 1), ResNode.getValue(1));
36582    return ResNode;
36583  }
36584
36585  // Since MMX types are special and don't usually play with other vector types,
36586  // it's better to handle them early to be sure we emit efficient code by
36587  // avoiding store-load conversions.
36588  if (VT == MVT::x86mmx) {
36589    // Detect MMX constant vectors.
36590    APInt UndefElts;
36591    SmallVector<APInt, 1> EltBits;
36592    if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
36593      SDLoc DL(N0);
36594      // Handle zero-extension of i32 with MOVD.
36595      if (EltBits[0].countLeadingZeros() >= 32)
36596        return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
36597                           DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
36598      // Else, bitcast to a double.
36599      // TODO - investigate supporting sext 32-bit immediates on x86_64.
36600      APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
36601      return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
36602    }
36603
36604    // Detect bitcasts to x86mmx low word.
36605    if (N0.getOpcode() == ISD::BUILD_VECTOR &&
36606        (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
36607        N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
36608      bool LowUndef = true, AllUndefOrZero = true;
36609      for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
36610        SDValue Op = N0.getOperand(i);
36611        LowUndef &= Op.isUndef() || (i >= e/2);
36612        AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
36613      }
36614      if (AllUndefOrZero) {
36615        SDValue N00 = N0.getOperand(0);
36616        SDLoc dl(N00);
36617        N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
36618                       : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
36619        return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
36620      }
36621    }
36622
36623    // Detect bitcasts of 64-bit build vectors and convert to a
36624    // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
36625    // lowest element.
36626    if (N0.getOpcode() == ISD::BUILD_VECTOR &&
36627        (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
36628         SrcVT == MVT::v8i8))
36629      return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);
36630
36631    // Detect bitcasts between element or subvector extraction to x86mmx.
36632    if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
36633         N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
36634        isNullConstant(N0.getOperand(1))) {
36635      SDValue N00 = N0.getOperand(0);
36636      if (N00.getValueType().is128BitVector())
36637        return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
36638                           DAG.getBitcast(MVT::v2i64, N00));
36639    }
36640
36641    // Detect bitcasts from FP_TO_SINT to x86mmx.
36642    if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
36643      SDLoc DL(N0);
36644      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
36645                                DAG.getUNDEF(MVT::v2i32));
36646      return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
36647                         DAG.getBitcast(MVT::v2i64, Res));
36648    }
36649  }
36650
36651  // Try to remove a bitcast of constant vXi1 vector. We have to legalize
36652  // most of these to scalar anyway.
36653  if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
36654      SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
36655      ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
36656    return combinevXi1ConstantToInteger(N0, DAG);
36657  }
36658
36659  if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
36660      VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
36661      isa<ConstantSDNode>(N0)) {
36662    auto *C = cast<ConstantSDNode>(N0);
36663    if (C->isAllOnesValue())
36664      return DAG.getConstant(1, SDLoc(N0), VT);
36665    if (C->isNullValue())
36666      return DAG.getConstant(0, SDLoc(N0), VT);
36667  }
36668
36669  // Try to remove bitcasts from input and output of mask arithmetic to
36670  // remove GPR<->K-register crossings.
36671  if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
36672    return V;
36673
36674  // Convert a bitcasted integer logic operation that has one bitcasted
36675  // floating-point operand into a floating-point logic operation. This may
36676  // create a load of a constant, but that is cheaper than materializing the
36677  // constant in an integer register and transferring it to an SSE register or
36678  // transferring the SSE operand to integer register and back.
36679  unsigned FPOpcode;
36680  switch (N0.getOpcode()) {
36681    case ISD::AND: FPOpcode = X86ISD::FAND; break;
36682    case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
36683    case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
36684    default: return SDValue();
36685  }
36686
36687  if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
36688        (Subtarget.hasSSE2() && VT == MVT::f64)))
36689    return SDValue();
36690
36691  SDValue LogicOp0 = N0.getOperand(0);
36692  SDValue LogicOp1 = N0.getOperand(1);
36693  SDLoc DL0(N0);
36694
36695  // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
36696  if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
36697      LogicOp0.hasOneUse() && LogicOp0.getOperand(0).getValueType() == VT &&
36698      !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
36699    SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
36700    return DAG.getNode(FPOpcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
36701  }
36702  // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
36703  if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
36704      LogicOp1.hasOneUse() && LogicOp1.getOperand(0).getValueType() == VT &&
36705      !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
36706    SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
36707    return DAG.getNode(FPOpcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
36708  }
36709
36710  return SDValue();
36711}
36712
36713// Given a ABS node, detect the following pattern:
36714// (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
36715// This is useful as it is the input into a SAD pattern.
36716static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
36717  SDValue AbsOp1 = Abs->getOperand(0);
36718  if (AbsOp1.getOpcode() != ISD::SUB)
36719    return false;
36720
36721  Op0 = AbsOp1.getOperand(0);
36722  Op1 = AbsOp1.getOperand(1);
36723
36724  // Check if the operands of the sub are zero-extended from vectors of i8.
36725  if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
36726      Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
36727      Op1.getOpcode() != ISD::ZERO_EXTEND ||
36728      Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
36729    return false;
36730
36731  return true;
36732}
36733
36734// Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
36735// to these zexts.
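// PSADBW computes, per 64-bit lane, the sum of the absolute differences of
// the corresponding byte pairs, so the horizontal reduction of |a - b| comes
// directly from the instruction.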
36736static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
36737                            const SDValue &Zext1, const SDLoc &DL,
36738                            const X86Subtarget &Subtarget) {
36739  // Find the appropriate width for the PSADBW.
36740  EVT InVT = Zext0.getOperand(0).getValueType();
36741  unsigned RegSize = std::max(128u, (unsigned)InVT.getSizeInBits());
36742
36743  // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
36744  // fill in the missing vector elements with 0.
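  // e.g. with 128-bit registers, a v4i8 input is concatenated with three v4i8
  // zero vectors to form the v16i8 PSADBW operand; the zero elements add
  // nothing to the sums.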
36745  unsigned NumConcat = RegSize / InVT.getSizeInBits();
36746  SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
36747  Ops[0] = Zext0.getOperand(0);
36748  MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
36749  SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
36750  Ops[0] = Zext1.getOperand(0);
36751  SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
36752
36753  // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
36754  auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
36755                          ArrayRef<SDValue> Ops) {
36756    MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
36757    return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
36758  };
36759  MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
36760  return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
36761                          PSADBWBuilder);
36762}
36763
36764 // Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
36765// PHMINPOSUW.
36766static SDValue combineHorizontalMinMaxResult(SDNode *Extract, SelectionDAG &DAG,
36767                                             const X86Subtarget &Subtarget) {
36768  // Bail without SSE41.
36769  if (!Subtarget.hasSSE41())
36770    return SDValue();
36771
36772  EVT ExtractVT = Extract->getValueType(0);
36773  if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
36774    return SDValue();
36775
36776  // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
36777  ISD::NodeType BinOp;
36778  SDValue Src = DAG.matchBinOpReduction(
36779      Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, true);
36780  if (!Src)
36781    return SDValue();
36782
36783  EVT SrcVT = Src.getValueType();
36784  EVT SrcSVT = SrcVT.getScalarType();
36785  if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
36786    return SDValue();
36787
36788  SDLoc DL(Extract);
36789  SDValue MinPos = Src;
36790
36791  // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
36792  while (SrcVT.getSizeInBits() > 128) {
36793    unsigned NumElts = SrcVT.getVectorNumElements();
36794    unsigned NumSubElts = NumElts / 2;
36795    SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcSVT, NumSubElts);
36796    unsigned SubSizeInBits = SrcVT.getSizeInBits();
36797    SDValue Lo = extractSubVector(MinPos, 0, DAG, DL, SubSizeInBits);
36798    SDValue Hi = extractSubVector(MinPos, NumSubElts, DAG, DL, SubSizeInBits);
36799    MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
36800  }
36801  assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
36802          (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
36803         "Unexpected value type");
36804
36805  // PHMINPOSUW applies to UMIN(v8i16), for SMIN/SMAX/UMAX we must apply a mask
36806  // to flip the value accordingly.
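  // e.g. for an SMAX reduction we XOR each element with 0x7FFF so the signed
  // maximum becomes the unsigned minimum, run PHMINPOSUW, and XOR again
  // afterwards to recover the original value.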
36807  SDValue Mask;
36808  unsigned MaskEltsBits = ExtractVT.getSizeInBits();
36809  if (BinOp == ISD::SMAX)
36810    Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
36811  else if (BinOp == ISD::SMIN)
36812    Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
36813  else if (BinOp == ISD::UMAX)
36814    Mask = DAG.getConstant(APInt::getAllOnesValue(MaskEltsBits), DL, SrcVT);
36815
36816  if (Mask)
36817    MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
36818
36819  // For v16i8 cases we need to perform UMIN on pairs of byte elements,
36820  // shuffling each upper element down and insert zeros. This means that the
36821  // v16i8 UMIN will leave the upper element as zero, performing zero-extension
36822  // ready for the PHMINPOS.
36823  if (ExtractVT == MVT::i8) {
36824    SDValue Upper = DAG.getVectorShuffle(
36825        SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
36826        {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
36827    MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
36828  }
36829
36830  // Perform the PHMINPOS on a v8i16 vector.
36831  MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
36832  MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
36833  MinPos = DAG.getBitcast(SrcVT, MinPos);
36834
36835  if (Mask)
36836    MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
36837
36838  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
36839                     DAG.getIntPtrConstant(0, DL));
36840}
36841
36842// Attempt to replace an all_of/any_of/parity style horizontal reduction with a MOVMSK.
36843static SDValue combineHorizontalPredicateResult(SDNode *Extract,
36844                                                SelectionDAG &DAG,
36845                                                const X86Subtarget &Subtarget) {
36846  // Bail without SSE2.
36847  if (!Subtarget.hasSSE2())
36848    return SDValue();
36849
36850  EVT ExtractVT = Extract->getValueType(0);
36851  unsigned BitWidth = ExtractVT.getSizeInBits();
36852  if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
36853      ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
36854    return SDValue();
36855
36856  // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
36857  ISD::NodeType BinOp;
36858  SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
36859  if (!Match && ExtractVT == MVT::i1)
36860    Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
36861  if (!Match)
36862    return SDValue();
36863
36864  // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
36865  // which we can't support here for now.
36866  if (Match.getScalarValueSizeInBits() != BitWidth)
36867    return SDValue();
36868
36869  SDValue Movmsk;
36870  SDLoc DL(Extract);
36871  EVT MatchVT = Match.getValueType();
36872  unsigned NumElts = MatchVT.getVectorNumElements();
36873  unsigned MaxElts = Subtarget.hasInt256() ? 32 : 16;
36874  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
36875
36876  if (ExtractVT == MVT::i1) {
36877    // Special case for (pre-legalization) vXi1 reductions.
36878    if (NumElts > 64 || !isPowerOf2_32(NumElts))
36879      return SDValue();
36880    if (TLI.isTypeLegal(MatchVT)) {
36881      // If this is a legal AVX512 predicate type then we can just bitcast.
36882      EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
36883      Movmsk = DAG.getBitcast(MovmskVT, Match);
36884    } else {
36885      // Use combineBitcastvxi1 to create the MOVMSK.
36886      while (NumElts > MaxElts) {
36887        SDValue Lo, Hi;
36888        std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
36889        Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
36890        NumElts /= 2;
36891      }
36892      EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
36893      Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
36894    }
36895    if (!Movmsk)
36896      return SDValue();
36897    Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
36898  } else {
36899    // Bail with AVX512VL (which uses predicate registers).
36900    if (Subtarget.hasVLX())
36901      return SDValue();
36902
36903    unsigned MatchSizeInBits = Match.getValueSizeInBits();
36904    if (!(MatchSizeInBits == 128 ||
36905          (MatchSizeInBits == 256 && Subtarget.hasAVX())))
36906      return SDValue();
36907
36908    // Make sure this isn't a vector of 1 element. The perf win from using
36909    // MOVMSK diminishes with fewer elements in the reduction, but it is
36910    // generally better to get the comparison over to the GPRs as soon as
36911    // possible to reduce the number of vector ops.
36912    if (Match.getValueType().getVectorNumElements() < 2)
36913      return SDValue();
36914
36915    // Check that we are extracting a reduction of all sign bits.
36916    if (DAG.ComputeNumSignBits(Match) != BitWidth)
36917      return SDValue();
36918
36919    if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
36920      SDValue Lo, Hi;
36921      std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
36922      Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
36923      MatchSizeInBits = Match.getValueSizeInBits();
36924    }
36925
36926    // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
36927    MVT MaskSrcVT;
36928    if (64 == BitWidth || 32 == BitWidth)
36929      MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
36930                                   MatchSizeInBits / BitWidth);
36931    else
36932      MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
36933
36934    SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
36935    Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
36936    NumElts = MaskSrcVT.getVectorNumElements();
36937  }
36938  assert((NumElts <= 32 || NumElts == 64) &&
36939         "Not expecting more than 64 elements");
36940
36941  MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
36942  if (BinOp == ISD::XOR) {
36943    // parity -> (AND (CTPOP(MOVMSK X)), 1)
36944    SDValue Mask = DAG.getConstant(1, DL, CmpVT);
36945    SDValue Result = DAG.getNode(ISD::CTPOP, DL, CmpVT, Movmsk);
36946    Result = DAG.getNode(ISD::AND, DL, CmpVT, Result, Mask);
36947    return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
36948  }
36949
36950  SDValue CmpC;
36951  ISD::CondCode CondCode;
36952  if (BinOp == ISD::OR) {
36953    // any_of -> MOVMSK != 0
36954    CmpC = DAG.getConstant(0, DL, CmpVT);
36955    CondCode = ISD::CondCode::SETNE;
36956  } else {
36957    // all_of -> MOVMSK == ((1 << NumElts) - 1)
36958    CmpC = DAG.getConstant(APInt::getLowBitsSet(CmpVT.getSizeInBits(), NumElts),
36959                           DL, CmpVT);
36960    CondCode = ISD::CondCode::SETEQ;
36961  }
36962
36963  // The setcc produces an i8 of 0/1, so extend that to the result width and
36964  // negate to get the final 0/-1 mask value.
36965  EVT SetccVT =
36966      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
36967  SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
36968  SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
36969  SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
36970  return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
36971}
36972
36973static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
36974                                      const X86Subtarget &Subtarget) {
36975  // PSADBW is only supported on SSE2 and up.
36976  if (!Subtarget.hasSSE2())
36977    return SDValue();
36978
36979  // Verify the type we're extracting from is any integer type above i16.
36980  EVT VT = Extract->getOperand(0).getValueType();
36981  if (!VT.isSimple() || !(VT.getVectorElementType().getSizeInBits() > 16))
36982    return SDValue();
36983
36984  unsigned RegSize = 128;
36985  if (Subtarget.useBWIRegs())
36986    RegSize = 512;
36987  else if (Subtarget.hasAVX())
36988    RegSize = 256;
36989
36990  // We handle up to v16i* for SSE2 / v32i* for AVX / v64i* for AVX512.
36991  // TODO: We should be able to handle larger vectors by splitting them before
36992  // feeding them into several SADs, and then reducing over those.
36993  if (RegSize / VT.getVectorNumElements() < 8)
36994    return SDValue();
36995
36996  // Match shuffle + add pyramid.
36997  ISD::NodeType BinOp;
36998  SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
36999
37000  // The operand is expected to be zero extended from i8
37001  // (verified in detectZextAbsDiff).
37002  // In order to convert to i64 and above, additional any/zero/sign
37003  // extend is expected.
37004  // The zero extend from 32 bit has no mathematical effect on the result.
37005  // Also, a sign extend is effectively a zero extend here
37006  // (it extends the sign bit, which is zero).
37007  // So it is correct to skip the sign/zero/any extend instruction.
37008  if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
37009    Root.getOpcode() == ISD::ZERO_EXTEND ||
37010    Root.getOpcode() == ISD::ANY_EXTEND))
37011    Root = Root.getOperand(0);
37012
37013  // If there was a match, we want Root to be an ABS node that is the root of
37014  // an abs-diff pattern.
37015  if (!Root || Root.getOpcode() != ISD::ABS)
37016    return SDValue();
37017
37018  // Check whether we have an abs-diff pattern feeding into the ABS.
37019  SDValue Zext0, Zext1;
37020  if (!detectZextAbsDiff(Root, Zext0, Zext1))
37021    return SDValue();
37022
37023  // Create the SAD instruction.
37024  SDLoc DL(Extract);
37025  SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);
37026
37027  // If the original vector was wider than 8 elements, sum over the results
37028  // in the SAD vector.
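  // e.g. a v4i64 SAD result is summed by adding a <2,3,u,u> shuffle of itself,
  // then a <1,u,u,u> shuffle, leaving the total in element 0.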
37029  unsigned Stages = Log2_32(VT.getVectorNumElements());
37030  MVT SadVT = SAD.getSimpleValueType();
37031  if (Stages > 3) {
37032    unsigned SadElems = SadVT.getVectorNumElements();
37033
37034    for (unsigned i = Stages - 3; i > 0; --i) {
37035      SmallVector<int, 16> Mask(SadElems, -1);
37036      for (unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
37037        Mask[j] = MaskEnd + j;
37038
37039      SDValue Shuffle =
37040          DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
37041      SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
37042    }
37043  }
37044
37045  MVT Type = Extract->getSimpleValueType(0);
37046  unsigned TypeSizeInBits = Type.getSizeInBits();
37047  // Return the lowest TypeSizeInBits bits.
37048  MVT ResVT = MVT::getVectorVT(Type, SadVT.getSizeInBits() / TypeSizeInBits);
37049  SAD = DAG.getBitcast(ResVT, SAD);
37050  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Type, SAD,
37051                     Extract->getOperand(1));
37052}
37053
37054// Attempt to peek through a target shuffle and extract the scalar from the
37055// source.
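// e.g. extract_vector_elt (pshufd X, <2,3,0,1>), 0 can instead extract
// element 2 directly from X.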
37056static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
37057                                         TargetLowering::DAGCombinerInfo &DCI,
37058                                         const X86Subtarget &Subtarget) {
37059  if (DCI.isBeforeLegalizeOps())
37060    return SDValue();
37061
37062  SDLoc dl(N);
37063  SDValue Src = N->getOperand(0);
37064  SDValue Idx = N->getOperand(1);
37065
37066  EVT VT = N->getValueType(0);
37067  EVT SrcVT = Src.getValueType();
37068  EVT SrcSVT = SrcVT.getVectorElementType();
37069  unsigned NumSrcElts = SrcVT.getVectorNumElements();
37070
37071  // Don't attempt this for boolean mask vectors or unknown extraction indices.
37072  if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
37073    return SDValue();
37074
37075  SDValue SrcBC = peekThroughBitcasts(Src);
37076
37077  // Handle extract(broadcast(scalar_value)); the index doesn't matter.
37078  if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
37079    SDValue SrcOp = SrcBC.getOperand(0);
37080    if (SrcOp.getValueSizeInBits() == VT.getSizeInBits())
37081      return DAG.getBitcast(VT, SrcOp);
37082  }
37083
37084  // If we're extracting a single element from a broadcast load and there are
37085  // no other users, just create a single load.
37086  if (SrcBC.getOpcode() == X86ISD::VBROADCAST_LOAD && SrcBC.hasOneUse()) {
37087    auto *MemIntr = cast<MemIntrinsicSDNode>(SrcBC);
37088    unsigned SrcBCWidth = SrcBC.getScalarValueSizeInBits();
37089    if (MemIntr->getMemoryVT().getSizeInBits() == SrcBCWidth &&
37090        VT.getSizeInBits() == SrcBCWidth) {
37091      SDValue Load = DAG.getLoad(VT, dl, MemIntr->getChain(),
37092                                 MemIntr->getBasePtr(),
37093                                 MemIntr->getPointerInfo(),
37094                                 MemIntr->getAlignment(),
37095                                 MemIntr->getMemOperand()->getFlags());
37096      DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
37097      return Load;
37098    }
37099  }
37100
37101  // Handle extract(truncate(x)) for 0'th index.
37102  // TODO: Treat this as a faux shuffle?
37103  // TODO: When can we use this for general indices?
37104  if (ISD::TRUNCATE == Src.getOpcode() && SrcVT.is128BitVector() &&
37105      isNullConstant(Idx)) {
37106    Src = extract128BitVector(Src.getOperand(0), 0, DAG, dl);
37107    Src = DAG.getBitcast(SrcVT, Src);
37108    return DAG.getNode(N->getOpcode(), dl, VT, Src, Idx);
37109  }
37110
37111  // Resolve the target shuffle inputs and mask.
37112  SmallVector<int, 16> Mask;
37113  SmallVector<SDValue, 2> Ops;
37114  if (!getTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
37115    return SDValue();
37116
37117  // Attempt to narrow/widen the shuffle mask to the correct size.
37118  if (Mask.size() != NumSrcElts) {
37119    if ((NumSrcElts % Mask.size()) == 0) {
37120      SmallVector<int, 16> ScaledMask;
37121      int Scale = NumSrcElts / Mask.size();
37122      scaleShuffleMask<int>(Scale, Mask, ScaledMask);
37123      Mask = std::move(ScaledMask);
37124    } else if ((Mask.size() % NumSrcElts) == 0) {
37125      // Simplify Mask based on demanded element.
37126      int ExtractIdx = (int)N->getConstantOperandVal(1);
37127      int Scale = Mask.size() / NumSrcElts;
37128      int Lo = Scale * ExtractIdx;
37129      int Hi = Scale * (ExtractIdx + 1);
37130      for (int i = 0, e = (int)Mask.size(); i != e; ++i)
37131        if (i < Lo || Hi <= i)
37132          Mask[i] = SM_SentinelUndef;
37133
37134      SmallVector<int, 16> WidenedMask;
37135      while (Mask.size() > NumSrcElts &&
37136             canWidenShuffleElements(Mask, WidenedMask))
37137        Mask = std::move(WidenedMask);
37138      // TODO - investigate support for wider shuffle masks with known upper
37139      // undef/zero elements for implicit zero-extension.
37140    }
37141  }
37142
37143  // Check if narrowing/widening failed.
37144  if (Mask.size() != NumSrcElts)
37145    return SDValue();
37146
37147  int SrcIdx = Mask[N->getConstantOperandVal(1)];
37148
37149  // If the shuffle source element is undef/zero then we can just accept it.
37150  if (SrcIdx == SM_SentinelUndef)
37151    return DAG.getUNDEF(VT);
37152
37153  if (SrcIdx == SM_SentinelZero)
37154    return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
37155                                : DAG.getConstant(0, dl, VT);
37156
37157  SDValue SrcOp = Ops[SrcIdx / Mask.size()];
37158  SrcIdx = SrcIdx % Mask.size();
37159
37160  // We can only extract other elements from 128-bit vectors and in certain
37161  // circumstances, depending on SSE-level.
37162  // TODO: Investigate using extract_subvector for larger vectors.
37163  // TODO: Investigate float/double extraction if it will be just stored.
37164  if ((SrcVT == MVT::v4i32 || SrcVT == MVT::v2i64) &&
37165      ((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
37166    assert(SrcSVT == VT && "Unexpected extraction type");
37167    SrcOp = DAG.getBitcast(SrcVT, SrcOp);
37168    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcSVT, SrcOp,
37169                       DAG.getIntPtrConstant(SrcIdx, dl));
37170  }
37171
37172  if ((SrcVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
37173      (SrcVT == MVT::v16i8 && Subtarget.hasSSE41())) {
37174    assert(VT.getSizeInBits() >= SrcSVT.getSizeInBits() &&
37175           "Unexpected extraction type");
37176    unsigned OpCode = (SrcVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
37177    SrcOp = DAG.getBitcast(SrcVT, SrcOp);
37178    SDValue ExtOp = DAG.getNode(OpCode, dl, MVT::i32, SrcOp,
37179                                DAG.getIntPtrConstant(SrcIdx, dl));
37180    return DAG.getZExtOrTrunc(ExtOp, dl, VT);
37181  }
37182
37183  return SDValue();
37184}
37185
37186/// Extracting a scalar FP value from vector element 0 is free, so extract each
37187/// operand first, then perform the math as a scalar op.
37188static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG) {
37189  assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
37190  SDValue Vec = ExtElt->getOperand(0);
37191  SDValue Index = ExtElt->getOperand(1);
37192  EVT VT = ExtElt->getValueType(0);
37193  EVT VecVT = Vec.getValueType();
37194
37195  // TODO: If this is a unary/expensive/expand op, allow extraction from a
37196  // non-zero element because the shuffle+scalar op will be cheaper?
37197  if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
37198    return SDValue();
37199
37200  // Vector FP compares don't fit the pattern of FP math ops (propagate, not
37201  // extract, the condition code), so deal with those as a special-case.
37202  if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
37203    EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
37204    if (OpVT != MVT::f32 && OpVT != MVT::f64)
37205      return SDValue();
37206
37207    // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
37208    SDLoc DL(ExtElt);
37209    SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
37210                               Vec.getOperand(0), Index);
37211    SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
37212                               Vec.getOperand(1), Index);
37213    return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
37214  }
37215
37216  if (VT != MVT::f32 && VT != MVT::f64)
37217    return SDValue();
37218
37219  // Vector FP selects don't fit the pattern of FP math ops (because the
37220  // condition has a different type and we have to change the opcode), so deal
37221  // with those here.
37222  // FIXME: This is restricted to pre type legalization by ensuring the setcc
37223  // has i1 elements. If we loosen this we need to convert vector bool to a
37224  // scalar bool.
37225  if (Vec.getOpcode() == ISD::VSELECT &&
37226      Vec.getOperand(0).getOpcode() == ISD::SETCC &&
37227      Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
37228      Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
37229    // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
37230    SDLoc DL(ExtElt);
37231    SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
37232                               Vec.getOperand(0).getValueType().getScalarType(),
37233                               Vec.getOperand(0), Index);
37234    SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
37235                               Vec.getOperand(1), Index);
37236    SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
37237                               Vec.getOperand(2), Index);
37238    return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
37239  }
37240
37241  // TODO: This switch could include FNEG and the x86-specific FP logic ops
37242  // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
37243  // missed load folding and fma+fneg combining.
37244  switch (Vec.getOpcode()) {
37245  case ISD::FMA: // Begin 3 operands
37246  case ISD::FMAD:
37247  case ISD::FADD: // Begin 2 operands
37248  case ISD::FSUB:
37249  case ISD::FMUL:
37250  case ISD::FDIV:
37251  case ISD::FREM:
37252  case ISD::FCOPYSIGN:
37253  case ISD::FMINNUM:
37254  case ISD::FMAXNUM:
37255  case ISD::FMINNUM_IEEE:
37256  case ISD::FMAXNUM_IEEE:
37257  case ISD::FMAXIMUM:
37258  case ISD::FMINIMUM:
37259  case X86ISD::FMAX:
37260  case X86ISD::FMIN:
37261  case ISD::FABS: // Begin 1 operand
37262  case ISD::FSQRT:
37263  case ISD::FRINT:
37264  case ISD::FCEIL:
37265  case ISD::FTRUNC:
37266  case ISD::FNEARBYINT:
37267  case ISD::FROUND:
37268  case ISD::FFLOOR:
37269  case X86ISD::FRCP:
37270  case X86ISD::FRSQRT: {
37271    // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
37272    SDLoc DL(ExtElt);
37273    SmallVector<SDValue, 4> ExtOps;
37274    for (SDValue Op : Vec->ops())
37275      ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
37276    return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
37277  }
37278  default:
37279    return SDValue();
37280  }
37281  llvm_unreachable("All opcodes should return within switch");
37282}
37283
37284/// Try to convert a vector reduction sequence composed of binops and shuffles
37285/// into horizontal ops.
37286static SDValue combineReductionToHorizontal(SDNode *ExtElt, SelectionDAG &DAG,
37287                                            const X86Subtarget &Subtarget) {
37288  assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unexpected caller");
37289
37290  // We need at least SSE2 to anything here.
37291  if (!Subtarget.hasSSE2())
37292    return SDValue();
37293
37294  ISD::NodeType Opc;
37295  SDValue Rdx =
37296      DAG.matchBinOpReduction(ExtElt, Opc, {ISD::ADD, ISD::FADD}, true);
37297  if (!Rdx)
37298    return SDValue();
37299
37300  SDValue Index = ExtElt->getOperand(1);
37301  assert(isNullConstant(Index) &&
37302         "Reduction doesn't end in an extract from index 0");
37303
37304  EVT VT = ExtElt->getValueType(0);
37305  EVT VecVT = Rdx.getValueType();
37306  if (VecVT.getScalarType() != VT)
37307    return SDValue();
37308
37309  SDLoc DL(ExtElt);
37310
37311  // vXi8 reduction - sub 128-bit vector.
37312  if (VecVT == MVT::v4i8 || VecVT == MVT::v8i8) {
37313    if (VecVT == MVT::v4i8) {
37314      // Pad with zero.
37315      if (Subtarget.hasSSE41()) {
37316        Rdx = DAG.getBitcast(MVT::i32, Rdx);
37317        Rdx = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4i32,
37318                          DAG.getConstant(0, DL, MVT::v4i32), Rdx,
37319                          DAG.getIntPtrConstant(0, DL));
37320        Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
37321      } else {
37322        Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, Rdx,
37323                          DAG.getConstant(0, DL, VecVT));
37324      }
37325    }
37326    if (Rdx.getValueType() == MVT::v8i8) {
37327      // Pad with undef.
37328      Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, Rdx,
37329                        DAG.getUNDEF(MVT::v8i8));
37330    }
37331    Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
37332                      DAG.getConstant(0, DL, MVT::v16i8));
37333    Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
37334    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
37335  }
37336
37337  // Must be a >=128-bit vector with pow2 elements.
37338  if ((VecVT.getSizeInBits() % 128) != 0 ||
37339      !isPowerOf2_32(VecVT.getVectorNumElements()))
37340    return SDValue();
37341
37342  // vXi8 reduction - sum lo/hi halves then use PSADBW.
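  // PSADBW against an all-zero vector sums the bytes of each 64-bit lane, so
  // once the halves have been added together the low byte of the result holds
  // the i8 add reduction (the sum wraps modulo 256 as required for i8).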
37343  if (VT == MVT::i8) {
37344    while (Rdx.getValueSizeInBits() > 128) {
37345      unsigned HalfSize = VecVT.getSizeInBits() / 2;
37346      unsigned HalfElts = VecVT.getVectorNumElements() / 2;
37347      SDValue Lo = extractSubVector(Rdx, 0, DAG, DL, HalfSize);
37348      SDValue Hi = extractSubVector(Rdx, HalfElts, DAG, DL, HalfSize);
37349      Rdx = DAG.getNode(ISD::ADD, DL, Lo.getValueType(), Lo, Hi);
37350      VecVT = Rdx.getValueType();
37351    }
37352    assert(VecVT == MVT::v16i8 && "v16i8 reduction expected");
37353
37354    SDValue Hi = DAG.getVectorShuffle(
37355        MVT::v16i8, DL, Rdx, Rdx,
37356        {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
37357    Rdx = DAG.getNode(ISD::ADD, DL, MVT::v16i8, Rdx, Hi);
37358    Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
37359                      getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
37360    Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
37361    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
37362  }
37363
37364  // Only use (F)HADD opcodes if they aren't microcoded or minimizes codesize.
37365  bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
37366  if (!Subtarget.hasFastHorizontalOps() && !OptForSize)
37367    return SDValue();
37368
37369  unsigned HorizOpcode = Opc == ISD::ADD ? X86ISD::HADD : X86ISD::FHADD;
37370
37371  // 256-bit horizontal instructions operate on 128-bit chunks rather than
37372  // across the whole vector, so we need an extract + hop preliminary stage.
37373  // This is the only step where the operands of the hop are not the same value.
37374  // TODO: We could extend this to handle 512-bit or even longer vectors.
37375  if (((VecVT == MVT::v16i16 || VecVT == MVT::v8i32) && Subtarget.hasSSSE3()) ||
37376      ((VecVT == MVT::v8f32 || VecVT == MVT::v4f64) && Subtarget.hasSSE3())) {
37377    unsigned NumElts = VecVT.getVectorNumElements();
37378    SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
37379    SDValue Lo = extract128BitVector(Rdx, 0, DAG, DL);
37380    Rdx = DAG.getNode(HorizOpcode, DL, Lo.getValueType(), Hi, Lo);
37381    VecVT = Rdx.getValueType();
37382  }
37383  if (!((VecVT == MVT::v8i16 || VecVT == MVT::v4i32) && Subtarget.hasSSSE3()) &&
37384      !((VecVT == MVT::v4f32 || VecVT == MVT::v2f64) && Subtarget.hasSSE3()))
37385    return SDValue();
37386
37387  // extract (add (shuf X), X), 0 --> extract (hadd X, X), 0
37388  unsigned ReductionSteps = Log2_32(VecVT.getVectorNumElements());
37389  for (unsigned i = 0; i != ReductionSteps; ++i)
37390    Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Rdx, Rdx);
37391
37392  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
37393}
37394
37395/// Detect vector gather/scatter index generation and convert it from being a
37396/// bunch of shuffles and extracts into a somewhat faster sequence.
37397/// For i686, the best sequence is apparently storing the value and loading
37398/// scalars back, while for x64 we should use 64-bit extracts and shifts.
37399static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
37400                                       TargetLowering::DAGCombinerInfo &DCI,
37401                                       const X86Subtarget &Subtarget) {
37402  if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
37403    return NewOp;
37404
37405  SDValue InputVector = N->getOperand(0);
37406  SDValue EltIdx = N->getOperand(1);
37407  auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);
37408
37409  EVT SrcVT = InputVector.getValueType();
37410  EVT VT = N->getValueType(0);
37411  SDLoc dl(InputVector);
37412  bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;
37413  unsigned NumSrcElts = SrcVT.getVectorNumElements();
37414
37415  if (CIdx && CIdx->getAPIntValue().uge(NumSrcElts))
37416    return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
37417
37418  // Integer Constant Folding.
37419  if (CIdx && VT.isInteger()) {
37420    APInt UndefVecElts;
37421    SmallVector<APInt, 16> EltBits;
37422    unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
37423    if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
37424                                      EltBits, true, false)) {
37425      uint64_t Idx = CIdx->getZExtValue();
37426      if (UndefVecElts[Idx])
37427        return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
37428      return DAG.getConstant(EltBits[Idx].zextOrSelf(VT.getScalarSizeInBits()),
37429                             dl, VT);
37430    }
37431  }
37432
37433  if (IsPextr) {
37434    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37435    if (TLI.SimplifyDemandedBits(
37436            SDValue(N, 0), APInt::getAllOnesValue(VT.getSizeInBits()), DCI))
37437      return SDValue(N, 0);
37438
37439    // PEXTR*(PINSR*(v, s, c), c) -> s (with implicit zext handling).
37440    if ((InputVector.getOpcode() == X86ISD::PINSRB ||
37441         InputVector.getOpcode() == X86ISD::PINSRW) &&
37442        InputVector.getOperand(2) == EltIdx) {
37443      assert(SrcVT == InputVector.getOperand(0).getValueType() &&
37444             "Vector type mismatch");
37445      SDValue Scl = InputVector.getOperand(1);
37446      Scl = DAG.getNode(ISD::TRUNCATE, dl, SrcVT.getScalarType(), Scl);
37447      return DAG.getZExtOrTrunc(Scl, dl, VT);
37448    }
37449
37450    // TODO - Remove this once we can handle the implicit zero-extension of
37451    // X86ISD::PEXTRW/X86ISD::PEXTRB in combineHorizontalPredicateResult and
37452    // combineBasicSADPattern.
37453    return SDValue();
37454  }
37455
37456  // Detect mmx extraction of all bits as an i64. It works better as a bitcast.
37457  if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
37458      VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
37459    SDValue MMXSrc = InputVector.getOperand(0);
37460
37461    // The bitcast source is a direct mmx result.
37462    if (MMXSrc.getValueType() == MVT::x86mmx)
37463      return DAG.getBitcast(VT, InputVector);
37464  }
37465
37466  // Detect mmx to i32 conversion through a v2i32 elt extract.
37467  if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
37468      VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) {
37469    SDValue MMXSrc = InputVector.getOperand(0);
37470
37471    // The bitcast source is a direct mmx result.
37472    if (MMXSrc.getValueType() == MVT::x86mmx)
37473      return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
37474  }
37475
37476  // Check whether this extract is the root of a sum of absolute differences
37477  // pattern. This has to be done here because we really want it to happen
37478  // pre-legalization.
37479  if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
37480    return SAD;
37481
37482  // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
37483  if (SDValue Cmp = combineHorizontalPredicateResult(N, DAG, Subtarget))
37484    return Cmp;
37485
37486  // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
37487  if (SDValue MinMax = combineHorizontalMinMaxResult(N, DAG, Subtarget))
37488    return MinMax;
37489
37490  if (SDValue V = combineReductionToHorizontal(N, DAG, Subtarget))
37491    return V;
37492
37493  if (SDValue V = scalarizeExtEltFP(N, DAG))
37494    return V;
37495
37496  // Attempt to extract an i1 element by using MOVMSK to extract the signbits
37497  // and then testing the relevant element.
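  // e.g. for a v16i1 source vector, MOVMSK produces a 16-bit mask M, and
  // extracting lane 3 becomes the scalar test (M & 0x8) == 0x8.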
37498  if (CIdx && SrcVT.getScalarType() == MVT::i1) {
37499    SmallVector<SDNode *, 16> BoolExtracts;
37500    auto IsBoolExtract = [&BoolExtracts](SDNode *Use) {
37501      if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
37502          isa<ConstantSDNode>(Use->getOperand(1)) &&
37503          Use->getValueType(0) == MVT::i1) {
37504        BoolExtracts.push_back(Use);
37505        return true;
37506      }
37507      return false;
37508    };
37509    if (all_of(InputVector->uses(), IsBoolExtract) &&
37510        BoolExtracts.size() > 1) {
37511      EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
37512      if (SDValue BC =
37513              combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
37514        for (SDNode *Use : BoolExtracts) {
37515          // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
37516          unsigned MaskIdx = Use->getConstantOperandVal(1);
37517          APInt MaskBit = APInt::getOneBitSet(NumSrcElts, MaskIdx);
37518          SDValue Mask = DAG.getConstant(MaskBit, dl, BCVT);
37519          SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
37520          Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
37521          DCI.CombineTo(Use, Res);
37522        }
37523        return SDValue(N, 0);
37524      }
37525    }
37526  }
37527
37528  return SDValue();
37529}
37530
37531/// If a vector select has an operand that is -1 or 0, try to simplify the
37532/// select to a bitwise logic operation.
37533/// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
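/// For example, once the condition is known to be a sign-splat mask of the
/// same element width as the operands:
///   vselect Cond, X, 0  --> and Cond, X
///   vselect Cond, -1, X --> or Cond, X
///   vselect Cond, 0, X  --> andn Cond, X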
37534static SDValue
37535combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
37536                                 TargetLowering::DAGCombinerInfo &DCI,
37537                                 const X86Subtarget &Subtarget) {
37538  SDValue Cond = N->getOperand(0);
37539  SDValue LHS = N->getOperand(1);
37540  SDValue RHS = N->getOperand(2);
37541  EVT VT = LHS.getValueType();
37542  EVT CondVT = Cond.getValueType();
37543  SDLoc DL(N);
37544  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37545
37546  if (N->getOpcode() != ISD::VSELECT)
37547    return SDValue();
37548
37549  assert(CondVT.isVector() && "Vector select expects a vector selector!");
37550
37551  // Check if the first operand is all zeros and the Cond type is vXi1.
37552  // This situation only applies to AVX512.
37553  // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
37554  // TODO: Can we assert that both operands are not zeros (because that should
37555  //       get simplified at node creation time)?
37556  bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
37557  bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
37558
37559  // If both inputs are 0/undef, create a complete zero vector.
37560  // FIXME: As noted above this should be handled by DAGCombiner/getNode.
37561  if (TValIsAllZeros && FValIsAllZeros) {
37562    if (VT.isFloatingPoint())
37563      return DAG.getConstantFP(0.0, DL, VT);
37564    return DAG.getConstant(0, DL, VT);
37565  }
37566
37567  if (TValIsAllZeros && !FValIsAllZeros && Subtarget.hasAVX512() &&
37568      Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1) {
37569    // Invert the cond to not(cond) : xor(op,allones)=not(op)
37570    SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
37571    // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
37572    return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
37573  }
37574
37575  // To use the condition operand as a bitwise mask, it must have elements that
37576  // are the same size as the select elements. I.e., the condition operand must
37577  // have already been promoted from the IR select condition type <N x i1>.
37578  // Don't check if the types themselves are equal because that excludes
37579  // vector floating-point selects.
37580  if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
37581    return SDValue();
37582
37583  // Try to invert the condition if true value is not all 1s and false value is
37584  // not all 0s. Only do this if the condition has one use.
37585  bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
37586  if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
37587      // Check if the selector will be produced by CMPP*/PCMP*.
37588      Cond.getOpcode() == ISD::SETCC &&
37589      // Check if SETCC has already been promoted.
37590      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
37591          CondVT) {
37592    bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
37593
37594    if (TValIsAllZeros || FValIsAllOnes) {
37595      SDValue CC = Cond.getOperand(2);
37596      ISD::CondCode NewCC = ISD::getSetCCInverse(
37597          cast<CondCodeSDNode>(CC)->get(), Cond.getOperand(0).getValueType());
37598      Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
37599                          NewCC);
37600      std::swap(LHS, RHS);
37601      TValIsAllOnes = FValIsAllOnes;
37602      FValIsAllZeros = TValIsAllZeros;
37603    }
37604  }
37605
37606  // Cond value must be 'sign splat' to be converted to a logical op.
37607  if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
37608    return SDValue();
37609
37610  // vselect Cond, 111..., 000... -> Cond
37611  if (TValIsAllOnes && FValIsAllZeros)
37612    return DAG.getBitcast(VT, Cond);
37613
37614  if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(CondVT))
37615    return SDValue();
37616
37617  // vselect Cond, 111..., X -> or Cond, X
37618  if (TValIsAllOnes) {
37619    SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
37620    SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
37621    return DAG.getBitcast(VT, Or);
37622  }
37623
37624  // vselect Cond, X, 000... -> and Cond, X
37625  if (FValIsAllZeros) {
37626    SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
37627    SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
37628    return DAG.getBitcast(VT, And);
37629  }
37630
37631  // vselect Cond, 000..., X -> andn Cond, X
37632  if (TValIsAllZeros) {
37633    MVT AndNVT = MVT::getVectorVT(MVT::i64, CondVT.getSizeInBits() / 64);
37634    SDValue CastCond = DAG.getBitcast(AndNVT, Cond);
37635    SDValue CastRHS = DAG.getBitcast(AndNVT, RHS);
37636    SDValue AndN = DAG.getNode(X86ISD::ANDNP, DL, AndNVT, CastCond, CastRHS);
37637    return DAG.getBitcast(VT, AndN);
37638  }
37639
37640  return SDValue();
37641}
37642
37643/// If both arms of a vector select are concatenated vectors, split the select,
37644/// and concatenate the result to eliminate a wide (256-bit) vector instruction:
37645///   vselect Cond, (concat T0, T1), (concat F0, F1) -->
37646///   concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
37647static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
37648                                  const X86Subtarget &Subtarget) {
37649  unsigned Opcode = N->getOpcode();
37650  if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
37651    return SDValue();
37652
37653  // TODO: Split 512-bit vectors too?
37654  EVT VT = N->getValueType(0);
37655  if (!VT.is256BitVector())
37656    return SDValue();
37657
37658  // TODO: Split as long as any 2 of the 3 operands are concatenated?
37659  SDValue Cond = N->getOperand(0);
37660  SDValue TVal = N->getOperand(1);
37661  SDValue FVal = N->getOperand(2);
37662  SmallVector<SDValue, 4> CatOpsT, CatOpsF;
37663  if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
37664      !collectConcatOps(TVal.getNode(), CatOpsT) ||
37665      !collectConcatOps(FVal.getNode(), CatOpsF))
37666    return SDValue();
37667
37668  auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
37669                            ArrayRef<SDValue> Ops) {
37670    return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
37671  };
37672  return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
37673                          makeBlend, /*CheckBWI*/ false);
37674}
37675
37676static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
37677  SDValue Cond = N->getOperand(0);
37678  SDValue LHS = N->getOperand(1);
37679  SDValue RHS = N->getOperand(2);
37680  SDLoc DL(N);
37681
37682  auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
37683  auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
37684  if (!TrueC || !FalseC)
37685    return SDValue();
37686
37687  // Don't do this for crazy integer types.
37688  EVT VT = N->getValueType(0);
37689  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
37690    return SDValue();
37691
37692  // We're going to use the condition bit in math or logic ops. We could allow
37693  // this with a wider condition value (post-legalization it becomes an i8),
37694  // but if nothing is creating selects that late, it doesn't matter.
37695  if (Cond.getValueType() != MVT::i1)
37696    return SDValue();
37697
37698  // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
37699  // 3, 5, or 9 with i32/i64, so those get transformed too.
37700  // TODO: For constants that overflow or do not differ by power-of-2 or small
37701  // multiplier, convert to 'and' + 'add'.
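  // Illustrative examples: select Cond, 7, 3 differs by 4, so it becomes
  // zext(Cond)*4 + 3 (the multiply lowers to a shift); select Cond, 10, 1
  // differs by 9, so an LEA can compute zext(Cond)*9 + 1.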
37702  const APInt &TrueVal = TrueC->getAPIntValue();
37703  const APInt &FalseVal = FalseC->getAPIntValue();
37704  bool OV;
37705  APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
37706  if (OV)
37707    return SDValue();
37708
37709  APInt AbsDiff = Diff.abs();
37710  if (AbsDiff.isPowerOf2() ||
37711      ((VT == MVT::i32 || VT == MVT::i64) &&
37712       (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
37713
37714    // We need a positive multiplier constant for shift/LEA codegen. The 'not'
37715    // of the condition can usually be folded into a compare predicate, but even
37716    // without that, the sequence should be cheaper than a CMOV alternative.
37717    if (TrueVal.slt(FalseVal)) {
37718      Cond = DAG.getNOT(DL, Cond, MVT::i1);
37719      std::swap(TrueC, FalseC);
37720    }
37721
37722    // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
37723    SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
37724
37725    // Multiply condition by the difference if non-one.
37726    if (!AbsDiff.isOneValue())
37727      R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));
37728
37729    // Add the base if non-zero.
37730    if (!FalseC->isNullValue())
37731      R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));
37732
37733    return R;
37734  }
37735
37736  return SDValue();
37737}
37738
37739/// If this is a *dynamic* select (non-constant condition) and we can match
37740/// this node with one of the variable blend instructions, restructure the
37741/// condition so that blends can use the high (sign) bit of each element.
37742/// This function will also call SimplifyDemandedBits on an already-created
37743/// BLENDV to perform additional simplifications.
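/// For example, a vselect whose condition is produced by a vector compare can
/// be rewritten as an X86ISD::BLENDV keyed only on the sign bit of each
/// condition element, which lets SimplifyDemandedBits discard the other bits.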
37744static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
37745                                           TargetLowering::DAGCombinerInfo &DCI,
37746                                           const X86Subtarget &Subtarget) {
37747  SDValue Cond = N->getOperand(0);
37748  if ((N->getOpcode() != ISD::VSELECT &&
37749       N->getOpcode() != X86ISD::BLENDV) ||
37750      ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
37751    return SDValue();
37752
37753  // Don't optimize before the condition has been transformed to a legal type
37754  // and don't ever optimize vector selects that map to AVX512 mask-registers.
37755  unsigned BitWidth = Cond.getScalarValueSizeInBits();
37756  if (BitWidth < 8 || BitWidth > 64)
37757    return SDValue();
37758
37759  // We can only handle the cases where VSELECT is directly legal on the
37760  // subtarget. We custom lower VSELECT nodes with constant conditions and
37761  // this makes it hard to see whether a dynamic VSELECT will correctly
37762  // lower, so we both check the operation's status and explicitly handle the
37763  // cases where a *dynamic* blend will fail even though a constant-condition
37764  // blend could be custom lowered.
37765  // FIXME: We should find a better way to handle this class of problems.
37766  // Potentially, we should combine constant-condition vselect nodes
37767  // pre-legalization into shuffles and not mark as many types as custom
37768  // lowered.
37769  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37770  EVT VT = N->getValueType(0);
37771  if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
37772    return SDValue();
37773  // FIXME: We don't support i16-element blends currently. We could and
37774  // should support them by making *all* the bits in the condition be set
37775  // rather than just the high bit and using an i8-element blend.
37776  if (VT.getVectorElementType() == MVT::i16)
37777    return SDValue();
37778  // Dynamic blending was only available from SSE4.1 onward.
37779  if (VT.is128BitVector() && !Subtarget.hasSSE41())
37780    return SDValue();
37781  // Byte blends are only available in AVX2.
37782  if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
37783    return SDValue();
37784  // There are no 512-bit blend instructions that use sign bits.
37785  if (VT.is512BitVector())
37786    return SDValue();
37787
37788  auto OnlyUsedAsSelectCond = [](SDValue Cond) {
37789    for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
37790         UI != UE; ++UI)
37791      if ((UI->getOpcode() != ISD::VSELECT &&
37792           UI->getOpcode() != X86ISD::BLENDV) ||
37793          UI.getOperandNo() != 0)
37794        return false;
37795
37796    return true;
37797  };
37798
37799  if (OnlyUsedAsSelectCond(Cond)) {
37800    APInt DemandedMask(APInt::getSignMask(BitWidth));
37801    KnownBits Known;
37802    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
37803                                          !DCI.isBeforeLegalizeOps());
37804    if (!TLI.SimplifyDemandedBits(Cond, DemandedMask, Known, TLO, 0, true))
37805      return SDValue();
37806
37807    // If we changed the computation somewhere in the DAG, this change will
37808    // affect all users of Cond. Update all the nodes so that we do not use
37809    // the generic VSELECT anymore. Otherwise, we may perform wrong
37810    // optimizations as we messed with the actual expectation for the vector
37811    // boolean values.
37812    for (SDNode *U : Cond->uses()) {
37813      if (U->getOpcode() == X86ISD::BLENDV)
37814        continue;
37815
37816      SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
37817                               Cond, U->getOperand(1), U->getOperand(2));
37818      DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
37819      DCI.AddToWorklist(U);
37820    }
37821    DCI.CommitTargetLoweringOpt(TLO);
37822    return SDValue(N, 0);
37823  }
37824
37825  // Otherwise we can still at least try to simplify multiple use bits.
37826  APInt DemandedMask(APInt::getSignMask(BitWidth));
37827  APInt DemandedElts(APInt::getAllOnesValue(VT.getVectorNumElements()));
37828  KnownBits Known;
37829  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
37830                                        !DCI.isBeforeLegalizeOps());
37831  if (SDValue V = TLI.SimplifyMultipleUseDemandedBits(Cond, DemandedMask,
37832                                                      DemandedElts, DAG, 0))
37833    return DAG.getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0),
37834                       V, N->getOperand(1), N->getOperand(2));
37835
37836  return SDValue();
37837}
37838
37839// Try to match:
37840//   (or (and (M, (sub 0, X)), (pandn M, X)))
37841// which is a special case of:
37842//   (select M, (sub 0, X), X)
37843// Per:
37844// http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
37845// We know that, if fNegate is 0 or 1:
37846//   (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
37847//
37848// Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
37849//   ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
37850//   ( M      ? -X : X) == ((X ^   M     ) + (M & 1))
37851// This lets us transform our vselect to:
37852//   (add (xor X, M), (and M, 1))
37853// And further to:
37854//   (sub (xor X, M), M)
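// Quick numeric check (illustrative): with X = 5 and M = -1 (all ones),
// xor(5, -1) = -6 and sub(-6, -1) = -5, i.e. X is negated; with M = 0,
// xor(5, 0) = 5 and sub(5, 0) = 5, i.e. X is unchanged.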
37855static SDValue combineLogicBlendIntoConditionalNegate(
37856    EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
37857    SelectionDAG &DAG, const X86Subtarget &Subtarget) {
37858  EVT MaskVT = Mask.getValueType();
37859  assert(MaskVT.isInteger() &&
37860         DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
37861         "Mask must be zero/all-bits");
37862
37863  if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
37864    return SDValue();
37865  if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
37866    return SDValue();
37867
37868  auto IsNegV = [](SDNode *N, SDValue V) {
37869    return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
37870           ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
37871  };
37872
37873  SDValue V;
37874  if (IsNegV(Y.getNode(), X))
37875    V = X;
37876  else if (IsNegV(X.getNode(), Y))
37877    V = Y;
37878  else
37879    return SDValue();
37880
37881  SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
37882  SDValue SubOp2 = Mask;
37883
37884  // If the negate was on the false side of the select, then
37885  // the operands of the SUB need to be swapped. PR 27251.
37886  // This is because the pattern being matched above is
37887  // (vselect M, (sub 0, X), X) -> (sub (xor X, M), M),
37888  // but if the pattern matched was
37889  // (vselect M, X, (sub 0, X)), that is really the negation of the pattern
37890  // above, -(vselect M, (sub 0, X), X), and therefore the replacement
37891  // pattern also needs to be a negation of the replacement pattern above.
37892  // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
37893  // sub accomplishes the negation of the replacement pattern.
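  // e.g. with V = 5 and M = -1: sub(xor(5, -1), -1) = -6 + 1 = -5, while the
  // swapped form sub(-1, xor(5, -1)) = -1 + 6 = 5, the negated result.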
37894  if (V == Y)
37895    std::swap(SubOp1, SubOp2);
37896
37897  SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
37898  return DAG.getBitcast(VT, Res);
37899}
37900
37901/// Do target-specific dag combines on SELECT and VSELECT nodes.
37902static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
37903                             TargetLowering::DAGCombinerInfo &DCI,
37904                             const X86Subtarget &Subtarget) {
37905  SDLoc DL(N);
37906  SDValue Cond = N->getOperand(0);
37907  SDValue LHS = N->getOperand(1);
37908  SDValue RHS = N->getOperand(2);
37909
37910  // Try simplification again because we use this function to optimize
37911  // BLENDV nodes that are not handled by the generic combiner.
37912  if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
37913    return V;
37914
37915  EVT VT = LHS.getValueType();
37916  EVT CondVT = Cond.getValueType();
37917  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37918  bool CondConstantVector = ISD::isBuildVectorOfConstantSDNodes(Cond.getNode());
37919
37920  // Attempt to combine (select M, (sub 0, X), X) -> (sub (xor X, M), M).
37921  // Limit this to cases of non-constant masks that createShuffleMaskFromVSELECT
37922  // can't catch, plus vXi8 cases where we'd likely end up with BLENDV.
37923  if (CondVT.isVector() && CondVT.isInteger() &&
37924      CondVT.getScalarSizeInBits() == VT.getScalarSizeInBits() &&
37925      (!CondConstantVector || CondVT.getScalarType() == MVT::i8) &&
37926      DAG.ComputeNumSignBits(Cond) == CondVT.getScalarSizeInBits())
37927    if (SDValue V = combineLogicBlendIntoConditionalNegate(VT, Cond, RHS, LHS,
37928                                                           DL, DAG, Subtarget))
37929      return V;
37930
37931  // Convert vselects with constant condition into shuffles.
37932  if (CondConstantVector && DCI.isBeforeLegalizeOps()) {
37933    SmallVector<int, 64> Mask;
37934    if (createShuffleMaskFromVSELECT(Mask, Cond))
37935      return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
37936  }
37937
37938  // If we have SSE[12] support, try to form min/max nodes. SSE min/max
37939  // instructions match the semantics of the common C idiom x<y?x:y but not
37940  // x<=y?x:y, because of how they handle negative zero (which can be
37941  // ignored in unsafe-math mode).
37942  // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
37943  if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
37944      VT != MVT::f80 && VT != MVT::f128 &&
37945      (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
37946      (Subtarget.hasSSE2() ||
37947       (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
37948    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
37949
37950    unsigned Opcode = 0;
37951    // Check for x CC y ? x : y.
37952    if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
37953        DAG.isEqualTo(RHS, Cond.getOperand(1))) {
37954      switch (CC) {
37955      default: break;
37956      case ISD::SETULT:
37957        // Converting this to a min would handle NaNs incorrectly, and swapping
37958        // the operands would cause it to handle comparisons between positive
37959        // and negative zero incorrectly.
37960        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
37961          if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
37962              !(DAG.isKnownNeverZeroFloat(LHS) ||
37963                DAG.isKnownNeverZeroFloat(RHS)))
37964            break;
37965          std::swap(LHS, RHS);
37966        }
37967        Opcode = X86ISD::FMIN;
37968        break;
37969      case ISD::SETOLE:
37970        // Converting this to a min would handle comparisons between positive
37971        // and negative zero incorrectly.
37972        if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
37973            !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
37974          break;
37975        Opcode = X86ISD::FMIN;
37976        break;
37977      case ISD::SETULE:
37978        // Converting this to a min would handle both negative zeros and NaNs
37979        // incorrectly, but we can swap the operands to fix both.
37980        std::swap(LHS, RHS);
37981        LLVM_FALLTHROUGH;
37982      case ISD::SETOLT:
37983      case ISD::SETLT:
37984      case ISD::SETLE:
37985        Opcode = X86ISD::FMIN;
37986        break;
37987
37988      case ISD::SETOGE:
37989        // Converting this to a max would handle comparisons between positive
37990        // and negative zero incorrectly.
37991        if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
37992            !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
37993          break;
37994        Opcode = X86ISD::FMAX;
37995        break;
37996      case ISD::SETUGT:
37997        // Converting this to a max would handle NaNs incorrectly, and swapping
37998        // the operands would cause it to handle comparisons between positive
37999        // and negative zero incorrectly.
38000        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
38001          if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
38002              !(DAG.isKnownNeverZeroFloat(LHS) ||
38003                DAG.isKnownNeverZeroFloat(RHS)))
38004            break;
38005          std::swap(LHS, RHS);
38006        }
38007        Opcode = X86ISD::FMAX;
38008        break;
38009      case ISD::SETUGE:
38010        // Converting this to a max would handle both negative zeros and NaNs
38011        // incorrectly, but we can swap the operands to fix both.
38012        std::swap(LHS, RHS);
38013        LLVM_FALLTHROUGH;
38014      case ISD::SETOGT:
38015      case ISD::SETGT:
38016      case ISD::SETGE:
38017        Opcode = X86ISD::FMAX;
38018        break;
38019      }
38020    // Check for x CC y ? y : x -- a min/max with reversed arms.
38021    } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
38022               DAG.isEqualTo(RHS, Cond.getOperand(0))) {
38023      switch (CC) {
38024      default: break;
38025      case ISD::SETOGE:
38026        // Converting this to a min would handle comparisons between positive
38027        // and negative zero incorrectly, and swapping the operands would
38028        // cause it to handle NaNs incorrectly.
38029        if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
38030            !(DAG.isKnownNeverZeroFloat(LHS) ||
38031              DAG.isKnownNeverZeroFloat(RHS))) {
38032          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
38033            break;
38034          std::swap(LHS, RHS);
38035        }
38036        Opcode = X86ISD::FMIN;
38037        break;
38038      case ISD::SETUGT:
38039        // Converting this to a min would handle NaNs incorrectly.
38040        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
38041          break;
38042        Opcode = X86ISD::FMIN;
38043        break;
38044      case ISD::SETUGE:
38045        // Converting this to a min would handle both negative zeros and NaNs
38046        // incorrectly, but we can swap the operands to fix both.
38047        std::swap(LHS, RHS);
38048        LLVM_FALLTHROUGH;
38049      case ISD::SETOGT:
38050      case ISD::SETGT:
38051      case ISD::SETGE:
38052        Opcode = X86ISD::FMIN;
38053        break;
38054
38055      case ISD::SETULT:
38056        // Converting this to a max would handle NaNs incorrectly.
38057        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
38058          break;
38059        Opcode = X86ISD::FMAX;
38060        break;
38061      case ISD::SETOLE:
38062        // Converting this to a max would handle comparisons between positive
38063        // and negative zero incorrectly, and swapping the operands would
38064        // cause it to handle NaNs incorrectly.
38065        if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
38066            !DAG.isKnownNeverZeroFloat(LHS) &&
38067            !DAG.isKnownNeverZeroFloat(RHS)) {
38068          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
38069            break;
38070          std::swap(LHS, RHS);
38071        }
38072        Opcode = X86ISD::FMAX;
38073        break;
38074      case ISD::SETULE:
38075        // Converting this to a max would handle both negative zeros and NaNs
38076        // incorrectly, but we can swap the operands to fix both.
38077        std::swap(LHS, RHS);
38078        LLVM_FALLTHROUGH;
38079      case ISD::SETOLT:
38080      case ISD::SETLT:
38081      case ISD::SETLE:
38082        Opcode = X86ISD::FMAX;
38083        break;
38084      }
38085    }
38086
38087    if (Opcode)
38088      return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
38089  }
38090
38091  // Some mask scalar intrinsics rely on checking if only one bit is set
38092  // and implement it in C code like this:
38093  // A[0] = (U & 1) ? A[0] : W[0];
38094  // This creates some redundant instructions that break pattern matching.
38095  // fold (select (setcc (and X, 1), 0, seteq), Y, Z) -> (select (and X, 1), Z, Y)
38096  if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
38097      Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
38098    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
38099    SDValue AndNode = Cond.getOperand(0);
38100    if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
38101        isNullConstant(Cond.getOperand(1)) &&
38102        isOneConstant(AndNode.getOperand(1))) {
38103      // LHS and RHS are swapped because the setcc outputs 1 when the AND
38104      // resulted in 0 and vice versa.
38105      AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
38106      return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
38107    }
38108  }
38109
38110  // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
38111  // lowering on KNL. In this case we convert it to
38112  // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
38113  // The same situation applies to all vectors of i8 and i16 without BWI.
38114  // Make sure we extend these even before type legalization gets a chance to
38115  // split wide vectors.
38116  // Since SKX, these selects have a proper lowering.
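  // For example (illustrative), a v32i8 vselect with a v32i1 condition on an
  // AVX512F-only target is sign-extended here to a v32i8 condition so it can
  // be lowered as a regular variable byte blend.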
38117  if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
38118      CondVT.getVectorElementType() == MVT::i1 &&
38119      (VT.getVectorElementType() == MVT::i8 ||
38120       VT.getVectorElementType() == MVT::i16)) {
38121    Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
38122    return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
38123  }
38124
38125  // AVX512 - Extend select with zero to merge with target shuffle.
38126  // select(mask, extract_subvector(shuffle(x)), zero) -->
38127  // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
38128  // TODO - support non target shuffles as well.
38129  if (Subtarget.hasAVX512() && CondVT.isVector() &&
38130      CondVT.getVectorElementType() == MVT::i1) {
38131    auto SelectableOp = [&TLI](SDValue Op) {
38132      return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
38133             isTargetShuffle(Op.getOperand(0).getOpcode()) &&
38134             isNullConstant(Op.getOperand(1)) &&
38135             TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
38136             Op.hasOneUse() && Op.getOperand(0).hasOneUse();
38137    };
38138
38139    bool SelectableLHS = SelectableOp(LHS);
38140    bool SelectableRHS = SelectableOp(RHS);
38141    bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
38142    bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());
38143
38144    if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
38145      EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
38146                                : RHS.getOperand(0).getValueType();
38147      unsigned NumSrcElts = SrcVT.getVectorNumElements();
38148      EVT SrcCondVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumSrcElts);
38149      LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
38150                            VT.getSizeInBits());
38151      RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
38152                            VT.getSizeInBits());
38153      Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
38154                         DAG.getUNDEF(SrcCondVT), Cond,
38155                         DAG.getIntPtrConstant(0, DL));
38156      SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
38157      return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
38158    }
38159  }
38160
38161  if (SDValue V = combineSelectOfTwoConstants(N, DAG))
38162    return V;
38163
38164  // Canonicalize max and min:
38165  // (x > y) ? x : y -> (x >= y) ? x : y
38166  // (x < y) ? x : y -> (x <= y) ? x : y
38167  // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
38168  // the need for an extra compare against zero.
38169  // e.g.:
38170  // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
38171  // subl   %esi, %edi
38172  // testl  %edi, %edi
38173  // movl   $0, %eax
38174  // cmovgl %edi, %eax
38175  // =>
38176  // xorl   %eax, %eax
38177  // subl   %esi, %edi
38178  // cmovsl %eax, %edi
38179  if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
38180      Cond.hasOneUse() &&
38181      DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
38182      DAG.isEqualTo(RHS, Cond.getOperand(1))) {
38183    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
38184    switch (CC) {
38185    default: break;
38186    case ISD::SETLT:
38187    case ISD::SETGT: {
38188      ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
38189      Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
38190                          Cond.getOperand(0), Cond.getOperand(1), NewCC);
38191      return DAG.getSelect(DL, VT, Cond, LHS, RHS);
38192    }
38193    }
38194  }
38195
38196  // Match VSELECTs into subs with unsigned saturation.
38197  if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
38198      // psubus is available in SSE2 for i8 and i16 vectors.
38199      Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
38200      isPowerOf2_32(VT.getVectorNumElements()) &&
38201      (VT.getVectorElementType() == MVT::i8 ||
38202       VT.getVectorElementType() == MVT::i16)) {
38203    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
38204
38205    // Check if one of the arms of the VSELECT is a zero vector. If it's on the
38206    // left side, invert the predicate to simplify the logic below.
38207    SDValue Other;
38208    if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
38209      Other = RHS;
38210      CC = ISD::getSetCCInverse(CC, VT.getVectorElementType());
38211    } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
38212      Other = LHS;
38213    }
38214
38215    if (Other.getNode() && Other->getNumOperands() == 2 &&
38216        Other->getOperand(0) == Cond.getOperand(0)) {
38217      SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
38218      SDValue CondRHS = Cond->getOperand(1);
38219
38220      // Look for a general sub with unsigned saturation first.
38221      // x >= y ? x-y : 0 --> subus x, y
38222      // x >  y ? x-y : 0 --> subus x, y
38223      if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
38224          Other->getOpcode() == ISD::SUB && OpRHS == CondRHS)
38225        return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
38226
38227      if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) {
38228        if (isa<BuildVectorSDNode>(CondRHS)) {
38229          // If the RHS is a constant we have to reverse the const
38230          // canonicalization.
38231          // x > C-1 ? x+-C : 0 --> subus x, C
38232          auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
38233            return (!Op && !Cond) ||
38234                   (Op && Cond &&
38235                    Cond->getAPIntValue() == (-Op->getAPIntValue() - 1));
38236          };
38237          if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
38238              ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT,
38239                                        /*AllowUndefs*/ true)) {
38240            OpRHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
38241                                OpRHS);
38242            return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
38243          }
38244
38245          // Another special case: If C was a sign bit, the sub has been
38246          // canonicalized into a xor.
38247          // FIXME: Would it be better to use computeKnownBits to determine
38248          //        whether it's safe to decanonicalize the xor?
38249          // x s< 0 ? x^C : 0 --> subus x, C
38250          if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
38251            if (CC == ISD::SETLT && Other.getOpcode() == ISD::XOR &&
38252                ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
38253                OpRHSConst->getAPIntValue().isSignMask()) {
38254              // Note that we have to rebuild the RHS constant here to ensure we
38255              // don't rely on particular values of undef lanes.
38256              OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
38257              return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
38258            }
38259          }
38260        }
38261      }
38262    }
38263  }
38264
38265  // Match VSELECTs into add with unsigned saturation.
38266  if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
38267      // paddus is available in SSE2 for i8 and i16 vectors.
38268      Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
38269      isPowerOf2_32(VT.getVectorNumElements()) &&
38270      (VT.getVectorElementType() == MVT::i8 ||
38271       VT.getVectorElementType() == MVT::i16)) {
38272    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
38273
38274    SDValue CondLHS = Cond->getOperand(0);
38275    SDValue CondRHS = Cond->getOperand(1);
38276
38277    // Check if one of the arms of the VSELECT is a vector with all bits set.
38278    // If it's on the left side, invert the predicate to simplify the logic below.
38279    SDValue Other;
38280    if (ISD::isBuildVectorAllOnes(LHS.getNode())) {
38281      Other = RHS;
38282      CC = ISD::getSetCCInverse(CC, VT.getVectorElementType());
38283    } else if (ISD::isBuildVectorAllOnes(RHS.getNode())) {
38284      Other = LHS;
38285    }
38286
38287    if (Other.getNode() && Other.getOpcode() == ISD::ADD) {
38288      SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
38289
38290      // Canonicalize condition operands.
38291      if (CC == ISD::SETUGE) {
38292        std::swap(CondLHS, CondRHS);
38293        CC = ISD::SETULE;
38294      }
38295
38296      // We can test against either of the addition operands.
38297      // x <= x+y ? x+y : ~0 --> addus x, y
38298      // x+y >= x ? x+y : ~0 --> addus x, y
38299      if (CC == ISD::SETULE && Other == CondRHS &&
38300          (OpLHS == CondLHS || OpRHS == CondLHS))
38301        return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
38302
38303      if (isa<BuildVectorSDNode>(OpRHS) && isa<BuildVectorSDNode>(CondRHS) &&
38304          CondLHS == OpLHS) {
38305        // If the RHS is a constant we have to reverse the const
38306        // canonicalization.
38307        // x > ~C ? x+C : ~0 --> addus x, C
38308        auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
38309          return Cond->getAPIntValue() == ~Op->getAPIntValue();
38310        };
38311        if (CC == ISD::SETULE &&
38312            ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT))
38313          return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
38314      }
38315    }
38316  }
38317
38318  // Early exit check
38319  if (!TLI.isTypeLegal(VT))
38320    return SDValue();
38321
38322  if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
38323    return V;
38324
38325  if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
38326    return V;
38327
38328  if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
38329    return V;
38330
38331  // select(~Cond, X, Y) -> select(Cond, Y, X)
38332  if (CondVT.getScalarType() != MVT::i1)
38333    if (SDValue CondNot = IsNOT(Cond, DAG))
38334      return DAG.getNode(N->getOpcode(), DL, VT,
38335                         DAG.getBitcast(CondVT, CondNot), RHS, LHS);
38336
38337  // Custom action for SELECT MMX
38338  if (VT == MVT::x86mmx) {
38339    LHS = DAG.getBitcast(MVT::i64, LHS);
38340    RHS = DAG.getBitcast(MVT::i64, RHS);
38341    SDValue newSelect = DAG.getNode(ISD::SELECT, DL, MVT::i64, Cond, LHS, RHS);
38342    return DAG.getBitcast(VT, newSelect);
38343  }
38344
38345  return SDValue();
38346}
38347
38348/// Combine:
38349///   (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
38350/// to:
38351///   (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
38352/// i.e., reusing the EFLAGS produced by the LOCKed instruction.
38353/// Note that this is only legal for some op/cc combinations.
38354static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
38355                                       SelectionDAG &DAG,
38356                                       const X86Subtarget &Subtarget) {
38357  // This combine only operates on CMP-like nodes.
38358  if (!(Cmp.getOpcode() == X86ISD::CMP ||
38359        (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
38360    return SDValue();
38361
38362  // Can't replace the cmp if it has more uses than the one we're looking at.
38363  // FIXME: We would like to be able to handle this, but would need to make sure
38364  // all uses were updated.
38365  if (!Cmp.hasOneUse())
38366    return SDValue();
38367
38368  // This only applies to variations of the common case:
38369  //   (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
38370  //   (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
38371  //   (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
38372  //   (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
38373  // Using the proper condcodes (see below), overflow is checked for.
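  // Illustrative C-level example: if (atomic_fetch_add(&x, 1) < 0) compares
  // the *old* value against 0; testing the flags of the lock add result with
  // a less-or-equal condition gives the same answer without reloading x.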
38374
38375  // FIXME: We can generalize both constraints:
38376  // - XOR/OR/AND (if they were made to survive AtomicExpand)
38377  // - LHS != 1
38378  // if the result is compared.
38379
38380  SDValue CmpLHS = Cmp.getOperand(0);
38381  SDValue CmpRHS = Cmp.getOperand(1);
38382
38383  if (!CmpLHS.hasOneUse())
38384    return SDValue();
38385
38386  unsigned Opc = CmpLHS.getOpcode();
38387  if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
38388    return SDValue();
38389
38390  SDValue OpRHS = CmpLHS.getOperand(2);
38391  auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
38392  if (!OpRHSC)
38393    return SDValue();
38394
38395  APInt Addend = OpRHSC->getAPIntValue();
38396  if (Opc == ISD::ATOMIC_LOAD_SUB)
38397    Addend = -Addend;
38398
38399  auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
38400  if (!CmpRHSC)
38401    return SDValue();
38402
38403  APInt Comparison = CmpRHSC->getAPIntValue();
38404
38405  // If the addend is the negation of the comparison value, then we can do
38406  // a full comparison by emitting the atomic arithmetic as a locked sub.
38407  if (Comparison == -Addend) {
38408    // The CC is fine, but we need to rewrite the LHS of the comparison as an
38409    // atomic sub.
38410    auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
38411    auto AtomicSub = DAG.getAtomic(
38412        ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpLHS.getValueType(),
38413        /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
38414        /*RHS*/ DAG.getConstant(-Addend, SDLoc(CmpRHS), CmpRHS.getValueType()),
38415        AN->getMemOperand());
38416    auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
38417    DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
38418                                  DAG.getUNDEF(CmpLHS.getValueType()));
38419    DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
38420    return LockOp;
38421  }
38422
38423  // We can handle comparisons with zero in a number of cases by manipulating
38424  // the CC used.
38425  if (!Comparison.isNullValue())
38426    return SDValue();
38427
38428  if (CC == X86::COND_S && Addend == 1)
38429    CC = X86::COND_LE;
38430  else if (CC == X86::COND_NS && Addend == 1)
38431    CC = X86::COND_G;
38432  else if (CC == X86::COND_G && Addend == -1)
38433    CC = X86::COND_GE;
38434  else if (CC == X86::COND_LE && Addend == -1)
38435    CC = X86::COND_L;
38436  else
38437    return SDValue();
38438
38439  SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
38440  DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
38441                                DAG.getUNDEF(CmpLHS.getValueType()));
38442  DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
38443  return LockOp;
38444}
38445
38446// Check whether a boolean test is testing a boolean value generated by
38447// X86ISD::SETCC. If so, return the operand of that SETCC and the proper
38448// condition code.
38449//
38450// Simplify the following patterns:
38451// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
38452// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
38453// to (Op EFLAGS Cond)
38454//
38455// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
38456// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
38457// to (Op EFLAGS !Cond)
38458//
38459// where Op could be BRCOND or CMOV.
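//
// For example, (brcond (CMP (SETCC COND_E EFLAGS) 0) NEQ) simplifies to
// (brcond EFLAGS COND_E), while a comparison against 0 with EQ instead yields
// the opposite condition, COND_NE.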
38460//
38461static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
38462  // This combine only operates on CMP-like nodes.
38463  if (!(Cmp.getOpcode() == X86ISD::CMP ||
38464        (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
38465    return SDValue();
38466
38467  // Quit if not used as a boolean value.
38468  if (CC != X86::COND_E && CC != X86::COND_NE)
38469    return SDValue();
38470
38471  // Check CMP operands. One of them should be 0 or 1 and the other should be
38472  // an SetCC or extended from it.
38473  SDValue Op1 = Cmp.getOperand(0);
38474  SDValue Op2 = Cmp.getOperand(1);
38475
38476  SDValue SetCC;
38477  const ConstantSDNode* C = nullptr;
38478  bool needOppositeCond = (CC == X86::COND_E);
38479  bool checkAgainstTrue = false; // Is it a comparison against 1?
38480
38481  if ((C = dyn_cast<ConstantSDNode>(Op1)))
38482    SetCC = Op2;
38483  else if ((C = dyn_cast<ConstantSDNode>(Op2)))
38484    SetCC = Op1;
38485  else // Quit if neither operand is a constant.
38486    return SDValue();
38487
38488  if (C->getZExtValue() == 1) {
38489    needOppositeCond = !needOppositeCond;
38490    checkAgainstTrue = true;
38491  } else if (C->getZExtValue() != 0)
38492    // Quit if the constant is neither 0 nor 1.
38493    return SDValue();
38494
38495  bool truncatedToBoolWithAnd = false;
38496  // Skip (zext $x), (trunc $x), or (and $x, 1) node.
38497  while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
38498         SetCC.getOpcode() == ISD::TRUNCATE ||
38499         SetCC.getOpcode() == ISD::AND) {
38500    if (SetCC.getOpcode() == ISD::AND) {
38501      int OpIdx = -1;
38502      if (isOneConstant(SetCC.getOperand(0)))
38503        OpIdx = 1;
38504      if (isOneConstant(SetCC.getOperand(1)))
38505        OpIdx = 0;
38506      if (OpIdx < 0)
38507        break;
38508      SetCC = SetCC.getOperand(OpIdx);
38509      truncatedToBoolWithAnd = true;
38510    } else
38511      SetCC = SetCC.getOperand(0);
38512  }
38513
38514  switch (SetCC.getOpcode()) {
38515  case X86ISD::SETCC_CARRY:
38516    // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
38517    // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
38518    // i.e. it's a comparison against true but the result of SETCC_CARRY is not
38519    // truncated to i1 using 'and'.
38520    if (checkAgainstTrue && !truncatedToBoolWithAnd)
38521      break;
38522    assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
38523           "Invalid use of SETCC_CARRY!");
38524    LLVM_FALLTHROUGH;
38525  case X86ISD::SETCC:
38526    // Set the condition code or opposite one if necessary.
38527    CC = X86::CondCode(SetCC.getConstantOperandVal(0));
38528    if (needOppositeCond)
38529      CC = X86::GetOppositeBranchCondition(CC);
38530    return SetCC.getOperand(1);
38531  case X86ISD::CMOV: {
38532    // Check whether the false/true values are canonical, i.e. 0 or 1.
38533    ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
38534    ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
38535    // Quit if true value is not a constant.
38536    if (!TVal)
38537      return SDValue();
38538    // Quit if false value is not a constant.
38539    if (!FVal) {
38540      SDValue Op = SetCC.getOperand(0);
38541      // Skip 'zext' or 'trunc' node.
38542      if (Op.getOpcode() == ISD::ZERO_EXTEND ||
38543          Op.getOpcode() == ISD::TRUNCATE)
38544        Op = Op.getOperand(0);
38545      // A special case for rdrand/rdseed, where 0 is set when the false
38546      // condition is found.
38547      if ((Op.getOpcode() != X86ISD::RDRAND &&
38548           Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
38549        return SDValue();
38550    }
38551    // Quit if false value is not the constant 0 or 1.
38552    bool FValIsFalse = true;
38553    if (FVal && FVal->getZExtValue() != 0) {
38554      if (FVal->getZExtValue() != 1)
38555        return SDValue();
38556      // If FVal is 1, opposite cond is needed.
38557      needOppositeCond = !needOppositeCond;
38558      FValIsFalse = false;
38559    }
38560    // Quit if TVal is not the constant opposite of FVal.
38561    if (FValIsFalse && TVal->getZExtValue() != 1)
38562      return SDValue();
38563    if (!FValIsFalse && TVal->getZExtValue() != 0)
38564      return SDValue();
38565    CC = X86::CondCode(SetCC.getConstantOperandVal(2));
38566    if (needOppositeCond)
38567      CC = X86::GetOppositeBranchCondition(CC);
38568    return SetCC.getOperand(3);
38569  }
38570  }
38571
38572  return SDValue();
38573}
38574
38575/// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
38576/// Match:
38577///   (X86or (X86setcc) (X86setcc))
38578///   (X86cmp (and (X86setcc) (X86setcc)), 0)
38579static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
38580                                           X86::CondCode &CC1, SDValue &Flags,
38581                                           bool &isAnd) {
38582  if (Cond->getOpcode() == X86ISD::CMP) {
38583    if (!isNullConstant(Cond->getOperand(1)))
38584      return false;
38585
38586    Cond = Cond->getOperand(0);
38587  }
38588
38589  isAnd = false;
38590
38591  SDValue SetCC0, SetCC1;
38592  switch (Cond->getOpcode()) {
38593  default: return false;
38594  case ISD::AND:
38595  case X86ISD::AND:
38596    isAnd = true;
38597    LLVM_FALLTHROUGH;
38598  case ISD::OR:
38599  case X86ISD::OR:
38600    SetCC0 = Cond->getOperand(0);
38601    SetCC1 = Cond->getOperand(1);
38602    break;
38603  }
38604
38605  // Make sure we have SETCC nodes, using the same flags value.
38606  if (SetCC0.getOpcode() != X86ISD::SETCC ||
38607      SetCC1.getOpcode() != X86ISD::SETCC ||
38608      SetCC0->getOperand(1) != SetCC1->getOperand(1))
38609    return false;
38610
38611  CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
38612  CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
38613  Flags = SetCC0->getOperand(1);
38614  return true;
38615}
38616
38617// When legalizing carry, we create carries via add X, -1.
38618// If that comes from an actual carry, via setcc, we use the
38619// carry directly.
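// For example, (add (zext (setcc COND_B, EFLAGS)), -1) reproduces the original
// carry in CF, so those EFLAGS can be used directly instead of the add's.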
38620static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
38621  if (EFLAGS.getOpcode() == X86ISD::ADD) {
38622    if (isAllOnesConstant(EFLAGS.getOperand(1))) {
38623      SDValue Carry = EFLAGS.getOperand(0);
38624      while (Carry.getOpcode() == ISD::TRUNCATE ||
38625             Carry.getOpcode() == ISD::ZERO_EXTEND ||
38626             Carry.getOpcode() == ISD::SIGN_EXTEND ||
38627             Carry.getOpcode() == ISD::ANY_EXTEND ||
38628             (Carry.getOpcode() == ISD::AND &&
38629              isOneConstant(Carry.getOperand(1))))
38630        Carry = Carry.getOperand(0);
38631      if (Carry.getOpcode() == X86ISD::SETCC ||
38632          Carry.getOpcode() == X86ISD::SETCC_CARRY) {
38633        // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
38634        uint64_t CarryCC = Carry.getConstantOperandVal(0);
38635        SDValue CarryOp1 = Carry.getOperand(1);
38636        if (CarryCC == X86::COND_B)
38637          return CarryOp1;
38638        if (CarryCC == X86::COND_A) {
38639          // Try to convert COND_A into COND_B in an attempt to facilitate
38640          // materializing "setb reg".
38641          //
38642          // Do not flip "e > c", where "c" is a constant, because the Cmp
38643          // instruction cannot take an immediate as its first operand.
38644          //
38645          if (CarryOp1.getOpcode() == X86ISD::SUB &&
38646              CarryOp1.getNode()->hasOneUse() &&
38647              CarryOp1.getValueType().isInteger() &&
38648              !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
38649            SDValue SubCommute =
38650                DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
38651                            CarryOp1.getOperand(1), CarryOp1.getOperand(0));
38652            return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
38653          }
38654        }
38655        // If this is a check of the z flag of an add with 1, switch to the
38656        // C flag.
38657        if (CarryCC == X86::COND_E &&
38658            CarryOp1.getOpcode() == X86ISD::ADD &&
38659            isOneConstant(CarryOp1.getOperand(1)))
38660          return CarryOp1;
38661      }
38662    }
38663  }
38664
38665  return SDValue();
38666}
38667
38668/// Optimize an EFLAGS definition used according to the condition code \p CC
38669/// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
38670/// uses of chain values.
38671static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
38672                                  SelectionDAG &DAG,
38673                                  const X86Subtarget &Subtarget) {
38674  if (CC == X86::COND_B)
38675    if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
38676      return Flags;
38677
38678  if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
38679    return R;
38680  return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
38681}
38682
38683/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
38684static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
38685                           TargetLowering::DAGCombinerInfo &DCI,
38686                           const X86Subtarget &Subtarget) {
38687  SDLoc DL(N);
38688
38689  SDValue FalseOp = N->getOperand(0);
38690  SDValue TrueOp = N->getOperand(1);
38691  X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
38692  SDValue Cond = N->getOperand(3);
38693
38694  // cmov X, X, ?, ? --> X
38695  if (TrueOp == FalseOp)
38696    return TrueOp;
38697
38698  // Try to simplify the EFLAGS and condition code operands.
38699  // We can't always do this as FCMOV only supports a subset of X86 cond.
38700  if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
38701    if (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC)) {
38702      SDValue Ops[] = {FalseOp, TrueOp, DAG.getTargetConstant(CC, DL, MVT::i8),
38703                       Flags};
38704      return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
38705    }
38706  }
38707
38708  // If this is a select between two integer constants, try to do some
38709  // optimizations.  Note that the operands are ordered the opposite of SELECT
38710  // operands.
38711  if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
38712    if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
38713      // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
38714      // larger than FalseC (the false value).
38715      if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
38716        CC = X86::GetOppositeBranchCondition(CC);
38717        std::swap(TrueC, FalseC);
38718        std::swap(TrueOp, FalseOp);
38719      }
38720
38721      // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3.  Likewise for any pow2/0.
38722      // This is efficient for any integer data type (including i8/i16) and
38723      // shift amount.
38724      if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
38725        Cond = getSETCC(CC, Cond, DL, DAG);
38726
38727        // Zero extend the condition if needed.
38728        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
38729
38730        unsigned ShAmt = TrueC->getAPIntValue().logBase2();
38731        Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
38732                           DAG.getConstant(ShAmt, DL, MVT::i8));
38733        return Cond;
38734      }
38735
38736      // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst.  This is efficient
38737      // for any integer data type, including i8/i16.
38738      if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
38739        Cond = getSETCC(CC, Cond, DL, DAG);
38740
38741        // Zero extend the condition if needed.
38742        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
38743                           FalseC->getValueType(0), Cond);
38744        Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
38745                           SDValue(FalseC, 0));
38746        return Cond;
38747      }
38748
38749      // Optimize cases that will turn into an LEA instruction.  This requires
38750      // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
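      // For example (illustrative): cond ? 14 : 5 has Diff == 9, so this emits
      // zext(setcc(cond)), scales it by 9 (typically a single LEA), and then
      // adds the base 5.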
38751      if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
38752        APInt Diff = TrueC->getAPIntValue() - FalseC->getAPIntValue();
38753        assert(Diff.getBitWidth() == N->getValueType(0).getSizeInBits() &&
38754               "Implicit constant truncation");
38755
38756        bool isFastMultiplier = false;
38757        if (Diff.ult(10)) {
38758          switch (Diff.getZExtValue()) {
38759          default: break;
38760          case 1:  // result = add base, cond
38761          case 2:  // result = lea base(    , cond*2)
38762          case 3:  // result = lea base(cond, cond*2)
38763          case 4:  // result = lea base(    , cond*4)
38764          case 5:  // result = lea base(cond, cond*4)
38765          case 8:  // result = lea base(    , cond*8)
38766          case 9:  // result = lea base(cond, cond*8)
38767            isFastMultiplier = true;
38768            break;
38769          }
38770        }
38771
38772        if (isFastMultiplier) {
38773          Cond = getSETCC(CC, Cond, DL ,DAG);
38774          // Zero extend the condition if needed.
38775          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
38776                             Cond);
38777          // Scale the condition by the difference.
38778          if (Diff != 1)
38779            Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
38780                               DAG.getConstant(Diff, DL, Cond.getValueType()));
38781
38782          // Add the base if non-zero.
38783          if (FalseC->getAPIntValue() != 0)
38784            Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
38785                               SDValue(FalseC, 0));
38786          return Cond;
38787        }
38788      }
38789    }
38790  }
38791
38792  // Handle these cases:
38793  //   (select (x != c), e, c) -> (select (x != c), e, x)
38794  //   (select (x == c), c, e) -> (select (x == c), x, e)
38795  // where c is an integer constant, and the "select" is the combination
38796  // of CMOV and CMP.
38797  //
38798  // The rationale for this change is that a conditional move from a constant
38799  // needs two instructions, whereas a conditional move from a register needs
38800  // only one instruction.
38801  //
38802  // CAVEAT: Replacing a constant with a symbolic value may obscure some
38803  //  instruction-combining opportunities, so this optimization needs to be
38804  //  postponed as late as possible.
38805  //
38806  if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
38807    // The DCI.xxxx conditions are checked to postpone the optimization as
38808    // late as possible.
38809
38810    ConstantSDNode *CmpAgainst = nullptr;
38811    if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
38812        (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
38813        !isa<ConstantSDNode>(Cond.getOperand(0))) {
38814
38815      if (CC == X86::COND_NE &&
38816          CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
38817        CC = X86::GetOppositeBranchCondition(CC);
38818        std::swap(TrueOp, FalseOp);
38819      }
38820
38821      if (CC == X86::COND_E &&
38822          CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
38823        SDValue Ops[] = {FalseOp, Cond.getOperand(0),
38824                         DAG.getTargetConstant(CC, DL, MVT::i8), Cond};
38825        return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
38826      }
38827    }
38828  }
38829
38830  // Fold and/or of setcc's to double CMOV:
38831  //   (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
38832  //   (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
38833  //
38834  // This combine lets us generate:
38835  //   cmovcc1 (jcc1 if we don't have CMOV)
38836  //   cmovcc2 (same)
38837  // instead of:
38838  //   setcc1
38839  //   setcc2
38840  //   and/or
38841  //   cmovne (jne if we don't have CMOV)
38842  // When we can't use the CMOV instruction, it might increase branch
38843  // mispredicts.
38844  // When we can use CMOV, or when there is no mispredict, this improves
38845  // throughput and reduces register pressure.
38846  //
38847  if (CC == X86::COND_NE) {
38848    SDValue Flags;
38849    X86::CondCode CC0, CC1;
38850    bool isAndSetCC;
38851    if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
38852      if (isAndSetCC) {
38853        std::swap(FalseOp, TrueOp);
38854        CC0 = X86::GetOppositeBranchCondition(CC0);
38855        CC1 = X86::GetOppositeBranchCondition(CC1);
38856      }
38857
38858      SDValue LOps[] = {FalseOp, TrueOp,
38859                        DAG.getTargetConstant(CC0, DL, MVT::i8), Flags};
38860      SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
38861      SDValue Ops[] = {LCMOV, TrueOp, DAG.getTargetConstant(CC1, DL, MVT::i8),
38862                       Flags};
38863      SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
38864      return CMOV;
38865    }
38866  }
38867
38868  // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
38869  //      (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
38870  // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
38871  //    (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
38872  if ((CC == X86::COND_NE || CC == X86::COND_E) &&
38873      Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
38874    SDValue Add = TrueOp;
38875    SDValue Const = FalseOp;
38876    // Canonicalize the condition code for easier matching and output.
38877    if (CC == X86::COND_E)
38878      std::swap(Add, Const);
38879
38880    // We might have replaced the constant in the cmov with the LHS of the
38881    // compare. If so change it to the RHS of the compare.
38882    if (Const == Cond.getOperand(0))
38883      Const = Cond.getOperand(1);
38884
38885    // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
38886    if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
38887        Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
38888        (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
38889         Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
38890        Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
38891      EVT VT = N->getValueType(0);
38892      // This should constant fold.
38893      SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
38894      SDValue CMov =
38895          DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
38896                      DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
38897      return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
38898    }
38899  }
38900
38901  return SDValue();
38902}
38903
38904/// Different mul shrinking modes.
38905enum class ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
38906
38907static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
38908  EVT VT = N->getOperand(0).getValueType();
38909  if (VT.getScalarSizeInBits() != 32)
38910    return false;
38911
38912  assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
38913  unsigned SignBits[2] = {1, 1};
38914  bool IsPositive[2] = {false, false};
38915  for (unsigned i = 0; i < 2; i++) {
38916    SDValue Opd = N->getOperand(i);
38917
38918    SignBits[i] = DAG.ComputeNumSignBits(Opd);
38919    IsPositive[i] = DAG.SignBitIsZero(Opd);
38920  }
38921
38922  bool AllPositive = IsPositive[0] && IsPositive[1];
38923  unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
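  // Note: a 32-bit value with at least 25 sign bits has at most 32 - 25 + 1 == 8
  // significant bits, i.e. it fits in i8; the 24, 17 and 16 thresholds below
  // correspond to unsigned i8, signed i16 and unsigned i16 in the same way.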
38924  // When ranges are from -128 ~ 127, use MULS8 mode.
38925  if (MinSignBits >= 25)
38926    Mode = ShrinkMode::MULS8;
38927  // When ranges are from 0 ~ 255, use MULU8 mode.
38928  else if (AllPositive && MinSignBits >= 24)
38929    Mode = ShrinkMode::MULU8;
38930  // When ranges are from -32768 ~ 32767, use MULS16 mode.
38931  else if (MinSignBits >= 17)
38932    Mode = ShrinkMode::MULS16;
38933  // When ranges are from 0 ~ 65535, use MULU16 mode.
38934  else if (AllPositive && MinSignBits >= 16)
38935    Mode = ShrinkMode::MULU16;
38936  else
38937    return false;
38938  return true;
38939}
38940
38941/// When the operands of a vector mul are extended from smaller-sized values,
38942/// like i8 and i16, the type of the mul may be shrunk to generate more
38943/// efficient code. Two typical patterns are handled:
38944/// Pattern1:
38945///     %2 = sext/zext <N x i8> %1 to <N x i32>
38946///     %4 = sext/zext <N x i8> %3 to <N x i32>
38947///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
38948///     %5 = mul <N x i32> %2, %4
38949///
38950/// Pattern2:
38951///     %2 = zext/sext <N x i16> %1 to <N x i32>
38952///     %4 = zext/sext <N x i16> %3 to <N x i32>
38953///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
38954///     %5 = mul <N x i32> %2, %4
38955///
38956/// There are four mul shrinking modes:
38957/// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
38958/// -128 to 127, and the scalar value range of %4 is also -128 to 127,
38959/// generate pmullw+sext32 for it (MULS8 mode).
38960/// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
38961/// 0 to 255, and the scalar value range of %4 is also 0 to 255,
38962/// generate pmullw+zext32 for it (MULU8 mode).
38963/// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
38964/// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
38965/// generate pmullw+pmulhw for it (MULS16 mode).
38966/// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
38967/// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
38968/// generate pmullw+pmulhuw for it (MULU16 mode).
38969static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
38970                               const X86Subtarget &Subtarget) {
38971  // Check for legality
38972  // pmullw/pmulhw on XMM registers require SSE2.
38973  if (!Subtarget.hasSSE2())
38974    return SDValue();
38975
38976  // Check for profitability
38977  // pmulld is supported since SSE41. It is better to use pmulld
38978  // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
38979  // the expansion.
38980  bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
38981  if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
38982    return SDValue();
38983
38984  ShrinkMode Mode;
38985  if (!canReduceVMulWidth(N, DAG, Mode))
38986    return SDValue();
38987
38988  SDLoc DL(N);
38989  SDValue N0 = N->getOperand(0);
38990  SDValue N1 = N->getOperand(1);
38991  EVT VT = N->getOperand(0).getValueType();
38992  unsigned NumElts = VT.getVectorNumElements();
38993  if ((NumElts % 2) != 0)
38994    return SDValue();
38995
38996  EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
38997
38998  // Shrink the operands of mul.
38999  SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
39000  SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
39001
39002  // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
39003  // lower part is needed.
39004  SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
39005  if (Mode == ShrinkMode::MULU8 || Mode == ShrinkMode::MULS8)
39006    return DAG.getNode((Mode == ShrinkMode::MULU8) ? ISD::ZERO_EXTEND
39007                                                   : ISD::SIGN_EXTEND,
39008                       DL, VT, MulLo);
39009
39010  MVT ResVT = MVT::getVectorVT(MVT::i32, NumElts / 2);
39011  // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
39012  // the higher part is also needed.
39013  SDValue MulHi =
39014      DAG.getNode(Mode == ShrinkMode::MULS16 ? ISD::MULHS : ISD::MULHU, DL,
39015                  ReducedVT, NewN0, NewN1);
39016
39017  // Repack the lower and higher parts of the mul result into a wider
39018  // result.
39019  // Generate shuffle functioning as punpcklwd.
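  // (For example, with NumElts == 8 the interleave mask below is
  // <0, 8, 1, 9, 2, 10, 3, 11>.)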
39020  SmallVector<int, 16> ShuffleMask(NumElts);
39021  for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
39022    ShuffleMask[2 * i] = i;
39023    ShuffleMask[2 * i + 1] = i + NumElts;
39024  }
39025  SDValue ResLo =
39026      DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
39027  ResLo = DAG.getBitcast(ResVT, ResLo);
39028  // Generate shuffle functioning as punpckhwd.
39029  for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
39030    ShuffleMask[2 * i] = i + NumElts / 2;
39031    ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
39032  }
39033  SDValue ResHi =
39034      DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
39035  ResHi = DAG.getBitcast(ResVT, ResHi);
39036  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
39037}
39038
39039static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
39040                                 EVT VT, const SDLoc &DL) {
39041
39042  auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
39043    SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
39044                                 DAG.getConstant(Mult, DL, VT));
39045    Result = DAG.getNode(ISD::SHL, DL, VT, Result,
39046                         DAG.getConstant(Shift, DL, MVT::i8));
39047    Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
39048                         N->getOperand(0));
39049    return Result;
39050  };
39051
39052  auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
39053    SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
39054                                 DAG.getConstant(Mul1, DL, VT));
39055    Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
39056                         DAG.getConstant(Mul2, DL, VT));
39057    Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
39058                         N->getOperand(0));
39059    return Result;
39060  };
39061
39062  switch (MulAmt) {
39063  default:
39064    break;
39065  case 11:
39066    // mul x, 11 => add ((shl (mul x, 5), 1), x)
39067    return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
39068  case 21:
39069    // mul x, 21 => add ((shl (mul x, 5), 2), x)
39070    return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
39071  case 41:
39072    // mul x, 41 => add ((shl (mul x, 5), 3), x)
39073    return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
39074  case 22:
39075    // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
39076    return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
39077                       combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
39078  case 19:
39079    // mul x, 19 => add ((shl (mul x, 9), 1), x)
39080    return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
39081  case 37:
39082    // mul x, 37 => add ((shl (mul x, 9), 2), x)
39083    return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
39084  case 73:
39085    // mul x, 73 => add ((shl (mul x, 9), 3), x)
39086    return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
39087  case 13:
39088    // mul x, 13 => add ((shl (mul x, 3), 2), x)
39089    return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
39090  case 23:
39091    // mul x, 23 => sub ((shl (mul x, 3), 3), x)
39092    return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
39093  case 26:
39094    // mul x, 26 => add ((mul (mul x, 5), 5), x)
39095    return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
39096  case 28:
39097    // mul x, 28 => add ((mul (mul x, 9), 3), x)
39098    return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
39099  case 29:
39100    // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
39101    return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
39102                       combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
39103  }
39104
39105  // Another trick: if MulAmt is a power of 2 plus 2, 4 or 8, we can use a shift
39106  // followed by a single LEA.
39107  // First check that this is a sum of two powers of 2, because that's easy. Then
39108  // count the trailing zeros to find the exponent of the smaller power of 2.
39109  // TODO: We can do this even without LEA at a cost of two shifts and an add.
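  // For example (illustrative): mul x, 20 == (x << 4) + (x << 2), where the
  // (x << 2) term can come from an LEA scale of 4.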
39110  if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
39111    unsigned ScaleShift = countTrailingZeros(MulAmt);
39112    if (ScaleShift >= 1 && ScaleShift < 4) {
39113      unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
39114      SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39115                                   DAG.getConstant(ShiftAmt, DL, MVT::i8));
39116      SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39117                                   DAG.getConstant(ScaleShift, DL, MVT::i8));
39118      return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
39119    }
39120  }
39121
39122  return SDValue();
39123}
39124
39125// If the upper 17 bits of each element are zero then we can use PMADDWD,
39126// which is always at least as quick as PMULLD, except on KNL.
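// (PMADDWD multiplies the 16-bit lanes as signed values and adds adjacent
// pairs; with the upper 17 bits of each i32 element known zero, the odd 16-bit
// lanes are zero and the even lanes are non-negative, so each i32 result is
// just the low 16-bit product.)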
39127static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
39128                                   const X86Subtarget &Subtarget) {
39129  if (!Subtarget.hasSSE2())
39130    return SDValue();
39131
39132  if (Subtarget.isPMADDWDSlow())
39133    return SDValue();
39134
39135  EVT VT = N->getValueType(0);
39136
39137  // Only support vXi32 vectors.
39138  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
39139    return SDValue();
39140
39141  // Make sure the vXi16 type is legal. This covers the AVX512 without BWI case.
39142  // Also allow v2i32 if it will be widened.
39143  MVT WVT = MVT::getVectorVT(MVT::i16, 2 * VT.getVectorNumElements());
39144  if (VT != MVT::v2i32 && !DAG.getTargetLoweringInfo().isTypeLegal(WVT))
39145    return SDValue();
39146
39147  SDValue N0 = N->getOperand(0);
39148  SDValue N1 = N->getOperand(1);
39149
39150  // If we are zero extending in two steps without SSE4.1, it's better to reduce
39151  // the vmul width instead.
39152  if (!Subtarget.hasSSE41() &&
39153      (N0.getOpcode() == ISD::ZERO_EXTEND &&
39154       N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
39155      (N1.getOpcode() == ISD::ZERO_EXTEND &&
39156       N1.getOperand(0).getScalarValueSizeInBits() <= 8))
39157    return SDValue();
39158
39159  APInt Mask17 = APInt::getHighBitsSet(32, 17);
39160  if (!DAG.MaskedValueIsZero(N1, Mask17) ||
39161      !DAG.MaskedValueIsZero(N0, Mask17))
39162    return SDValue();
39163
39164  // Use SplitOpsAndApply to handle AVX splitting.
39165  auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
39166                           ArrayRef<SDValue> Ops) {
39167    MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
39168    return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
39169  };
39170  return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
39171                          { DAG.getBitcast(WVT, N0), DAG.getBitcast(WVT, N1) },
39172                          PMADDWDBuilder);
39173}
39174
39175static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
39176                                  const X86Subtarget &Subtarget) {
39177  if (!Subtarget.hasSSE2())
39178    return SDValue();
39179
39180  EVT VT = N->getValueType(0);
39181
39182  // Only support vXi64 vectors.
39183  if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
39184      VT.getVectorNumElements() < 2 ||
39185      !isPowerOf2_32(VT.getVectorNumElements()))
39186    return SDValue();
39187
39188  SDValue N0 = N->getOperand(0);
39189  SDValue N1 = N->getOperand(1);
39190
39191  // PMULDQ returns the 64-bit result of the signed multiplication of the lower
39192  // 32 bits of each operand. We can use it if the sign bits stretch that far.
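  // (If both operands have more than 32 sign bits, each 64-bit operand equals
  // the sign extension of its low 32 bits, so the full 64-bit product matches
  // what PMULDQ computes from the low halves.)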
39193  if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
39194      DAG.ComputeNumSignBits(N1) > 32) {
39195    auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
39196                            ArrayRef<SDValue> Ops) {
39197      return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
39198    };
39199    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
39200                            PMULDQBuilder, /*CheckBWI*/false);
39201  }
39202
39203  // If the upper bits are zero we can use a single pmuludq.
39204  APInt Mask = APInt::getHighBitsSet(64, 32);
39205  if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
39206    auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
39207                             ArrayRef<SDValue> Ops) {
39208      return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
39209    };
39210    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
39211                            PMULUDQBuilder, /*CheckBWI*/false);
39212  }
39213
39214  return SDValue();
39215}
39216
39217/// Optimize a single multiply with constant into two operations in order to
39218/// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
39219static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
39220                          TargetLowering::DAGCombinerInfo &DCI,
39221                          const X86Subtarget &Subtarget) {
39222  EVT VT = N->getValueType(0);
39223
39224  if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
39225    return V;
39226
39227  if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
39228    return V;
39229
39230  if (DCI.isBeforeLegalize() && VT.isVector())
39231    return reduceVMULWidth(N, DAG, Subtarget);
39232
39233  if (!MulConstantOptimization)
39234    return SDValue();
39235  // An imul is usually smaller than the alternative sequence.
39236  if (DAG.getMachineFunction().getFunction().hasMinSize())
39237    return SDValue();
39238
39239  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
39240    return SDValue();
39241
39242  if (VT != MVT::i64 && VT != MVT::i32)
39243    return SDValue();
39244
39245  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
39246  if (!C)
39247    return SDValue();
39248  if (isPowerOf2_64(C->getZExtValue()))
39249    return SDValue();
39250
39251  int64_t SignMulAmt = C->getSExtValue();
39252  assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
39253  uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;
39254
39255  SDLoc DL(N);
39256  if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
39257    SDValue NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
39258                                 DAG.getConstant(AbsMulAmt, DL, VT));
39259    if (SignMulAmt < 0)
39260      NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
39261                           NewMul);
39262
39263    return NewMul;
39264  }
39265
39266  uint64_t MulAmt1 = 0;
39267  uint64_t MulAmt2 = 0;
39268  if ((AbsMulAmt % 9) == 0) {
39269    MulAmt1 = 9;
39270    MulAmt2 = AbsMulAmt / 9;
39271  } else if ((AbsMulAmt % 5) == 0) {
39272    MulAmt1 = 5;
39273    MulAmt2 = AbsMulAmt / 5;
39274  } else if ((AbsMulAmt % 3) == 0) {
39275    MulAmt1 = 3;
39276    MulAmt2 = AbsMulAmt / 3;
39277  }
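  // For example (illustrative): mul x, 45 factors as 9 * 5 and typically
  // becomes two LEAs: t = x + x*8; result = t + t*4.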
39278
39279  SDValue NewMul;
39280  // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
39281  if (MulAmt2 &&
39282      (isPowerOf2_64(MulAmt2) ||
39283       (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {
39284
39285    if (isPowerOf2_64(MulAmt2) &&
39286        !(SignMulAmt >= 0 && N->hasOneUse() &&
39287          N->use_begin()->getOpcode() == ISD::ADD))
39288      // If the second multiplier is a power of 2, issue it first. We want the multiply by
39289      // 3, 5, or 9 to be folded into the addressing mode unless the lone use
39290      // is an add. Only do this for positive multiply amounts since the
39291      // negate would prevent it from being used as an address mode anyway.
39292      std::swap(MulAmt1, MulAmt2);
39293
39294    if (isPowerOf2_64(MulAmt1))
39295      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39296                           DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
39297    else
39298      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
39299                           DAG.getConstant(MulAmt1, DL, VT));
39300
39301    if (isPowerOf2_64(MulAmt2))
39302      NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
39303                           DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
39304    else
39305      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
39306                           DAG.getConstant(MulAmt2, DL, VT));
39307
39308    // Negate the result.
39309    if (SignMulAmt < 0)
39310      NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
39311                           NewMul);
39312  } else if (!Subtarget.slowLEA())
39313    NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
39314
39315  if (!NewMul) {
39316    assert(C->getZExtValue() != 0 &&
39317           C->getZExtValue() != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) &&
39318           "Both cases that could cause potential overflows should have "
39319           "already been handled.");
39320    if (isPowerOf2_64(AbsMulAmt - 1)) {
39321      // (mul x, 2^N + 1) => (add (shl x, N), x)
39322      NewMul = DAG.getNode(
39323          ISD::ADD, DL, VT, N->getOperand(0),
39324          DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39325                      DAG.getConstant(Log2_64(AbsMulAmt - 1), DL,
39326                                      MVT::i8)));
39327      // To negate, subtract the number from zero
39328      if (SignMulAmt < 0)
39329        NewMul = DAG.getNode(ISD::SUB, DL, VT,
39330                             DAG.getConstant(0, DL, VT), NewMul);
39331    } else if (isPowerOf2_64(AbsMulAmt + 1)) {
39332      // (mul x, 2^N - 1) => (sub (shl x, N), x)
39333      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39334                           DAG.getConstant(Log2_64(AbsMulAmt + 1),
39335                                           DL, MVT::i8));
39336      // To negate, reverse the operands of the subtract.
39337      if (SignMulAmt < 0)
39338        NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
39339      else
39340        NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
39341    } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2)) {
39342      // (mul x, 2^N + 2) => (add (add (shl x, N), x), x)
39343      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39344                           DAG.getConstant(Log2_64(AbsMulAmt - 2),
39345                                           DL, MVT::i8));
39346      NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
39347      NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
39348    } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2)) {
39349      // (mul x, 2^N - 2) => (sub (sub (shl x, N), x), x)
39350      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39351                           DAG.getConstant(Log2_64(AbsMulAmt + 2),
39352                                           DL, MVT::i8));
39353      NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
39354      NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
39355    }
39356  }
39357
39358  return NewMul;
39359}
39360
39361static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
39362  SDValue N0 = N->getOperand(0);
39363  SDValue N1 = N->getOperand(1);
39364  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
39365  EVT VT = N0.getValueType();
39366
39367  // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
39368  // since the result of setcc_c is all zero's or all ones.
39369  if (VT.isInteger() && !VT.isVector() &&
39370      N1C && N0.getOpcode() == ISD::AND &&
39371      N0.getOperand(1).getOpcode() == ISD::Constant) {
39372    SDValue N00 = N0.getOperand(0);
39373    APInt Mask = N0.getConstantOperandAPInt(1);
39374    Mask <<= N1C->getAPIntValue();
39375    bool MaskOK = false;
39376    // We can handle cases concerning bit-widening nodes containing setcc_c if
39377    // we carefully interrogate the mask to make sure we are semantics
39378    // preserving.
39379    // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
39380    // of the underlying setcc_c operation if the setcc_c was zero extended.
39381    // Consider the following example:
39382    //   zext(setcc_c)                 -> i32 0x0000FFFF
39383    //   c1                            -> i32 0x0000FFFF
39384    //   c2                            -> i32 0x00000001
39385    //   (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
39386    //   (and setcc_c, (c1 << c2))     -> i32 0x0000FFFE
39387    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
39388      MaskOK = true;
39389    } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
39390               N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
39391      MaskOK = true;
39392    } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
39393                N00.getOpcode() == ISD::ANY_EXTEND) &&
39394               N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
39395      MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
39396    }
39397    if (MaskOK && Mask != 0) {
39398      SDLoc DL(N);
39399      return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
39400    }
39401  }
39402
39403  // Hardware support for vector shifts is sparse, which makes us scalarize the
39404  // vector operations in many cases. Also, on Sandy Bridge ADD is faster than
39405  // SHL.
39406  // (shl V, 1) -> add V,V
39407  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
39408    if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
39409      assert(N0.getValueType().isVector() && "Invalid vector shift type");
39410      // We shift all of the values by one. In many cases we do not have
39411      // hardware support for this operation. This is better expressed as an ADD
39412      // of two values.
39413      if (N1SplatC->isOne())
39414        return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
39415    }
39416
39417  return SDValue();
39418}
39419
39420static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG) {
39421  SDValue N0 = N->getOperand(0);
39422  SDValue N1 = N->getOperand(1);
39423  EVT VT = N0.getValueType();
39424  unsigned Size = VT.getSizeInBits();
39425
39426  // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
39427  // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
39428  // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
39429  // depending on sign of (SarConst - [56,48,32,24,16])
39430
39431  // Sign extensions on X86 are MOVs (movsx). These MOVs have the same code size
39432  // as the SHIFTs above (only a shift by 1 has a smaller encoding).
39433  // However, the MOVs have two advantages over a SHIFT:
39434  // 1. A MOV can write to a destination register that differs from its source.
39435  // 2. A MOV accepts memory operands.
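  // For example (illustrative, i64): (sra (shl x, 56), 60)
  //   --> (sra (sext_in_reg x, i8), 4)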
39436
39437  if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
39438      N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
39439      N0.getOperand(1).getOpcode() != ISD::Constant)
39440    return SDValue();
39441
39442  SDValue N00 = N0.getOperand(0);
39443  SDValue N01 = N0.getOperand(1);
39444  APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
39445  APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
39446  EVT CVT = N1.getValueType();
39447
39448  if (SarConst.isNegative())
39449    return SDValue();
39450
39451  for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
39452    unsigned ShiftSize = SVT.getSizeInBits();
39453    // Skip types without a corresponding sext/zext and ShlConst values that
39454    // are not one of [56,48,32,24,16].
39455    if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
39456      continue;
39457    SDLoc DL(N);
39458    SDValue NN =
39459        DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
39460    SarConst = SarConst - (Size - ShiftSize);
39461    if (SarConst == 0)
39462      return NN;
39463    else if (SarConst.isNegative())
39464      return DAG.getNode(ISD::SHL, DL, VT, NN,
39465                         DAG.getConstant(-SarConst, DL, CVT));
39466    else
39467      return DAG.getNode(ISD::SRA, DL, VT, NN,
39468                         DAG.getConstant(SarConst, DL, CVT));
39469  }
39470  return SDValue();
39471}
39472
39473static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
39474                                        TargetLowering::DAGCombinerInfo &DCI) {
39475  SDValue N0 = N->getOperand(0);
39476  SDValue N1 = N->getOperand(1);
39477  EVT VT = N0.getValueType();
39478
39479  // Only do this on the last DAG combine as it can interfere with other
39480  // combines.
39481  if (!DCI.isAfterLegalizeDAG())
39482    return SDValue();
39483
39484  // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
39485  // TODO: This is a generic DAG combine that became an x86-only combine to
39486  // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
39487  // and-not ('andn').
39488  if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
39489    return SDValue();
39490
39491  auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
39492  auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
39493  if (!ShiftC || !AndC)
39494    return SDValue();
39495
39496  // If we can shrink the constant mask below 8 bits or 32 bits, then this
39497  // transform should reduce code size. It may also enable secondary transforms
39498  // from improved known-bits analysis or instruction selection.
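  // For example (illustrative): srl (and X, 0x7F00), 8 --> and (srl X, 8), 0x7F
  // shrinks the AND immediate from 16 bits to 8 bits.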
39499  APInt MaskVal = AndC->getAPIntValue();
39500
39501  // If this can be matched by a zero extend, don't optimize.
39502  if (MaskVal.isMask()) {
39503    unsigned TO = MaskVal.countTrailingOnes();
39504    if (TO >= 8 && isPowerOf2_32(TO))
39505      return SDValue();
39506  }
39507
39508  APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
39509  unsigned OldMaskSize = MaskVal.getMinSignedBits();
39510  unsigned NewMaskSize = NewMaskVal.getMinSignedBits();
39511  if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
39512      (OldMaskSize > 32 && NewMaskSize <= 32)) {
39513    // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
39514    SDLoc DL(N);
39515    SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
39516    SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
39517    return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
39518  }
39519  return SDValue();
39520}
39521
39522static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
39523                                 TargetLowering::DAGCombinerInfo &DCI,
39524                                 const X86Subtarget &Subtarget) {
39525  unsigned Opcode = N->getOpcode();
39526  assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
39527         "Unexpected pack opcode");
39528
39529  EVT VT = N->getValueType(0);
39530  SDValue N0 = N->getOperand(0);
39531  SDValue N1 = N->getOperand(1);
39532  unsigned DstBitsPerElt = VT.getScalarSizeInBits();
39533  unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
39534  assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
39535         N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
39536         "Unexpected PACKSS/PACKUS input type");
39537
39538  bool IsSigned = (X86ISD::PACKSS == Opcode);
39539
39540  // Constant Folding.
39541  APInt UndefElts0, UndefElts1;
39542  SmallVector<APInt, 32> EltBits0, EltBits1;
39543  if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
39544      (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
39545      getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
39546      getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
39547    unsigned NumLanes = VT.getSizeInBits() / 128;
39548    unsigned NumDstElts = VT.getVectorNumElements();
39549    unsigned NumSrcElts = NumDstElts / 2;
39550    unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
39551    unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
39552
39553    APInt Undefs(NumDstElts, 0);
39554    SmallVector<APInt, 32> Bits(NumDstElts, APInt::getNullValue(DstBitsPerElt));
39555    for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
39556      for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
39557        unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
39558        auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
39559        auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);
39560
39561        if (UndefElts[SrcIdx]) {
39562          Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
39563          continue;
39564        }
39565
39566        APInt &Val = EltBits[SrcIdx];
39567        if (IsSigned) {
39568          // PACKSS: Truncate signed value with signed saturation.
39569          // Source values less than dst minint are saturated to minint.
39570          // Source values greater than dst maxint are saturated to maxint.
39571          if (Val.isSignedIntN(DstBitsPerElt))
39572            Val = Val.trunc(DstBitsPerElt);
39573          else if (Val.isNegative())
39574            Val = APInt::getSignedMinValue(DstBitsPerElt);
39575          else
39576            Val = APInt::getSignedMaxValue(DstBitsPerElt);
39577        } else {
39578          // PACKUS: Truncate signed value with unsigned saturation.
39579          // Source values less than zero are saturated to zero.
39580          // Source values greater than dst maxuint are saturated to maxuint.
39581          if (Val.isIntN(DstBitsPerElt))
39582            Val = Val.trunc(DstBitsPerElt);
39583          else if (Val.isNegative())
39584            Val = APInt::getNullValue(DstBitsPerElt);
39585          else
39586            Val = APInt::getAllOnesValue(DstBitsPerElt);
39587        }
39588        Bits[Lane * NumDstEltsPerLane + Elt] = Val;
39589      }
39590    }
39591
39592    return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
39593  }
39594
39595  // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
39596  // truncate to create a larger truncate.
39597  if (Subtarget.hasAVX512() &&
39598      N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
39599      N0.getOperand(0).getValueType() == MVT::v8i32) {
39600    if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
39601        (!IsSigned &&
39602         DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
39603      if (Subtarget.hasVLX())
39604        return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));
39605
39606      // Widen input to v16i32 so we can truncate that.
39607      SDLoc dl(N);
39608      SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
39609                                   N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
39610      return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
39611    }
39612  }
39613
39614  // Attempt to combine as shuffle.
39615  SDValue Op(N, 0);
39616  if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
39617    return Res;
39618
39619  return SDValue();
39620}
39621
39622static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
39623                                     TargetLowering::DAGCombinerInfo &DCI,
39624                                     const X86Subtarget &Subtarget) {
39625  assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
39626          X86ISD::VSRL == N->getOpcode()) &&
39627         "Unexpected shift opcode");
39628  EVT VT = N->getValueType(0);
39629  SDValue N0 = N->getOperand(0);
39630  SDValue N1 = N->getOperand(1);
39631
39632  // Shift zero -> zero.
39633  if (ISD::isBuildVectorAllZeros(N0.getNode()))
39634    return DAG.getConstant(0, SDLoc(N), VT);
39635
39636  // Detect constant shift amounts.
39637  APInt UndefElts;
39638  SmallVector<APInt, 32> EltBits;
39639  if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
39640    unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
39641    return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
39642                                      EltBits[0].getZExtValue(), DAG);
39643  }
39644
39645  APInt KnownUndef, KnownZero;
39646  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39647  APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
39648  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
39649                                     KnownZero, DCI))
39650    return SDValue(N, 0);
39651
39652  return SDValue();
39653}
39654
39655static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
39656                                     TargetLowering::DAGCombinerInfo &DCI,
39657                                     const X86Subtarget &Subtarget) {
39658  unsigned Opcode = N->getOpcode();
39659  assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
39660          X86ISD::VSRLI == Opcode) &&
39661         "Unexpected shift opcode");
39662  bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
39663  EVT VT = N->getValueType(0);
39664  SDValue N0 = N->getOperand(0);
39665  unsigned NumBitsPerElt = VT.getScalarSizeInBits();
39666  assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
39667         "Unexpected value type");
39668  assert(N->getOperand(1).getValueType() == MVT::i8 &&
39669         "Unexpected shift amount type");
39670
39671  // Out of range logical bit shifts are guaranteed to be zero.
39672  // Out of range arithmetic bit shifts splat the sign bit.
39673  unsigned ShiftVal = N->getConstantOperandVal(1);
39674  if (ShiftVal >= NumBitsPerElt) {
39675    if (LogicalShift)
39676      return DAG.getConstant(0, SDLoc(N), VT);
39677    else
39678      ShiftVal = NumBitsPerElt - 1;
39679  }
39680
39681  // Shift N0 by zero -> N0.
39682  if (!ShiftVal)
39683    return N0;
39684
39685  // Shift zero -> zero.
39686  if (ISD::isBuildVectorAllZeros(N0.getNode()))
39687    return DAG.getConstant(0, SDLoc(N), VT);
39688
39689  // Fold (VSRAI (VSRAI X, C1), C2) --> (VSRAI X, (C1 + C2)) with (C1 + C2)
39690  // clamped to (NumBitsPerElt - 1).
39691  if (Opcode == X86ISD::VSRAI && N0.getOpcode() == X86ISD::VSRAI) {
39692    unsigned ShiftVal2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
39693    unsigned NewShiftVal = ShiftVal + ShiftVal2;
39694    if (NewShiftVal >= NumBitsPerElt)
39695      NewShiftVal = NumBitsPerElt - 1;
39696    return DAG.getNode(X86ISD::VSRAI, SDLoc(N), VT, N0.getOperand(0),
39697                       DAG.getTargetConstant(NewShiftVal, SDLoc(N), MVT::i8));
39698  }
39699
39700  // We can decode 'whole byte' logical bit shifts as shuffles.
39701  if (LogicalShift && (ShiftVal % 8) == 0) {
39702    SDValue Op(N, 0);
39703    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
39704      return Res;
39705  }
39706
39707  // Constant Folding.
39708  APInt UndefElts;
39709  SmallVector<APInt, 32> EltBits;
39710  if (N->isOnlyUserOf(N0.getNode()) &&
39711      getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
39712    assert(EltBits.size() == VT.getVectorNumElements() &&
39713           "Unexpected shift value type");
39714    // Undef elements need to fold to 0. It's possible SimplifyDemandedBits
39715    // created an undef input because no input bits were demanded, but a user
39716    // still expects 0 in the other bits.
39717    for (unsigned i = 0, e = EltBits.size(); i != e; ++i) {
39718      APInt &Elt = EltBits[i];
39719      if (UndefElts[i])
39720        Elt = 0;
39721      else if (X86ISD::VSHLI == Opcode)
39722        Elt <<= ShiftVal;
39723      else if (X86ISD::VSRAI == Opcode)
39724        Elt.ashrInPlace(ShiftVal);
39725      else
39726        Elt.lshrInPlace(ShiftVal);
39727    }
39728    // Reset undef elements since they were zeroed above.
39729    UndefElts = 0;
39730    return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
39731  }
39732
39733  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39734  if (TLI.SimplifyDemandedBits(SDValue(N, 0),
39735                               APInt::getAllOnesValue(NumBitsPerElt), DCI))
39736    return SDValue(N, 0);
39737
39738  return SDValue();
39739}
39740
39741static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
39742                                   TargetLowering::DAGCombinerInfo &DCI,
39743                                   const X86Subtarget &Subtarget) {
39744  EVT VT = N->getValueType(0);
39745  assert(((N->getOpcode() == X86ISD::PINSRB && VT == MVT::v16i8) ||
39746          (N->getOpcode() == X86ISD::PINSRW && VT == MVT::v8i16)) &&
39747         "Unexpected vector insertion");
39748
39749  unsigned NumBitsPerElt = VT.getScalarSizeInBits();
39750  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39751  if (TLI.SimplifyDemandedBits(SDValue(N, 0),
39752                               APInt::getAllOnesValue(NumBitsPerElt), DCI))
39753    return SDValue(N, 0);
39754
39755  // Attempt to combine PINSRB/PINSRW patterns to a shuffle.
39756  SDValue Op(N, 0);
39757  if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
39758    return Res;
39759
39760  return SDValue();
39761}
39762
39763/// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
39764/// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
39765/// OR -> CMPNEQSS.
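/// (ZF==1 && PF==0, i.e. COND_E && COND_NP, means "equal and ordered", which is
/// exactly what CMPEQSS computes; the OR of COND_NE and COND_P is its
/// complement, matching CMPNEQSS.)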
39766static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
39767                                   TargetLowering::DAGCombinerInfo &DCI,
39768                                   const X86Subtarget &Subtarget) {
39769  unsigned opcode;
39770
39771  // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
39772  // we're requiring SSE2 for both.
39773  if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
39774    SDValue N0 = N->getOperand(0);
39775    SDValue N1 = N->getOperand(1);
39776    SDValue CMP0 = N0.getOperand(1);
39777    SDValue CMP1 = N1.getOperand(1);
39778    SDLoc DL(N);
39779
39780    // The SETCCs should both refer to the same CMP.
39781    if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
39782      return SDValue();
39783
39784    SDValue CMP00 = CMP0->getOperand(0);
39785    SDValue CMP01 = CMP0->getOperand(1);
39786    EVT     VT    = CMP00.getValueType();
39787
39788    if (VT == MVT::f32 || VT == MVT::f64) {
39789      bool ExpectingFlags = false;
39790      // Check for any users that want flags:
39791      for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
39792           !ExpectingFlags && UI != UE; ++UI)
39793        switch (UI->getOpcode()) {
39794        default:
39795        case ISD::BR_CC:
39796        case ISD::BRCOND:
39797        case ISD::SELECT:
39798          ExpectingFlags = true;
39799          break;
39800        case ISD::CopyToReg:
39801        case ISD::SIGN_EXTEND:
39802        case ISD::ZERO_EXTEND:
39803        case ISD::ANY_EXTEND:
39804          break;
39805        }
39806
39807      if (!ExpectingFlags) {
39808        enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
39809        enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
39810
39811        if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
39812          X86::CondCode tmp = cc0;
39813          cc0 = cc1;
39814          cc1 = tmp;
39815        }
39816
39817        if ((cc0 == X86::COND_E  && cc1 == X86::COND_NP) ||
39818            (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
39819          // FIXME: need symbolic constants for these magic numbers.
39820          // See X86ATTInstPrinter.cpp:printSSECC().
39821          unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
39822          if (Subtarget.hasAVX512()) {
39823            SDValue FSetCC =
39824                DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
39825                            DAG.getTargetConstant(x86cc, DL, MVT::i8));
39826            // Need to fill with zeros to ensure the bitcast will produce zeroes
39827            // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
39828            SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
39829                                      DAG.getConstant(0, DL, MVT::v16i1),
39830                                      FSetCC, DAG.getIntPtrConstant(0, DL));
39831            return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
39832                                      N->getSimpleValueType(0));
39833          }
39834          SDValue OnesOrZeroesF =
39835              DAG.getNode(X86ISD::FSETCC, DL, CMP00.getValueType(), CMP00,
39836                          CMP01, DAG.getTargetConstant(x86cc, DL, MVT::i8));
39837
39838          bool is64BitFP = (CMP00.getValueType() == MVT::f64);
39839          MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
39840
39841          if (is64BitFP && !Subtarget.is64Bit()) {
39842            // On a 32-bit target, we cannot bitcast the 64-bit float to a
39843            // 64-bit integer, since that's not a legal type. Since
39844            // OnesOrZeroesF is all ones or all zeroes, we don't need all the
39845            // bits, but can do this little dance to extract the lowest 32 bits
39846            // and work with those going forward.
39847            SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
39848                                           OnesOrZeroesF);
39849            SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
39850            OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
39851                                        Vector32, DAG.getIntPtrConstant(0, DL));
39852            IntVT = MVT::i32;
39853          }
39854
39855          SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
39856          SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
39857                                      DAG.getConstant(1, DL, IntVT));
39858          SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
39859                                              ANDed);
39860          return OneBitOfTruth;
39861        }
39862      }
39863    }
39864  }
39865  return SDValue();
39866}
39867
39868/// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
39869static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
39870  assert(N->getOpcode() == ISD::AND);
39871
39872  MVT VT = N->getSimpleValueType(0);
39873  if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
39874    return SDValue();
39875
39876  SDValue X, Y;
39877  SDValue N0 = N->getOperand(0);
39878  SDValue N1 = N->getOperand(1);
39879
39880  if (SDValue Not = IsNOT(N0, DAG)) {
39881    X = Not;
39882    Y = N1;
39883  } else if (SDValue Not = IsNOT(N1, DAG)) {
39884    X = Not;
39885    Y = N0;
39886  } else
39887    return SDValue();
39888
39889  X = DAG.getBitcast(VT, X);
39890  Y = DAG.getBitcast(VT, Y);
39891  return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
39892}
39893
39894// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
39895// register. In most cases we actually compare or select YMM-sized registers
39896// and mixing the two types creates horrible code. This method optimizes
39897// some of the transition sequences.
39898// Even with AVX-512 this is still useful for removing casts around logical
39899// operations on vXi1 mask types.
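// For example (illustrative, SIGN_EXTEND case), with A and B of type v8i32:
//   (v8i32 (sext (and (trunc A), (trunc B)):v8i16))
//     --> (sext_in_reg (and A, B), v8i16)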
39900static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
39901                                     const X86Subtarget &Subtarget) {
39902  EVT VT = N->getValueType(0);
39903  assert(VT.isVector() && "Expected vector type");
39904
39905  assert((N->getOpcode() == ISD::ANY_EXTEND ||
39906          N->getOpcode() == ISD::ZERO_EXTEND ||
39907          N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
39908
39909  SDValue Narrow = N->getOperand(0);
39910  EVT NarrowVT = Narrow.getValueType();
39911
39912  if (Narrow->getOpcode() != ISD::XOR &&
39913      Narrow->getOpcode() != ISD::AND &&
39914      Narrow->getOpcode() != ISD::OR)
39915    return SDValue();
39916
39917  SDValue N0  = Narrow->getOperand(0);
39918  SDValue N1  = Narrow->getOperand(1);
39919  SDLoc DL(Narrow);
39920
39921  // The left side has to be a trunc.
39922  if (N0.getOpcode() != ISD::TRUNCATE)
39923    return SDValue();
39924
39925  // The type of the truncated inputs.
39926  if (N0.getOperand(0).getValueType() != VT)
39927    return SDValue();
39928
39929  // The right side has to be a 'trunc' or a constant vector.
39930  bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
39931                  N1.getOperand(0).getValueType() == VT;
39932  if (!RHSTrunc &&
39933      !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
39934    return SDValue();
39935
39936  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39937
39938  if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), VT))
39939    return SDValue();
39940
39941  // Set N0 and N1 to hold the inputs to the new wide operation.
39942  N0 = N0.getOperand(0);
39943  if (RHSTrunc)
39944    N1 = N1.getOperand(0);
39945  else
39946    N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
39947
39948  // Generate the wide operation.
39949  SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, VT, N0, N1);
39950  unsigned Opcode = N->getOpcode();
39951  switch (Opcode) {
39952  default: llvm_unreachable("Unexpected opcode");
39953  case ISD::ANY_EXTEND:
39954    return Op;
39955  case ISD::ZERO_EXTEND:
39956    return DAG.getZeroExtendInReg(Op, DL, NarrowVT.getScalarType());
39957  case ISD::SIGN_EXTEND:
39958    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
39959                       Op, DAG.getValueType(NarrowVT));
39960  }
39961}
39962
39963/// If both input operands of a logic op are being cast from floating point
39964/// types, try to convert this into a floating point logic node to avoid
39965/// unnecessary moves from SSE to integer registers.
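/// For example (illustrative):
///   (and (bitcast f32:X to i32), (bitcast f32:Y to i32))
///     --> (bitcast (f32 (FAND X, Y)) to i32)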
39966static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
39967                                        const X86Subtarget &Subtarget) {
39968  EVT VT = N->getValueType(0);
39969  SDValue N0 = N->getOperand(0);
39970  SDValue N1 = N->getOperand(1);
39971  SDLoc DL(N);
39972
39973  if (N0.getOpcode() != ISD::BITCAST || N1.getOpcode() != ISD::BITCAST)
39974    return SDValue();
39975
39976  SDValue N00 = N0.getOperand(0);
39977  SDValue N10 = N1.getOperand(0);
39978  EVT N00Type = N00.getValueType();
39979  EVT N10Type = N10.getValueType();
39980
39981  // Ensure that both types are the same and are legal scalar fp types.
39982  if (N00Type != N10Type ||
39983      !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
39984        (Subtarget.hasSSE2() && N00Type == MVT::f64)))
39985    return SDValue();
39986
39987  unsigned FPOpcode;
39988  switch (N->getOpcode()) {
39989  default: llvm_unreachable("Unexpected input node for FP logic conversion");
39990  case ISD::AND: FPOpcode = X86ISD::FAND; break;
39991  case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
39992  case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
39993  }
39994
39995  SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
39996  return DAG.getBitcast(VT, FPLogic);
39997}
39998
39999/// If this is a zero/all-bits result that is bitwise-anded with a low-bits
40000/// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
40001/// with a shift-right to eliminate loading the vector constant mask value.
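// Rough example of the intent (values illustrative): if each v4i32 lane of X
// is known to be all-ones or all-zeros, then
//   (and X, <1,1,1,1>)
// can become
//   (X86ISD::VSRLI X, 31)
// which avoids materializing the <1,1,1,1> mask from the constant pool.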
40002static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
40003                                     const X86Subtarget &Subtarget) {
40004  SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
40005  SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
40006  EVT VT0 = Op0.getValueType();
40007  EVT VT1 = Op1.getValueType();
40008
40009  if (VT0 != VT1 || !VT0.isSimple() || !VT0.isInteger())
40010    return SDValue();
40011
40012  APInt SplatVal;
40013  if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
40014      !SplatVal.isMask())
40015    return SDValue();
40016
40017  // Don't prevent creation of ANDN.
40018  if (isBitwiseNot(Op0))
40019    return SDValue();
40020
40021  if (!SupportedVectorShiftWithImm(VT0.getSimpleVT(), Subtarget, ISD::SRL))
40022    return SDValue();
40023
40024  unsigned EltBitWidth = VT0.getScalarSizeInBits();
40025  if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
40026    return SDValue();
40027
40028  SDLoc DL(N);
40029  unsigned ShiftVal = SplatVal.countTrailingOnes();
40030  SDValue ShAmt = DAG.getTargetConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
40031  SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt);
40032  return DAG.getBitcast(N->getValueType(0), Shift);
40033}
40034
40035// Get the index node from the lowered DAG of a GEP IR instruction with one
40036// indexing dimension.
40037static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
40038  if (Ld->isIndexed())
40039    return SDValue();
40040
40041  SDValue Base = Ld->getBasePtr();
40042
40043  if (Base.getOpcode() != ISD::ADD)
40044    return SDValue();
40045
40046  SDValue ShiftedIndex = Base.getOperand(0);
40047
40048  if (ShiftedIndex.getOpcode() != ISD::SHL)
40049    return SDValue();
40050
40051  return ShiftedIndex.getOperand(0);
40052
40053}
40054
40055static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
40056  if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
40057    switch (VT.getSizeInBits()) {
40058    default: return false;
40059    case 64: return Subtarget.is64Bit();
40060    case 32: return true;
40061    }
40062  }
40063  return false;
40064}
40065
40066// This function recognizes cases where the X86 BZHI instruction can replace an
40067// 'and-load' sequence.
40068// When an integer value is loaded from an array of constants defined as
40069// follows:
40070//
40071//   int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
40072//
40073// and a bitwise AND is then applied between the loaded value and another input,
40074// the sequence is equivalent to performing BZHI (zero high bits) on that input
40075// using the same index as the load.
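// Hypothetical source-level idiom this is aimed at (array/variable names are
// illustrative only):
//   static const int mask[32] = {0x0, 0x1, 0x3, ..., 0x7fffffff};
//   int r = value & mask[n];   // keep the low n bits
// which instruction selection can then turn into a single BZHI of 'value' by
// 'n' on BMI2 targets.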
40076static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
40077                                    const X86Subtarget &Subtarget) {
40078  MVT VT = Node->getSimpleValueType(0);
40079  SDLoc dl(Node);
40080
40081  // Check if subtarget has BZHI instruction for the node's type
40082  if (!hasBZHI(Subtarget, VT))
40083    return SDValue();
40084
40085  // Try matching the pattern for both operands.
40086  for (unsigned i = 0; i < 2; i++) {
40087    SDValue N = Node->getOperand(i);
40088    LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());
40089
40090    // Bail out if the operand is not a load instruction.
40091    if (!Ld)
40092      return SDValue();
40093
40094    const Value *MemOp = Ld->getMemOperand()->getValue();
40095
40096    if (!MemOp)
40097      return SDValue();
40098
40099    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
40100      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
40101        if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
40102
40103          Constant *Init = GV->getInitializer();
40104          Type *Ty = Init->getType();
40105          if (!isa<ConstantDataArray>(Init) ||
40106              !Ty->getArrayElementType()->isIntegerTy() ||
40107              Ty->getArrayElementType()->getScalarSizeInBits() !=
40108                  VT.getSizeInBits() ||
40109              Ty->getArrayNumElements() >
40110                  Ty->getArrayElementType()->getScalarSizeInBits())
40111            continue;
40112
40113          // Check if the array's constant elements are suitable to our case.
40114          uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
40115          bool ConstantsMatch = true;
40116          for (uint64_t j = 0; j < ArrayElementCount; j++) {
40117            ConstantInt *Elem =
40118                dyn_cast<ConstantInt>(Init->getAggregateElement(j));
40119            if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
40120              ConstantsMatch = false;
40121              break;
40122            }
40123          }
40124          if (!ConstantsMatch)
40125            continue;
40126
40127          // Do the transformation (for a 32-bit type):
40128          //   (and (load arr[idx]), inp)
40129          // -> (and inp, (srl 0xFFFFFFFF, (sub 32, idx)))
40130          //    which will later be selected as one bzhi instruction.
40131          SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
40132          SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);
40133
40134          // Get the Node which indexes into the array.
40135          SDValue Index = getIndexFromUnindexedLoad(Ld);
40136          if (!Index)
40137            return SDValue();
40138          Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);
40139
40140          SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
40141          Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);
40142
40143          SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
40144          SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);
40145
40146          return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
40147        }
40148      }
40149    }
40150  }
40151  return SDValue();
40152}
40153
40154// Look for (and (ctpop X), 1), which is the IR form of __builtin_parity.
40155// Turn it into a series of XORs and a setnp.
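// Sketch of the i32 expansion this produces (register names illustrative):
//   mov  %x, %t
//   shr  $16, %t
//   xor  %x, %t       // fold the two 16-bit halves into the low 16 bits
//   xor  %th, %tl     // 8-bit xor of the two remaining bytes sets PF
//   setnp %result     // NP (odd parity of the byte) == (ctpop(x) & 1)
// For i64 the two 32-bit halves are xor'ed together first and this i32 idiom
// is then reused.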
40156static SDValue combineParity(SDNode *N, SelectionDAG &DAG,
40157                             const X86Subtarget &Subtarget) {
40158  EVT VT = N->getValueType(0);
40159
40160  // We only support 64-bit and 32-bit. 64-bit requires special handling
40161  // unless the 64-bit popcnt instruction is legal.
40162  if (VT != MVT::i32 && VT != MVT::i64)
40163    return SDValue();
40164
40165  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40166  if (TLI.isTypeLegal(VT) && TLI.isOperationLegal(ISD::CTPOP, VT))
40167    return SDValue();
40168
40169  SDValue N0 = N->getOperand(0);
40170  SDValue N1 = N->getOperand(1);
40171
40172  // LHS needs to be a single use CTPOP.
40173  if (N0.getOpcode() != ISD::CTPOP || !N0.hasOneUse())
40174    return SDValue();
40175
40176  // RHS needs to be 1.
40177  if (!isOneConstant(N1))
40178    return SDValue();
40179
40180  SDLoc DL(N);
40181  SDValue X = N0.getOperand(0);
40182
40183  // If this is 64-bit, it's always best to xor the two 32-bit pieces together
40184  // even if we have popcnt.
40185  if (VT == MVT::i64) {
40186    SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
40187                             DAG.getNode(ISD::SRL, DL, VT, X,
40188                                         DAG.getConstant(32, DL, MVT::i8)));
40189    SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
40190    X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
40191    // Generate a 32-bit parity idiom. This will bring us back here if we need
40192    // to expand it too.
40193    SDValue Parity = DAG.getNode(ISD::AND, DL, MVT::i32,
40194                                 DAG.getNode(ISD::CTPOP, DL, MVT::i32, X),
40195                                 DAG.getConstant(1, DL, MVT::i32));
40196    return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Parity);
40197  }
40198  assert(VT == MVT::i32 && "Unexpected VT!");
40199
40200  // Xor the high and low 16-bits together using a 32-bit operation.
40201  SDValue Hi16 = DAG.getNode(ISD::SRL, DL, VT, X,
40202                             DAG.getConstant(16, DL, MVT::i8));
40203  X = DAG.getNode(ISD::XOR, DL, VT, X, Hi16);
40204
40205  // Finally, xor the low 2 bytes together and use an 8-bit flag-setting xor.
40206  // This should allow an h-reg to be used to save a shift.
40207  // FIXME: We only get an h-reg in 32-bit mode.
40208  SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
40209                           DAG.getNode(ISD::SRL, DL, VT, X,
40210                                       DAG.getConstant(8, DL, MVT::i8)));
40211  SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
40212  SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
40213  SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);
40214
40215  // Copy the inverse of the parity flag into a register with setcc.
40216  SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
40217  // Zero extend to original type.
40218  return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0), Setnp);
40219}
40220
40221
40222// Look for (and (bitcast (vXi1 (concat_vectors (vYi1 setcc), undef, ...))), C)
40223// where C is a mask containing the same number of bits as the setcc and
40224// where the setcc will freely zero the upper bits of the k-register. We can
40225// replace the undefs in the concat with 0s and remove the AND. This mainly
40226// helps with v2i1/v4i1 setccs being cast to scalar.
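// Hedged example (types illustrative): for
//   (and (bitcast i8 (concat_vectors (v2i1 setcc), undef, undef, undef)), 3)
// the setcc already writes zeroes into the unused mask bits, so the undef
// operands of the concat can be replaced with zero vectors and the 'and 3'
// dropped, subject to the AVX-512 legality checks below.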
40227static SDValue combineScalarAndWithMaskSetcc(SDNode *N, SelectionDAG &DAG,
40228                                             const X86Subtarget &Subtarget) {
40229  assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");
40230
40231  EVT VT = N->getValueType(0);
40232
40233  // Make sure this is an AND with constant. We will check the value of the
40234  // constant later.
40235  if (!isa<ConstantSDNode>(N->getOperand(1)))
40236    return SDValue();
40237
40238  // This is implied by the ConstantSDNode.
40239  assert(!VT.isVector() && "Expected scalar VT!");
40240
40241  if (N->getOperand(0).getOpcode() != ISD::BITCAST ||
40242      !N->getOperand(0).hasOneUse() ||
40243      !N->getOperand(0).getOperand(0).hasOneUse())
40244    return SDValue();
40245
40246  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40247  SDValue Src = N->getOperand(0).getOperand(0);
40248  EVT SrcVT = Src.getValueType();
40249  if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::i1 ||
40250      !TLI.isTypeLegal(SrcVT))
40251    return SDValue();
40252
40253  if (Src.getOpcode() != ISD::CONCAT_VECTORS)
40254    return SDValue();
40255
40256  // We only care about the first subvector of the concat; we expect the
40257  // other subvectors to be ignored by the AND if we make the change.
40258  SDValue SubVec = Src.getOperand(0);
40259  EVT SubVecVT = SubVec.getValueType();
40260
40261  // First subvector should be a setcc with a legal result type. The RHS of the
40262  // AND should be a mask with this many bits.
40263  if (SubVec.getOpcode() != ISD::SETCC || !TLI.isTypeLegal(SubVecVT) ||
40264      !N->getConstantOperandAPInt(1).isMask(SubVecVT.getVectorNumElements()))
40265    return SDValue();
40266
40267  EVT SetccVT = SubVec.getOperand(0).getValueType();
40268  if (!TLI.isTypeLegal(SetccVT) ||
40269      !(Subtarget.hasVLX() || SetccVT.is512BitVector()))
40270    return SDValue();
40271
40272  if (!(Subtarget.hasBWI() || SetccVT.getScalarSizeInBits() >= 32))
40273    return SDValue();
40274
40275  // We passed all the checks. Rebuild the concat_vectors with zeroes
40276  // and cast it back to VT.
40277  SDLoc dl(N);
40278  SmallVector<SDValue, 4> Ops(Src.getNumOperands(),
40279                              DAG.getConstant(0, dl, SubVecVT));
40280  Ops[0] = SubVec;
40281  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT,
40282                               Ops);
40283  return DAG.getBitcast(VT, Concat);
40284}
40285
40286static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
40287                          TargetLowering::DAGCombinerInfo &DCI,
40288                          const X86Subtarget &Subtarget) {
40289  EVT VT = N->getValueType(0);
40290
40291  // If this is SSE1-only, convert to FAND to avoid scalarization.
40292  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
40293    return DAG.getBitcast(
40294        MVT::v4i32, DAG.getNode(X86ISD::FAND, SDLoc(N), MVT::v4f32,
40295                                DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
40296                                DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
40297  }
40298
40299  // Use a 32-bit and+zext if upper bits known zero.
40300  if (VT == MVT::i64 && Subtarget.is64Bit() &&
40301      !isa<ConstantSDNode>(N->getOperand(1))) {
40302    APInt HiMask = APInt::getHighBitsSet(64, 32);
40303    if (DAG.MaskedValueIsZero(N->getOperand(1), HiMask) ||
40304        DAG.MaskedValueIsZero(N->getOperand(0), HiMask)) {
40305      SDLoc dl(N);
40306      SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(0));
40307      SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(1));
40308      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
40309                         DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
40310    }
40311  }
40312
40313  // This must be done before legalization has expanded the ctpop.
40314  if (SDValue V = combineParity(N, DAG, Subtarget))
40315    return V;
40316
40317  // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
40318  // TODO: Support multiple SrcOps.
40319  if (VT == MVT::i1) {
40320    SmallVector<SDValue, 2> SrcOps;
40321    if (matchScalarReduction(SDValue(N, 0), ISD::AND, SrcOps) &&
40322        SrcOps.size() == 1) {
40323      SDLoc dl(N);
40324      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40325      unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
40326      EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
40327      SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
40328      if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
40329        Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
40330      if (Mask) {
40331        APInt AllBits = APInt::getAllOnesValue(NumElts);
40332        return DAG.getSetCC(dl, MVT::i1, Mask,
40333                            DAG.getConstant(AllBits, dl, MaskVT), ISD::SETEQ);
40334      }
40335    }
40336  }
40337
40338  if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
40339    return V;
40340
40341  if (DCI.isBeforeLegalizeOps())
40342    return SDValue();
40343
40344  if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
40345    return R;
40346
40347  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
40348    return FPLogic;
40349
40350  if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG))
40351    return R;
40352
40353  if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
40354    return ShiftRight;
40355
40356  if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
40357    return R;
40358
40359  // Attempt to recursively combine a bitmask AND with shuffles.
40360  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
40361    SDValue Op(N, 0);
40362    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
40363      return Res;
40364  }
40365
40366  // Attempt to combine a scalar bitmask AND with an extracted shuffle.
40367  if ((VT.getScalarSizeInBits() % 8) == 0 &&
40368      N->getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
40369      isa<ConstantSDNode>(N->getOperand(0).getOperand(1))) {
40370    SDValue BitMask = N->getOperand(1);
40371    SDValue SrcVec = N->getOperand(0).getOperand(0);
40372    EVT SrcVecVT = SrcVec.getValueType();
40373
40374    // Check that the constant bitmask masks whole bytes.
40375    APInt UndefElts;
40376    SmallVector<APInt, 64> EltBits;
40377    if (VT == SrcVecVT.getScalarType() &&
40378        N->getOperand(0)->isOnlyUserOf(SrcVec.getNode()) &&
40379        getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
40380        llvm::all_of(EltBits, [](APInt M) {
40381          return M.isNullValue() || M.isAllOnesValue();
40382        })) {
40383      unsigned NumElts = SrcVecVT.getVectorNumElements();
40384      unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
40385      unsigned Idx = N->getOperand(0).getConstantOperandVal(1);
40386
40387      // Create a root shuffle mask from the byte mask and the extracted index.
40388      SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
40389      for (unsigned i = 0; i != Scale; ++i) {
40390        if (UndefElts[i])
40391          continue;
40392        int VecIdx = Scale * Idx + i;
40393        ShuffleMask[VecIdx] =
40394            EltBits[i].isNullValue() ? SM_SentinelZero : VecIdx;
40395      }
40396
40397      if (SDValue Shuffle = combineX86ShufflesRecursively(
40398              {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 1,
40399              /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
40400        return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), VT, Shuffle,
40401                           N->getOperand(0).getOperand(1));
40402    }
40403  }
40404
40405  return SDValue();
40406}
40407
40408// Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
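// Illustrative effect (mask value hypothetical): with C = 0x00FF00FF per lane,
//   (or (and X, C), (and Y, ~C))  ->  (or (and X, C), (andnp C, Y))
// so both halves of the bit-select now reference the single constant C, which
// makes it easier for later combines to form a PCMOV/VPTERNLOG/PBLENDVB-style
// select.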
40409static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
40410                                     const X86Subtarget &Subtarget) {
40411  assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
40412
40413  MVT VT = N->getSimpleValueType(0);
40414  if (!VT.isVector() || (VT.getScalarSizeInBits() % 8) != 0)
40415    return SDValue();
40416
40417  SDValue N0 = peekThroughBitcasts(N->getOperand(0));
40418  SDValue N1 = peekThroughBitcasts(N->getOperand(1));
40419  if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
40420    return SDValue();
40421
40422  // On XOP we'll lower to PCMOV, so accept one use. With AVX512, we can use
40423  // VPTERNLOG. Otherwise, only do this if either mask has multiple uses already.
40424  bool UseVPTERNLOG = (Subtarget.hasAVX512() && VT.is512BitVector()) ||
40425                      Subtarget.hasVLX();
40426  if (!(Subtarget.hasXOP() || UseVPTERNLOG ||
40427        !N0.getOperand(1).hasOneUse() || !N1.getOperand(1).hasOneUse()))
40428    return SDValue();
40429
40430  // Attempt to extract constant byte masks.
40431  APInt UndefElts0, UndefElts1;
40432  SmallVector<APInt, 32> EltBits0, EltBits1;
40433  if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
40434                                     false, false))
40435    return SDValue();
40436  if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
40437                                     false, false))
40438    return SDValue();
40439
40440  for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
40441    // TODO - add UNDEF elts support.
40442    if (UndefElts0[i] || UndefElts1[i])
40443      return SDValue();
40444    if (EltBits0[i] != ~EltBits1[i])
40445      return SDValue();
40446  }
40447
40448  SDLoc DL(N);
40449  SDValue X = N->getOperand(0);
40450  SDValue Y =
40451      DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
40452                  DAG.getBitcast(VT, N1.getOperand(0)));
40453  return DAG.getNode(ISD::OR, DL, VT, X, Y);
40454}
40455
40456// Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
40457static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
40458  if (N->getOpcode() != ISD::OR)
40459    return false;
40460
40461  SDValue N0 = N->getOperand(0);
40462  SDValue N1 = N->getOperand(1);
40463
40464  // Canonicalize AND to LHS.
40465  if (N1.getOpcode() == ISD::AND)
40466    std::swap(N0, N1);
40467
40468  // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
40469  if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
40470    return false;
40471
40472  Mask = N1.getOperand(0);
40473  X = N1.getOperand(1);
40474
40475  // Check to see if the mask appeared in both the AND and ANDNP.
40476  if (N0.getOperand(0) == Mask)
40477    Y = N0.getOperand(1);
40478  else if (N0.getOperand(1) == Mask)
40479    Y = N0.getOperand(0);
40480  else
40481    return false;
40482
40483  // TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting for the
40484  // ANDNP combine allows other combines to happen that prevent matching.
40485  return true;
40486}
40487
40488// Try to fold:
40489//   (or (and m, y), (pandn m, x))
40490// into:
40491//   (vselect m, y, x)
40492// As a special case, try to fold:
40493//   (or (and m, (sub 0, x)), (pandn m, x))
40494// into:
40495//   (sub (xor X, M), M)
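// Hedged SSE4.1 sketch (v16i8 shown): when every element of m is all-ones or
// all-zeros,
//   (or (and m, y), (pandn m, x))
// selects y where m is set and x where it is clear, so it can be emitted as a
// PBLENDVB using m as the byte mask.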
40496static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
40497                                            const X86Subtarget &Subtarget) {
40498  assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
40499
40500  EVT VT = N->getValueType(0);
40501  if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
40502        (VT.is256BitVector() && Subtarget.hasInt256())))
40503    return SDValue();
40504
40505  SDValue X, Y, Mask;
40506  if (!matchLogicBlend(N, X, Y, Mask))
40507    return SDValue();
40508
40509  // Validate that X, Y, and Mask are bitcasts, and see through them.
40510  Mask = peekThroughBitcasts(Mask);
40511  X = peekThroughBitcasts(X);
40512  Y = peekThroughBitcasts(Y);
40513
40514  EVT MaskVT = Mask.getValueType();
40515  unsigned EltBits = MaskVT.getScalarSizeInBits();
40516
40517  // TODO: Attempt to handle floating point cases as well?
40518  if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
40519    return SDValue();
40520
40521  SDLoc DL(N);
40522
40523  // Attempt to combine to conditional negate: (sub (xor X, M), M)
40524  if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
40525                                                           DAG, Subtarget))
40526    return Res;
40527
40528  // PBLENDVB is only available on SSE 4.1.
40529  if (!Subtarget.hasSSE41())
40530    return SDValue();
40531
40532  MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;
40533
40534  X = DAG.getBitcast(BlendVT, X);
40535  Y = DAG.getBitcast(BlendVT, Y);
40536  Mask = DAG.getBitcast(BlendVT, Mask);
40537  Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
40538  return DAG.getBitcast(VT, Mask);
40539}
40540
40541// Helper function for combineOrCmpEqZeroToCtlzSrl
40542// Transforms:
40543//   seteq(cmp x, 0)
40544//   into:
40545//   srl(ctlz x), log2(bitsize(x))
40546// Input pattern is checked by caller.
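// Illustrative i32 case (assumes a fast LZCNT, as checked by the caller):
//   seteq(cmp %x, 0)  ->  (lzcnt %x) >> 5
// since lzcnt returns 32 only when %x is zero, and 32 >> 5 == 1.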
40547static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, EVT ExtTy,
40548                                          SelectionDAG &DAG) {
40549  SDValue Cmp = Op.getOperand(1);
40550  EVT VT = Cmp.getOperand(0).getValueType();
40551  unsigned Log2b = Log2_32(VT.getSizeInBits());
40552  SDLoc dl(Op);
40553  SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
40554  // The result of the shift is true or false, and on X86, the 32-bit
40555  // encoding of shr and lzcnt is more desirable.
40556  SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
40557  SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
40558                            DAG.getConstant(Log2b, dl, MVT::i8));
40559  return DAG.getZExtOrTrunc(Scc, dl, ExtTy);
40560}
40561
40562// Try to transform:
40563//   zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
40564//   into:
40565//   srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
40566// Will also attempt to match more generic cases, e.g.:
40567//   zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
40568// Only applies if the target supports the FastLZCNT feature.
40569static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
40570                                           TargetLowering::DAGCombinerInfo &DCI,
40571                                           const X86Subtarget &Subtarget) {
40572  if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
40573    return SDValue();
40574
40575  auto isORCandidate = [](SDValue N) {
40576    return (N->getOpcode() == ISD::OR && N->hasOneUse());
40577  };
40578
40579  // Check that the zero extend extends to 32 bits or more. The code generated by
40580  // srl(ctlz) for 16-bit or less variants of the pattern would require extra
40581  // instructions to clear the upper bits.
40582  if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
40583      !isORCandidate(N->getOperand(0)))
40584    return SDValue();
40585
40586  // Check the node matches: setcc(eq, cmp 0)
40587  auto isSetCCCandidate = [](SDValue N) {
40588    return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
40589           X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
40590           N->getOperand(1).getOpcode() == X86ISD::CMP &&
40591           isNullConstant(N->getOperand(1).getOperand(1)) &&
40592           N->getOperand(1).getValueType().bitsGE(MVT::i32);
40593  };
40594
40595  SDNode *OR = N->getOperand(0).getNode();
40596  SDValue LHS = OR->getOperand(0);
40597  SDValue RHS = OR->getOperand(1);
40598
40599  // Save nodes matching or(or, setcc(eq, cmp 0)).
40600  SmallVector<SDNode *, 2> ORNodes;
40601  while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
40602          (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
40603    ORNodes.push_back(OR);
40604    OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
40605    LHS = OR->getOperand(0);
40606    RHS = OR->getOperand(1);
40607  }
40608
40609  // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
40610  if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
40611      !isORCandidate(SDValue(OR, 0)))
40612    return SDValue();
40613
40614  // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern; try to lower it
40615  // to
40616  //   or(srl(ctlz), srl(ctlz)).
40617  // The DAG combiner can then fold it into:
40618  //   srl(or(ctlz, ctlz)).
40619  EVT VT = OR->getValueType(0);
40620  SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, VT, DAG);
40621  SDValue Ret, NewRHS;
40622  if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG)))
40623    Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, NewLHS, NewRHS);
40624
40625  if (!Ret)
40626    return SDValue();
40627
40628  // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
40629  while (ORNodes.size() > 0) {
40630    OR = ORNodes.pop_back_val();
40631    LHS = OR->getOperand(0);
40632    RHS = OR->getOperand(1);
40633    // Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
40634    if (RHS->getOpcode() == ISD::OR)
40635      std::swap(LHS, RHS);
40636    NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG);
40637    if (!NewRHS)
40638      return SDValue();
40639    Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, Ret, NewRHS);
40640  }
40641
40642  if (Ret)
40643    Ret = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
40644
40645  return Ret;
40646}
40647
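// Rewrites an OR of matching left/right shifts into ISD::FSHL/ISD::FSHR so it
// can lower to SHLD/SHRD. Hypothetical i64 instance of the simplest form:
//   (or (shl %x, %c), (srl %y, (sub 64, %c)))  ->  (fshl %x, %y, %c)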
40648static SDValue combineOrShiftToFunnelShift(SDNode *N, SelectionDAG &DAG,
40649                                           const X86Subtarget &Subtarget) {
40650  assert(N->getOpcode() == ISD::OR && "Expected ISD::OR node");
40651  SDValue N0 = N->getOperand(0);
40652  SDValue N1 = N->getOperand(1);
40653  EVT VT = N->getValueType(0);
40654  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40655
40656  if (!TLI.isOperationLegalOrCustom(ISD::FSHL, VT) ||
40657      !TLI.isOperationLegalOrCustom(ISD::FSHR, VT))
40658    return SDValue();
40659
40660  // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
40661  bool OptForSize = DAG.shouldOptForSize();
40662  unsigned Bits = VT.getScalarSizeInBits();
40663
40664  // SHLD/SHRD instructions have lower register pressure, but on some
40665  // platforms they have higher latency than the equivalent
40666  // series of shifts/or that would otherwise be generated.
40667  // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
40668  // have higher latencies and we are not optimizing for size.
40669  if (!OptForSize && Subtarget.isSHLDSlow())
40670    return SDValue();
40671
40672  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
40673    std::swap(N0, N1);
40674  if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
40675    return SDValue();
40676  if (!N0.hasOneUse() || !N1.hasOneUse())
40677    return SDValue();
40678
40679  EVT ShiftVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
40680
40681  SDValue ShAmt0 = N0.getOperand(1);
40682  if (ShAmt0.getValueType() != ShiftVT)
40683    return SDValue();
40684  SDValue ShAmt1 = N1.getOperand(1);
40685  if (ShAmt1.getValueType() != ShiftVT)
40686    return SDValue();
40687
40688  // Peek through any modulo shift masks.
40689  SDValue ShMsk0;
40690  if (ShAmt0.getOpcode() == ISD::AND &&
40691      isa<ConstantSDNode>(ShAmt0.getOperand(1)) &&
40692      ShAmt0.getConstantOperandAPInt(1) == (Bits - 1)) {
40693    ShMsk0 = ShAmt0;
40694    ShAmt0 = ShAmt0.getOperand(0);
40695  }
40696  SDValue ShMsk1;
40697  if (ShAmt1.getOpcode() == ISD::AND &&
40698      isa<ConstantSDNode>(ShAmt1.getOperand(1)) &&
40699      ShAmt1.getConstantOperandAPInt(1) == (Bits - 1)) {
40700    ShMsk1 = ShAmt1;
40701    ShAmt1 = ShAmt1.getOperand(0);
40702  }
40703
40704  if (ShAmt0.getOpcode() == ISD::TRUNCATE)
40705    ShAmt0 = ShAmt0.getOperand(0);
40706  if (ShAmt1.getOpcode() == ISD::TRUNCATE)
40707    ShAmt1 = ShAmt1.getOperand(0);
40708
40709  SDLoc DL(N);
40710  unsigned Opc = ISD::FSHL;
40711  SDValue Op0 = N0.getOperand(0);
40712  SDValue Op1 = N1.getOperand(0);
40713  if (ShAmt0.getOpcode() == ISD::SUB || ShAmt0.getOpcode() == ISD::XOR) {
40714    Opc = ISD::FSHR;
40715    std::swap(Op0, Op1);
40716    std::swap(ShAmt0, ShAmt1);
40717    std::swap(ShMsk0, ShMsk1);
40718  }
40719
40720  auto GetFunnelShift = [&DAG, &DL, VT, Opc, &ShiftVT](SDValue Op0, SDValue Op1,
40721                                                       SDValue Amt) {
40722    if (Opc == ISD::FSHR)
40723      std::swap(Op0, Op1);
40724    return DAG.getNode(Opc, DL, VT, Op0, Op1,
40725                       DAG.getNode(ISD::TRUNCATE, DL, ShiftVT, Amt));
40726  };
40727
40728  // OR( SHL( X, C ), SRL( Y, 32 - C ) ) -> FSHL( X, Y, C )
40729  // OR( SRL( X, C ), SHL( Y, 32 - C ) ) -> FSHR( Y, X, C )
40730  // OR( SHL( X, C ), SRL( SRL( Y, 1 ), XOR( C, 31 ) ) ) -> FSHL( X, Y, C )
40731  // OR( SRL( X, C ), SHL( SHL( Y, 1 ), XOR( C, 31 ) ) ) -> FSHR( Y, X, C )
40732  // OR( SHL( X, AND( C, 31 ) ), SRL( Y, AND( 0 - C, 31 ) ) ) -> FSHL( X, Y, C )
40733  // OR( SRL( X, AND( C, 31 ) ), SHL( Y, AND( 0 - C, 31 ) ) ) -> FSHR( Y, X, C )
40734  if (ShAmt1.getOpcode() == ISD::SUB) {
40735    SDValue Sum = ShAmt1.getOperand(0);
40736    if (auto *SumC = dyn_cast<ConstantSDNode>(Sum)) {
40737      SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
40738      if (ShAmt1Op1.getOpcode() == ISD::AND &&
40739          isa<ConstantSDNode>(ShAmt1Op1.getOperand(1)) &&
40740          ShAmt1Op1.getConstantOperandAPInt(1) == (Bits - 1)) {
40741        ShMsk1 = ShAmt1Op1;
40742        ShAmt1Op1 = ShAmt1Op1.getOperand(0);
40743      }
40744      if (ShAmt1Op1.getOpcode() == ISD::TRUNCATE)
40745        ShAmt1Op1 = ShAmt1Op1.getOperand(0);
40746      if ((SumC->getAPIntValue() == Bits ||
40747           (SumC->getAPIntValue() == 0 && ShMsk1)) &&
40748          ShAmt1Op1 == ShAmt0)
40749        return GetFunnelShift(Op0, Op1, ShAmt0);
40750    }
40751  } else if (auto *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
40752    auto *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
40753    if (ShAmt0C && (ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue()) == Bits)
40754      return GetFunnelShift(Op0, Op1, ShAmt0);
40755  } else if (ShAmt1.getOpcode() == ISD::XOR) {
40756    SDValue Mask = ShAmt1.getOperand(1);
40757    if (auto *MaskC = dyn_cast<ConstantSDNode>(Mask)) {
40758      unsigned InnerShift = (ISD::FSHL == Opc ? ISD::SRL : ISD::SHL);
40759      SDValue ShAmt1Op0 = ShAmt1.getOperand(0);
40760      if (ShAmt1Op0.getOpcode() == ISD::TRUNCATE)
40761        ShAmt1Op0 = ShAmt1Op0.getOperand(0);
40762      if (MaskC->getSExtValue() == (Bits - 1) &&
40763          (ShAmt1Op0 == ShAmt0 || ShAmt1Op0 == ShMsk0)) {
40764        if (Op1.getOpcode() == InnerShift &&
40765            isa<ConstantSDNode>(Op1.getOperand(1)) &&
40766            Op1.getConstantOperandAPInt(1).isOneValue()) {
40767          return GetFunnelShift(Op0, Op1.getOperand(0), ShAmt0);
40768        }
40769        // Test for ADD( Y, Y ) as an equivalent to SHL( Y, 1 ).
40770        if (InnerShift == ISD::SHL && Op1.getOpcode() == ISD::ADD &&
40771            Op1.getOperand(0) == Op1.getOperand(1)) {
40772          return GetFunnelShift(Op0, Op1.getOperand(0), ShAmt0);
40773        }
40774      }
40775    }
40776  }
40777
40778  return SDValue();
40779}
40780
40781static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
40782                         TargetLowering::DAGCombinerInfo &DCI,
40783                         const X86Subtarget &Subtarget) {
40784  SDValue N0 = N->getOperand(0);
40785  SDValue N1 = N->getOperand(1);
40786  EVT VT = N->getValueType(0);
40787
40788  // If this is SSE1-only, convert to FOR to avoid scalarization.
40789  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
40790    return DAG.getBitcast(MVT::v4i32,
40791                          DAG.getNode(X86ISD::FOR, SDLoc(N), MVT::v4f32,
40792                                      DAG.getBitcast(MVT::v4f32, N0),
40793                                      DAG.getBitcast(MVT::v4f32, N1)));
40794  }
40795
40796  // Match any-of bool scalar reductions into a bitcast/movmsk + cmp.
40797  // TODO: Support multiple SrcOps.
40798  if (VT == MVT::i1) {
40799    SmallVector<SDValue, 2> SrcOps;
40800    if (matchScalarReduction(SDValue(N, 0), ISD::OR, SrcOps) &&
40801        SrcOps.size() == 1) {
40802      SDLoc dl(N);
40803      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40804      unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
40805      EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
40806      SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
40807      if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
40808        Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
40809      if (Mask) {
40810        APInt AllBits = APInt::getNullValue(NumElts);
40811        return DAG.getSetCC(dl, MVT::i1, Mask,
40812                            DAG.getConstant(AllBits, dl, MaskVT), ISD::SETNE);
40813      }
40814    }
40815  }
40816
40817  if (DCI.isBeforeLegalizeOps())
40818    return SDValue();
40819
40820  if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
40821    return R;
40822
40823  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
40824    return FPLogic;
40825
40826  if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
40827    return R;
40828
40829  if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
40830    return R;
40831
40832  if (SDValue R = combineOrShiftToFunnelShift(N, DAG, Subtarget))
40833    return R;
40834
40835  // Attempt to recursively combine an OR of shuffles.
40836  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
40837    SDValue Op(N, 0);
40838    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
40839      return Res;
40840  }
40841
40842  return SDValue();
40843}
40844
40845/// Try to turn tests against the signbit in the form of:
40846///   XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
40847/// into:
40848///   SETGT(X, -1)
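// Minimal hypothetical instance for an i64 source and i8 result:
//   (xor (trunc (srl %x, 63) to i8), 1)
// tests "sign bit clear", which is the same as (setgt %x, -1).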
40849static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
40850  // This is only worth doing if the output type is i8 or i1.
40851  EVT ResultType = N->getValueType(0);
40852  if (ResultType != MVT::i8 && ResultType != MVT::i1)
40853    return SDValue();
40854
40855  SDValue N0 = N->getOperand(0);
40856  SDValue N1 = N->getOperand(1);
40857
40858  // We should be performing an xor against a truncated shift.
40859  if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
40860    return SDValue();
40861
40862  // Make sure we are performing an xor against one.
40863  if (!isOneConstant(N1))
40864    return SDValue();
40865
40866  // SetCC on x86 zero-extends, so only act on this if it's a logical shift.
40867  SDValue Shift = N0.getOperand(0);
40868  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
40869    return SDValue();
40870
40871  // Make sure we are truncating from one of i16, i32 or i64.
40872  EVT ShiftTy = Shift.getValueType();
40873  if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
40874    return SDValue();
40875
40876  // Make sure the shift amount extracts the sign bit.
40877  if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
40878      Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
40879    return SDValue();
40880
40881  // Create a greater-than comparison against -1.
40882  // N.B. Using SETGE against 0 works, but we want a canonical-looking
40883  // comparison; using SETGT matches up with what TranslateX86CC expects.
40884  SDLoc DL(N);
40885  SDValue ShiftOp = Shift.getOperand(0);
40886  EVT ShiftOpTy = ShiftOp.getValueType();
40887  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40888  EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
40889                                               *DAG.getContext(), ResultType);
40890  SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
40891                              DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
40892  if (SetCCResultType != ResultType)
40893    Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
40894  return Cond;
40895}
40896
40897/// Turn vector tests of the signbit in the form of:
40898///   xor (sra X, elt_size(X)-1), -1
40899/// into:
40900///   pcmpgt X, -1
40901///
40902/// This should be called before type legalization because the pattern may not
40903/// persist after that.
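// Hedged v4i32 sketch:
//   (xor (sra %x, 31), <-1,-1,-1,-1>)  ->  (setgt %x, <-1,-1,-1,-1>)
// i.e. a single PCMPGTD, since the arithmetic shift smears each sign bit and
// the NOT of that is exactly "element > -1".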
40904static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
40905                                         const X86Subtarget &Subtarget) {
40906  EVT VT = N->getValueType(0);
40907  if (!VT.isSimple())
40908    return SDValue();
40909
40910  switch (VT.getSimpleVT().SimpleTy) {
40911  default: return SDValue();
40912  case MVT::v16i8:
40913  case MVT::v8i16:
40914  case MVT::v4i32:
40915  case MVT::v2i64: if (!Subtarget.hasSSE2()) return SDValue(); break;
40916  case MVT::v32i8:
40917  case MVT::v16i16:
40918  case MVT::v8i32:
40919  case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
40920  }
40921
40922  // There must be a shift right algebraic before the xor, and the xor must be a
40923  // 'not' operation.
40924  SDValue Shift = N->getOperand(0);
40925  SDValue Ones = N->getOperand(1);
40926  if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
40927      !ISD::isBuildVectorAllOnes(Ones.getNode()))
40928    return SDValue();
40929
40930  // The shift should be smearing the sign bit across each vector element.
40931  auto *ShiftAmt =
40932      isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
40933  if (!ShiftAmt ||
40934      ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
40935    return SDValue();
40936
40937  // Create a greater-than comparison against -1. We don't use the more obvious
40938  // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
40939  return DAG.getSetCC(SDLoc(N), VT, Shift.getOperand(0), Ones, ISD::SETGT);
40940}
40941
40942/// Detect patterns of truncation with unsigned saturation:
40943///
40944/// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
40945///   Return the source value x to be truncated or SDValue() if the pattern was
40946///   not matched.
40947///
40948/// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
40949///   where C1 >= 0 and C2 is unsigned max of destination type.
40950///
40951///    (truncate (smax (smin (x, C2), C1)) to dest_type)
40952///   where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
40953///
40954///   These two patterns are equivalent to:
40955///   (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
40956///   So return the smax(x, C1) value to be truncated or SDValue() if the
40957///   pattern was not matched.
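// Hypothetical match for a v8i32 -> v8i16 truncate:
//   In = (umin %x, <65535 x 8>)
// returns %x, signalling to the caller that an unsigned-saturating truncate
// (e.g. VPMOVUSDW on AVX-512) is a valid replacement.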
40958static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
40959                                 const SDLoc &DL) {
40960  EVT InVT = In.getValueType();
40961
40962  // Saturation with truncation. We truncate from InVT to VT.
40963  assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
40964         "Unexpected types for truncate operation");
40965
40966  // Match min/max and return limit value as a parameter.
40967  auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
40968    if (V.getOpcode() == Opcode &&
40969        ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
40970      return V.getOperand(0);
40971    return SDValue();
40972  };
40973
40974  APInt C1, C2;
40975  if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
40976    // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
40977    // the element size of the destination type.
40978    if (C2.isMask(VT.getScalarSizeInBits()))
40979      return UMin;
40980
40981  if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
40982    if (MatchMinMax(SMin, ISD::SMAX, C1))
40983      if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
40984        return SMin;
40985
40986  if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
40987    if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
40988      if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
40989          C2.uge(C1)) {
40990        return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));
40991      }
40992
40993  return SDValue();
40994}
40995
40996/// Detect patterns of truncation with signed saturation:
40997/// (truncate (smin ((smax (x, signed_min_of_dest_type)),
40998///                  signed_max_of_dest_type)) to dest_type)
40999/// or:
41000/// (truncate (smax ((smin (x, signed_max_of_dest_type)),
41001///                  signed_min_of_dest_type)) to dest_type).
41002/// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
41003/// Return the source value to be truncated or SDValue() if the pattern was not
41004/// matched.
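// Hypothetical match for a v8i32 -> v8i16 truncate:
//   In = (smin (smax %x, <-32768 x 8>), <32767 x 8>)
// returns %x; with MatchPackUS the accepted clamp range is [0, 65535] instead.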
41005static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
41006  unsigned NumDstBits = VT.getScalarSizeInBits();
41007  unsigned NumSrcBits = In.getScalarValueSizeInBits();
41008  assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
41009
41010  auto MatchMinMax = [](SDValue V, unsigned Opcode,
41011                        const APInt &Limit) -> SDValue {
41012    APInt C;
41013    if (V.getOpcode() == Opcode &&
41014        ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
41015      return V.getOperand(0);
41016    return SDValue();
41017  };
41018
41019  APInt SignedMax, SignedMin;
41020  if (MatchPackUS) {
41021    SignedMax = APInt::getAllOnesValue(NumDstBits).zext(NumSrcBits);
41022    SignedMin = APInt(NumSrcBits, 0);
41023  } else {
41024    SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
41025    SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
41026  }
41027
41028  if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
41029    if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
41030      return SMax;
41031
41032  if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
41033    if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
41034      return SMin;
41035
41036  return SDValue();
41037}
41038
41039static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
41040                                      SelectionDAG &DAG,
41041                                      const X86Subtarget &Subtarget) {
41042  if (!Subtarget.hasSSE2() || !VT.isVector())
41043    return SDValue();
41044
41045  EVT SVT = VT.getVectorElementType();
41046  EVT InVT = In.getValueType();
41047  EVT InSVT = InVT.getVectorElementType();
41048
41049  // If we're clamping a signed 32-bit vector to 0-255 and the 32-bit vector is
41050  // split across two registers, we can use a packusdw+perm to clamp to 0-65535
41051  // and concatenate at the same time. Then we can use a final vpmovuswb to
41052  // clip to 0-255.
41053  if (Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
41054      InVT == MVT::v16i32 && VT == MVT::v16i8) {
41055    if (auto USatVal = detectSSatPattern(In, VT, true)) {
41056      // Emit a VPACKUSDW+VPERMQ followed by a VPMOVUSWB.
41057      SDValue Mid = truncateVectorWithPACK(X86ISD::PACKUS, MVT::v16i16, USatVal,
41058                                           DL, DAG, Subtarget);
41059      assert(Mid && "Failed to pack!");
41060      return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, Mid);
41061    }
41062  }
41063
41064  // vXi32 truncate instructions are available with AVX512F.
41065  // vXi16 truncate instructions are only available with AVX512BW.
41066  // For 256-bit or smaller vectors, we require VLX.
41067  // FIXME: We could widen truncates to 512 to remove the VLX restriction.
41068  // If the result type is 256 bits or larger and we have disabled 512-bit
41069  // registers, we should go ahead and use the pack instructions if possible.
41070  bool PreferAVX512 = ((Subtarget.hasAVX512() && InSVT == MVT::i32) ||
41071                       (Subtarget.hasBWI() && InSVT == MVT::i16)) &&
41072                      (InVT.getSizeInBits() > 128) &&
41073                      (Subtarget.hasVLX() || InVT.getSizeInBits() > 256) &&
41074                      !(!Subtarget.useAVX512Regs() && VT.getSizeInBits() >= 256);
41075
41076  if (isPowerOf2_32(VT.getVectorNumElements()) && !PreferAVX512 &&
41077      VT.getSizeInBits() >= 64 &&
41078      (SVT == MVT::i8 || SVT == MVT::i16) &&
41079      (InSVT == MVT::i16 || InSVT == MVT::i32)) {
41080    if (auto USatVal = detectSSatPattern(In, VT, true)) {
41081      // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
41082      // Only do this when the result is at least 64 bits, or we'll be leaving
41083      // dangling PACKSSDW nodes.
41084      if (SVT == MVT::i8 && InSVT == MVT::i32) {
41085        EVT MidVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
41086                                     VT.getVectorNumElements());
41087        SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
41088                                             DAG, Subtarget);
41089        assert(Mid && "Failed to pack!");
41090        SDValue V = truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
41091                                           Subtarget);
41092        assert(V && "Failed to pack!");
41093        return V;
41094      } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
41095        return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
41096                                      Subtarget);
41097    }
41098    if (auto SSatVal = detectSSatPattern(In, VT))
41099      return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
41100                                    Subtarget);
41101  }
41102
41103  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41104  if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
41105      Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI())) {
41106    unsigned TruncOpc = 0;
41107    SDValue SatVal;
41108    if (auto SSatVal = detectSSatPattern(In, VT)) {
41109      SatVal = SSatVal;
41110      TruncOpc = X86ISD::VTRUNCS;
41111    } else if (auto USatVal = detectUSatPattern(In, VT, DAG, DL)) {
41112      SatVal = USatVal;
41113      TruncOpc = X86ISD::VTRUNCUS;
41114    }
41115    if (SatVal) {
41116      unsigned ResElts = VT.getVectorNumElements();
41117      // If the input type is less than 512 bits and we don't have VLX, we need
41118      // to widen to 512 bits.
41119      if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
41120        unsigned NumConcats = 512 / InVT.getSizeInBits();
41121        ResElts *= NumConcats;
41122        SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
41123        ConcatOps[0] = SatVal;
41124        InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
41125                                NumConcats * InVT.getVectorNumElements());
41126        SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
41127      }
41128      // Widen the result if it's narrower than 128 bits.
41129      if (ResElts * SVT.getSizeInBits() < 128)
41130        ResElts = 128 / SVT.getSizeInBits();
41131      EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
41132      SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
41133      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
41134                         DAG.getIntPtrConstant(0, DL));
41135    }
41136  }
41137
41138  return SDValue();
41139}
41140
41141/// This function detects the AVG pattern between vectors of unsigned i8/i16,
41142/// which is c = (a + b + 1) / 2, and replaces this operation with the efficient
41143/// X86ISD::AVG instruction.
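// Illustrative C-level idiom for a v16i8 average (names hypothetical):
//   r[i] = (uint8_t)(((unsigned)a[i] + b[i] + 1) >> 1);
// which maps to a single PAVGB on SSE2-capable targets.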
41144static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
41145                                const X86Subtarget &Subtarget,
41146                                const SDLoc &DL) {
41147  if (!VT.isVector())
41148    return SDValue();
41149  EVT InVT = In.getValueType();
41150  unsigned NumElems = VT.getVectorNumElements();
41151
41152  EVT ScalarVT = VT.getVectorElementType();
41153  if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) &&
41154        NumElems >= 2 && isPowerOf2_32(NumElems)))
41155    return SDValue();
41156
41157  // InScalarVT is the intermediate type in the AVG pattern, and it should be
41158  // wider than the original input type (i8/i16).
41159  EVT InScalarVT = InVT.getVectorElementType();
41160  if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
41161    return SDValue();
41162
41163  if (!Subtarget.hasSSE2())
41164    return SDValue();
41165
41166  // Detect the following pattern:
41167  //
41168  //   %1 = zext <N x i8> %a to <N x i32>
41169  //   %2 = zext <N x i8> %b to <N x i32>
41170  //   %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
41171  //   %4 = add nuw nsw <N x i32> %3, %2
41172  //   %5 = lshr <N x i32> %4, <i32 1 x N>
41173  //   %6 = trunc <N x i32> %5 to <N x i8>
41174  //
41175  // In AVX512, the last instruction can also be a trunc store.
41176  if (In.getOpcode() != ISD::SRL)
41177    return SDValue();
41178
41179  // A lambda checking that the given SDValue is a constant vector and that each
41180  // element is in the range [Min, Max].
41181  auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
41182    BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V);
41183    if (!BV || !BV->isConstant())
41184      return false;
41185    for (SDValue Op : V->ops()) {
41186      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
41187      if (!C)
41188        return false;
41189      const APInt &Val = C->getAPIntValue();
41190      if (Val.ult(Min) || Val.ugt(Max))
41191        return false;
41192    }
41193    return true;
41194  };
41195
41196  // Check if each element of the vector is right-shifted by one.
41197  auto LHS = In.getOperand(0);
41198  auto RHS = In.getOperand(1);
41199  if (!IsConstVectorInRange(RHS, 1, 1))
41200    return SDValue();
41201  if (LHS.getOpcode() != ISD::ADD)
41202    return SDValue();
41203
41204  // Detect a pattern of a + b + 1 where the order doesn't matter.
41205  SDValue Operands[3];
41206  Operands[0] = LHS.getOperand(0);
41207  Operands[1] = LHS.getOperand(1);
41208
41209  auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
41210                       ArrayRef<SDValue> Ops) {
41211    return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops);
41212  };
41213
41214  // Take care of the case when one of the operands is a constant vector whose
41215  // element is in the range [1, 256].
41216  if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
41217      Operands[0].getOpcode() == ISD::ZERO_EXTEND &&
41218      Operands[0].getOperand(0).getValueType() == VT) {
41219    // The pattern is detected. Subtract one from the constant vector, then
41220    // demote it and emit X86ISD::AVG instruction.
41221    SDValue VecOnes = DAG.getConstant(1, DL, InVT);
41222    Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
41223    Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
41224    return SplitOpsAndApply(DAG, Subtarget, DL, VT,
41225                            { Operands[0].getOperand(0), Operands[1] },
41226                            AVGBuilder);
41227  }
41228
41229  // Matches 'add-like' patterns: either add(Op0,Op1) or zext(or(Op0,Op1)).
41230  // Match the or case only if it's 'add-like', i.e. it can be replaced by an add.
41231  auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
41232    if (ISD::ADD == V.getOpcode()) {
41233      Op0 = V.getOperand(0);
41234      Op1 = V.getOperand(1);
41235      return true;
41236    }
41237    if (ISD::ZERO_EXTEND != V.getOpcode())
41238      return false;
41239    V = V.getOperand(0);
41240    if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
41241        !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
41242      return false;
41243    Op0 = V.getOperand(0);
41244    Op1 = V.getOperand(1);
41245    return true;
41246  };
41247
41248  SDValue Op0, Op1;
41249  if (FindAddLike(Operands[0], Op0, Op1))
41250    std::swap(Operands[0], Operands[1]);
41251  else if (!FindAddLike(Operands[1], Op0, Op1))
41252    return SDValue();
41253  Operands[2] = Op0;
41254  Operands[1] = Op1;
41255
41256  // Now we have three operands of two additions. Check that one of them is a
41257  // constant vector with ones, and the other two can be promoted from i8/i16.
41258  for (int i = 0; i < 3; ++i) {
41259    if (!IsConstVectorInRange(Operands[i], 1, 1))
41260      continue;
41261    std::swap(Operands[i], Operands[2]);
41262
41263    // Check if Operands[0] and Operands[1] are results of type promotion.
41264    for (int j = 0; j < 2; ++j)
41265      if (Operands[j].getValueType() != VT) {
41266        if (Operands[j].getOpcode() != ISD::ZERO_EXTEND ||
41267            Operands[j].getOperand(0).getValueType() != VT)
41268          return SDValue();
41269        Operands[j] = Operands[j].getOperand(0);
41270      }
41271
41272    // The pattern is detected, emit X86ISD::AVG instruction(s).
41273    return SplitOpsAndApply(DAG, Subtarget, DL, VT, {Operands[0], Operands[1]},
41274                            AVGBuilder);
41275  }
41276
41277  return SDValue();
41278}
41279
41280static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
41281                           TargetLowering::DAGCombinerInfo &DCI,
41282                           const X86Subtarget &Subtarget) {
41283  LoadSDNode *Ld = cast<LoadSDNode>(N);
41284  EVT RegVT = Ld->getValueType(0);
41285  EVT MemVT = Ld->getMemoryVT();
41286  SDLoc dl(Ld);
41287  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41288
41289  // For chips with slow 32-byte unaligned loads, break the 32-byte operation
41290  // into two 16-byte operations. Also split non-temporal aligned loads on
41291  // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
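  // For example (illustrative): a slow-unaligned v8f32 load becomes two v4f32
  // loads at offsets 0 and 16, a TokenFactor merging the two chains, and a
  // CONCAT_VECTORS rebuilding the 256-bit value.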
41292  ISD::LoadExtType Ext = Ld->getExtensionType();
41293  bool Fast;
41294  unsigned Alignment = Ld->getAlignment();
41295  if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
41296      Ext == ISD::NON_EXTLOAD &&
41297      ((Ld->isNonTemporal() && !Subtarget.hasInt256() && Alignment >= 16) ||
41298       (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
41299                               *Ld->getMemOperand(), &Fast) &&
41300        !Fast))) {
41301    unsigned NumElems = RegVT.getVectorNumElements();
41302    if (NumElems < 2)
41303      return SDValue();
41304
41305    unsigned HalfAlign = 16;
41306    SDValue Ptr1 = Ld->getBasePtr();
41307    SDValue Ptr2 = DAG.getMemBasePlusOffset(Ptr1, HalfAlign, dl);
41308    EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
41309                                  NumElems / 2);
41310    SDValue Load1 =
41311        DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
41312                    Alignment, Ld->getMemOperand()->getFlags());
41313    SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
41314                                Ld->getPointerInfo().getWithOffset(HalfAlign),
41315                                MinAlign(Alignment, HalfAlign),
41316                                Ld->getMemOperand()->getFlags());
41317    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
41318                             Load1.getValue(1), Load2.getValue(1));
41319
41320    SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
41321    return DCI.CombineTo(N, NewVec, TF, true);
41322  }
41323
41324  // Bool vector load - attempt to cast to an integer, as we have good
41325  // (vXiY *ext(vXi1 bitcast(iX))) handling.
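  // For example (illustrative): a v16i1 load becomes a plain i16 load followed
  // by a bitcast of the loaded integer back to v16i1.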
41326  if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
41327      RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
41328    unsigned NumElts = RegVT.getVectorNumElements();
41329    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
41330    if (TLI.isTypeLegal(IntVT)) {
41331      SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
41332                                    Ld->getPointerInfo(), Alignment,
41333                                    Ld->getMemOperand()->getFlags());
41334      SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
41335      return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
41336    }
41337  }
41338
41339  return SDValue();
41340}
41341
41342/// If V is a build vector of boolean constants and exactly one of those
41343/// constants is true, return the operand index of that true element.
41344/// Otherwise, return -1.
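/// For example, <i1 0, i1 -1, i1 0, i1 0> returns 1, while an all-zeros mask
/// or a mask with more than one true element returns -1.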
41345static int getOneTrueElt(SDValue V) {
41346  // This needs to be a build vector of booleans.
41347  // TODO: Checking for the i1 type matches the IR definition for the mask,
41348  // but the mask check could be loosened to i8 or other types. That might
41349  // also require checking more than 'allOnesValue'; eg, the x86 HW
41350  // instructions only require that the MSB is set for each mask element.
41351  // The ISD::MSTORE comments/definition do not specify how the mask operand
41352  // is formatted.
41353  auto *BV = dyn_cast<BuildVectorSDNode>(V);
41354  if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
41355    return -1;
41356
41357  int TrueIndex = -1;
41358  unsigned NumElts = BV->getValueType(0).getVectorNumElements();
41359  for (unsigned i = 0; i < NumElts; ++i) {
41360    const SDValue &Op = BV->getOperand(i);
41361    if (Op.isUndef())
41362      continue;
41363    auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
41364    if (!ConstNode)
41365      return -1;
41366    if (ConstNode->getAPIntValue().isAllOnesValue()) {
41367      // If we already found a one, this is too many.
41368      if (TrueIndex >= 0)
41369        return -1;
41370      TrueIndex = i;
41371    }
41372  }
41373  return TrueIndex;
41374}
41375
41376/// Given a masked memory load/store operation, return true if it has one mask
41377/// bit set. If it has one mask bit set, then also return the memory address of
41378/// the scalar element to load/store, the vector index to insert/extract that
41379/// scalar element, and the alignment for the scalar memory access.
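/// For example (illustrative), for a v4f32 masked op with mask <0, 0, -1, 0>,
/// Addr becomes BasePtr + 8 (element 2 of 4-byte elements), Index is 2, and
/// Alignment is the original alignment clamped to at most 4.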
41380static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
41381                                         SelectionDAG &DAG, SDValue &Addr,
41382                                         SDValue &Index, unsigned &Alignment) {
41383  int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
41384  if (TrueMaskElt < 0)
41385    return false;
41386
41387  // Get the address of the one scalar element that is specified by the mask
41388  // using the appropriate offset from the base pointer.
41389  EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
41390  Addr = MaskedOp->getBasePtr();
41391  if (TrueMaskElt != 0) {
41392    unsigned Offset = TrueMaskElt * EltVT.getStoreSize();
41393    Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp));
41394  }
41395
41396  Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
41397  Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize());
41398  return true;
41399}
41400
41401/// If exactly one element of the mask is set for a non-extending masked load,
41402/// it is a scalar load and vector insert.
41403/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
41404/// mask have already been optimized in IR, so we don't bother with those here.
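/// For example (illustrative), a v4i32 masked load with mask <0, 0, -1, 0>
/// becomes a scalar i32 load from BasePtr + 8 followed by an
/// INSERT_VECTOR_ELT of the loaded value into the pass-through vector at
/// index 2.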
41405static SDValue
41406reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
41407                             TargetLowering::DAGCombinerInfo &DCI) {
41408  assert(ML->isUnindexed() && "Unexpected indexed masked load!");
41409  // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
41410  // However, some target hooks may need to be added to know when the transform
41411  // is profitable. Endianness would also have to be considered.
41412
41413  SDValue Addr, VecIndex;
41414  unsigned Alignment;
41415  if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment))
41416    return SDValue();
41417
41418  // Load the one scalar element that is specified by the mask using the
41419  // appropriate offset from the base pointer.
41420  SDLoc DL(ML);
41421  EVT VT = ML->getValueType(0);
41422  EVT EltVT = VT.getVectorElementType();
41423  SDValue Load =
41424      DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(),
41425                  Alignment, ML->getMemOperand()->getFlags());
41426
41427  // Insert the loaded element into the appropriate place in the vector.
41428  SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
41429                               ML->getPassThru(), Load, VecIndex);
41430  return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
41431}
41432
41433static SDValue
41434combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
41435                              TargetLowering::DAGCombinerInfo &DCI) {
41436  assert(ML->isUnindexed() && "Unexpected indexed masked load!");
41437  if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
41438    return SDValue();
41439
41440  SDLoc DL(ML);
41441  EVT VT = ML->getValueType(0);
41442
41443  // If we are loading the first and last elements of a vector, it is safe and
41444  // always faster to load the whole vector. Replace the masked load with a
41445  // vector load and select.
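  // For example (illustrative): a v4f32 masked load with mask <-1, 0, 0, -1>
  // becomes a full v4f32 load plus a select between the loaded vector and the
  // pass-through operand under the original mask.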
41446  unsigned NumElts = VT.getVectorNumElements();
41447  BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
41448  bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
41449  bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
41450  if (LoadFirstElt && LoadLastElt) {
41451    SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
41452                                ML->getMemOperand());
41453    SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
41454                                  ML->getPassThru());
41455    return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
41456  }
41457
41458  // Convert a masked load with a constant mask into a masked load and a select.
41459  // This allows the select operation to use a faster kind of select instruction
41460  // (for example, vblendvps -> vblendps).
41461
41462  // Don't try this if the pass-through operand is already undefined. That would
41463  // cause an infinite loop because that's what we're about to create.
41464  if (ML->getPassThru().isUndef())
41465    return SDValue();
41466
41467  if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
41468    return SDValue();
41469
41470  // The new masked load has an undef pass-through operand. The select uses the
41471  // original pass-through operand.
41472  SDValue NewML = DAG.getMaskedLoad(
41473      VT, DL, ML->getChain(), ML->getBasePtr(), ML->getOffset(), ML->getMask(),
41474      DAG.getUNDEF(VT), ML->getMemoryVT(), ML->getMemOperand(),
41475      ML->getAddressingMode(), ML->getExtensionType());
41476  SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
41477                                ML->getPassThru());
41478
41479  return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
41480}
41481
41482static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
41483                                 TargetLowering::DAGCombinerInfo &DCI,
41484                                 const X86Subtarget &Subtarget) {
41485  MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
41486
41487  // TODO: Expanding load with constant mask may be optimized as well.
41488  if (Mld->isExpandingLoad())
41489    return SDValue();
41490
41491  if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
41492    if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
41493      return ScalarLoad;
41494    // TODO: Do some AVX512 subsets benefit from this transform?
41495    if (!Subtarget.hasAVX512())
41496      if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
41497        return Blend;
41498  }
41499
41500  return SDValue();
41501}
41502
41503/// If exactly one element of the mask is set for a non-truncating masked store,
41504/// it is a vector extract and scalar store.
41505/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
41506/// mask have already been optimized in IR, so we don't bother with those here.
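/// For example (illustrative), a v4i32 masked store with mask <0, -1, 0, 0>
/// becomes an EXTRACT_VECTOR_ELT of element 1 followed by a scalar i32 store
/// to BasePtr + 4.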
41507static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
41508                                              SelectionDAG &DAG) {
41509  // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
41510  // However, some target hooks may need to be added to know when the transform
41511  // is profitable. Endianness would also have to be considered.
41512
41513  SDValue Addr, VecIndex;
41514  unsigned Alignment;
41515  if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
41516    return SDValue();
41517
41518  // Extract the one scalar element that is actually being stored.
41519  SDLoc DL(MS);
41520  EVT VT = MS->getValue().getValueType();
41521  EVT EltVT = VT.getVectorElementType();
41522  SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
41523                                MS->getValue(), VecIndex);
41524
41525  // Store that element at the appropriate offset from the base pointer.
41526  return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
41527                      Alignment, MS->getMemOperand()->getFlags());
41528}
41529
41530static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
41531                                  TargetLowering::DAGCombinerInfo &DCI,
41532                                  const X86Subtarget &Subtarget) {
41533  MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
41534  if (Mst->isCompressingStore())
41535    return SDValue();
41536
41537  EVT VT = Mst->getValue().getValueType();
41538  SDLoc dl(Mst);
41539  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41540
41541  if (Mst->isTruncatingStore())
41542    return SDValue();
41543
41544  if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG))
41545    return ScalarStore;
41546
41547  // If the mask value has been legalized to a non-boolean vector, try to
41548  // simplify ops leading up to it. We only demand the MSB of each lane.
41549  SDValue Mask = Mst->getMask();
41550  if (Mask.getScalarValueSizeInBits() != 1) {
41551    APInt DemandedMask(APInt::getSignMask(VT.getScalarSizeInBits()));
41552    if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
41553      return SDValue(N, 0);
41554  }
41555
41556  SDValue Value = Mst->getValue();
41557  if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
41558      TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
41559                            Mst->getMemoryVT())) {
41560    return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
41561                              Mst->getBasePtr(), Mst->getOffset(), Mask,
41562                              Mst->getMemoryVT(), Mst->getMemOperand(),
41563                              Mst->getAddressingMode(), true);
41564  }
41565
41566  return SDValue();
41567}
41568
41569static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
41570                            TargetLowering::DAGCombinerInfo &DCI,
41571                            const X86Subtarget &Subtarget) {
41572  StoreSDNode *St = cast<StoreSDNode>(N);
41573  EVT StVT = St->getMemoryVT();
41574  SDLoc dl(St);
41575  unsigned Alignment = St->getAlignment();
41576  SDValue StoredVal = St->getValue();
41577  EVT VT = StoredVal.getValueType();
41578  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41579
41580  // Convert a store of vXi1 into a store of iX and a bitcast.
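  // For example (illustrative): a v8i1 store becomes a bitcast to i8 followed
  // by a normal i8 store.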
41581  if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
41582      VT.getVectorElementType() == MVT::i1) {
41583
41584    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
41585    StoredVal = DAG.getBitcast(NewVT, StoredVal);
41586
41587    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
41588                        St->getPointerInfo(), St->getAlignment(),
41589                        St->getMemOperand()->getFlags());
41590  }
41591
41592  // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
41593  // This will avoid a copy to k-register.
41594  if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
41595      StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
41596      StoredVal.getOperand(0).getValueType() == MVT::i8) {
41597    return DAG.getStore(St->getChain(), dl, StoredVal.getOperand(0),
41598                        St->getBasePtr(), St->getPointerInfo(),
41599                        St->getAlignment(), St->getMemOperand()->getFlags());
41600  }
41601
41602  // Widen v2i1/v4i1 stores to v8i1.
41603  if ((VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
41604      Subtarget.hasAVX512()) {
41605    unsigned NumConcats = 8 / VT.getVectorNumElements();
41606    SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(VT));
41607    Ops[0] = StoredVal;
41608    StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
41609    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
41610                        St->getPointerInfo(), St->getAlignment(),
41611                        St->getMemOperand()->getFlags());
41612  }
41613
41614  // Turn vXi1 stores of constants into a scalar store.
41615  if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
41616       VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
41617      ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
41618    // If it's a v64i1 store without 64-bit support, we need two stores.
41619    if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
41620      SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
41621                                      StoredVal->ops().slice(0, 32));
41622      Lo = combinevXi1ConstantToInteger(Lo, DAG);
41623      SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
41624                                      StoredVal->ops().slice(32, 32));
41625      Hi = combinevXi1ConstantToInteger(Hi, DAG);
41626
41627      SDValue Ptr0 = St->getBasePtr();
41628      SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 4, dl);
41629
41630      SDValue Ch0 =
41631          DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
41632                       Alignment, St->getMemOperand()->getFlags());
41633      SDValue Ch1 =
41634          DAG.getStore(St->getChain(), dl, Hi, Ptr1,
41635                       St->getPointerInfo().getWithOffset(4),
41636                       MinAlign(Alignment, 4U),
41637                       St->getMemOperand()->getFlags());
41638      return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
41639    }
41640
41641    StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
41642    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
41643                        St->getPointerInfo(), St->getAlignment(),
41644                        St->getMemOperand()->getFlags());
41645  }
41646
41647  // If we are saving a 32-byte vector and 32-byte stores are slow, such as on
41648  // Sandy Bridge, perform two 16-byte stores.
41649  bool Fast;
41650  if (VT.is256BitVector() && StVT == VT &&
41651      TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
41652                             *St->getMemOperand(), &Fast) &&
41653      !Fast) {
41654    unsigned NumElems = VT.getVectorNumElements();
41655    if (NumElems < 2)
41656      return SDValue();
41657
41658    return splitVectorStore(St, DAG);
41659  }
41660
41661  // Split under-aligned vector non-temporal stores.
41662  if (St->isNonTemporal() && StVT == VT && Alignment < VT.getStoreSize()) {
41663    // ZMM/YMM nt-stores - either the store can be split into a series of
41664    // shorter vector stores or the legalizer can scalarize it to use MOVNTI.
41665    if (VT.is256BitVector() || VT.is512BitVector()) {
41666      unsigned NumElems = VT.getVectorNumElements();
41667      if (NumElems < 2)
41668        return SDValue();
41669      return splitVectorStore(St, DAG);
41670    }
41671
41672    // XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64
41673    // to use MOVNTI.
41674    if (VT.is128BitVector() && Subtarget.hasSSE2()) {
41675      MVT NTVT = Subtarget.hasSSE4A()
41676                     ? MVT::v2f64
41677                     : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
41678      return scalarizeVectorStore(St, NTVT, DAG);
41679    }
41680  }
41681
41682  // Try to optimize v16i16->v16i8 truncating stores when BWI is not
41683  // supported but AVX512F is, by extending to v16i32 and truncating.
41684  if (!St->isTruncatingStore() && VT == MVT::v16i8 && !Subtarget.hasBWI() &&
41685      St->getValue().getOpcode() == ISD::TRUNCATE &&
41686      St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
41687      TLI.isTruncStoreLegal(MVT::v16i32, MVT::v16i8) &&
41688      St->getValue().hasOneUse() && !DCI.isBeforeLegalizeOps()) {
41689    SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32, St->getValue());
41690    return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
41691                             MVT::v16i8, St->getMemOperand());
41692  }
41693
41694  // Try to fold a VTRUNCUS or VTRUNCS into a truncating store.
41695  if (!St->isTruncatingStore() && StoredVal.hasOneUse() &&
41696      (StoredVal.getOpcode() == X86ISD::VTRUNCUS ||
41697       StoredVal.getOpcode() == X86ISD::VTRUNCS) &&
41698      TLI.isTruncStoreLegal(StoredVal.getOperand(0).getValueType(), VT)) {
41699    bool IsSigned = StoredVal.getOpcode() == X86ISD::VTRUNCS;
41700    return EmitTruncSStore(IsSigned, St->getChain(),
41701                           dl, StoredVal.getOperand(0), St->getBasePtr(),
41702                           VT, St->getMemOperand(), DAG);
41703  }
41704
41705  // Optimize trunc store (of multiple scalars) to shuffle and store.
41706  // First, pack all of the elements in one place. Next, store to memory
41707  // in fewer chunks.
41708  if (St->isTruncatingStore() && VT.isVector()) {
41709    // Check if we can detect an AVG pattern from the truncation. If yes,
41710    // replace the trunc store by a normal store with the result of X86ISD::AVG
41711    // instruction.
41712    if (DCI.isBeforeLegalize() || TLI.isTypeLegal(St->getMemoryVT()))
41713      if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
41714                                         Subtarget, dl))
41715        return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
41716                            St->getPointerInfo(), St->getAlignment(),
41717                            St->getMemOperand()->getFlags());
41718
41719    if (TLI.isTruncStoreLegal(VT, StVT)) {
41720      if (SDValue Val = detectSSatPattern(St->getValue(), St->getMemoryVT()))
41721        return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
41722                               dl, Val, St->getBasePtr(),
41723                               St->getMemoryVT(), St->getMemOperand(), DAG);
41724      if (SDValue Val = detectUSatPattern(St->getValue(), St->getMemoryVT(),
41725                                          DAG, dl))
41726        return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
41727                               dl, Val, St->getBasePtr(),
41728                               St->getMemoryVT(), St->getMemOperand(), DAG);
41729    }
41730
41731    return SDValue();
41732  }
41733
41734  // Turn load->store of MMX types into GPR load/stores.  This avoids clobbering
41735  // the FP state in cases where an emms may be missing.
41736  // A preferable solution to the general problem is to figure out the right
41737  // places to insert EMMS.  This qualifies as a quick hack.
41738
41739  // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
41740  if (VT.getSizeInBits() != 64)
41741    return SDValue();
41742
41743  const Function &F = DAG.getMachineFunction().getFunction();
41744  bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
41745  bool F64IsLegal =
41746      !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
41747  if ((VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit()) &&
41748      isa<LoadSDNode>(St->getValue()) &&
41749      cast<LoadSDNode>(St->getValue())->isSimple() &&
41750      St->getChain().hasOneUse() && St->isSimple()) {
41751    LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());
41752
41753    if (!ISD::isNormalLoad(Ld))
41754      return SDValue();
41755
41756    // Avoid the transformation if there are multiple uses of the loaded value.
41757    if (!Ld->hasNUsesOfValue(1, 0))
41758      return SDValue();
41759
41760    SDLoc LdDL(Ld);
41761    SDLoc StDL(N);
41762    // Lower to a single movq load/store pair.
41763    SDValue NewLd = DAG.getLoad(MVT::f64, LdDL, Ld->getChain(),
41764                                Ld->getBasePtr(), Ld->getMemOperand());
41765
41766    // Make sure new load is placed in same chain order.
41767    DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
41768    return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
41769                        St->getMemOperand());
41770  }
41771
41772  // This is similar to the above case, but here we handle a scalar 64-bit
41773  // integer store that is extracted from a vector on a 32-bit target.
41774  // If we have SSE2, then we can treat it like a floating-point double
41775  // to get past legalization. The execution dependencies fixup pass will
41776  // choose the optimal machine instruction for the store if this really is
41777  // an integer or v2f32 rather than an f64.
41778  if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
41779      St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
41780    SDValue OldExtract = St->getOperand(1);
41781    SDValue ExtOp0 = OldExtract.getOperand(0);
41782    unsigned VecSize = ExtOp0.getValueSizeInBits();
41783    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
41784    SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
41785    SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
41786                                     BitCast, OldExtract.getOperand(1));
41787    return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
41788                        St->getPointerInfo(), St->getAlignment(),
41789                        St->getMemOperand()->getFlags());
41790  }
41791
41792  return SDValue();
41793}
41794
41795/// Return 'true' if this vector operation is "horizontal"
41796/// and return the operands for the horizontal operation in LHS and RHS.  A
41797/// horizontal operation performs the binary operation on successive elements
41798/// of its first operand, then on successive elements of its second operand,
41799/// returning the resulting values in a vector.  For example, if
41800///   A = < float a0, float a1, float a2, float a3 >
41801/// and
41802///   B = < float b0, float b1, float b2, float b3 >
41803/// then the result of doing a horizontal operation on A and B is
41804///   A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
41805/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
41806/// A horizontal-op B, for some already available A and B, and if so then LHS is
41807/// set to A, RHS to B, and the routine returns 'true'.
41808static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, SelectionDAG &DAG,
41809                              const X86Subtarget &Subtarget,
41810                              bool IsCommutative) {
41811  // If either operand is undef, bail out. The binop should be simplified.
41812  if (LHS.isUndef() || RHS.isUndef())
41813    return false;
41814
41815  // Look for the following pattern:
41816  //   A = < float a0, float a1, float a2, float a3 >
41817  //   B = < float b0, float b1, float b2, float b3 >
41818  // and
41819  //   LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
41820  //   RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
41821  // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
41822  // which is A horizontal-op B.
41823
41824  MVT VT = LHS.getSimpleValueType();
41825  assert((VT.is128BitVector() || VT.is256BitVector()) &&
41826         "Unsupported vector type for horizontal add/sub");
41827  unsigned NumElts = VT.getVectorNumElements();
41828
41829  // TODO - can we make a general helper method that does all of this for us?
41830  auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
41831                        SmallVectorImpl<int> &ShuffleMask) {
41832    if (Op.getOpcode() == ISD::VECTOR_SHUFFLE) {
41833      if (!Op.getOperand(0).isUndef())
41834        N0 = Op.getOperand(0);
41835      if (!Op.getOperand(1).isUndef())
41836        N1 = Op.getOperand(1);
41837      ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
41838      ShuffleMask.append(Mask.begin(), Mask.end());
41839      return;
41840    }
41841    bool UseSubVector = false;
41842    if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
41843        Op.getOperand(0).getValueType().is256BitVector() &&
41844        llvm::isNullConstant(Op.getOperand(1))) {
41845      Op = Op.getOperand(0);
41846      UseSubVector = true;
41847    }
41848    bool IsUnary;
41849    SmallVector<SDValue, 2> SrcOps;
41850    SmallVector<int, 16> SrcShuffleMask;
41851    SDValue BC = peekThroughBitcasts(Op);
41852    if (isTargetShuffle(BC.getOpcode()) &&
41853        getTargetShuffleMask(BC.getNode(), BC.getSimpleValueType(), false,
41854                             SrcOps, SrcShuffleMask, IsUnary)) {
41855      if (!UseSubVector && SrcShuffleMask.size() == NumElts &&
41856          SrcOps.size() <= 2) {
41857        N0 = SrcOps.size() > 0 ? SrcOps[0] : SDValue();
41858        N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
41859        ShuffleMask.append(SrcShuffleMask.begin(), SrcShuffleMask.end());
41860      }
41861      if (UseSubVector && (SrcShuffleMask.size() == (NumElts * 2)) &&
41862          SrcOps.size() == 1) {
41863        N0 = extract128BitVector(SrcOps[0], 0, DAG, SDLoc(Op));
41864        N1 = extract128BitVector(SrcOps[0], NumElts, DAG, SDLoc(Op));
41865        ArrayRef<int> Mask = ArrayRef<int>(SrcShuffleMask).slice(0, NumElts);
41866        ShuffleMask.append(Mask.begin(), Mask.end());
41867      }
41868    }
41869  };
41870
41871  // View LHS in the form
41872  //   LHS = VECTOR_SHUFFLE A, B, LMask
41873  // If LHS is not a shuffle, then pretend it is the identity shuffle:
41874  //   LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
41875  // NOTE: A default initialized SDValue represents an UNDEF of type VT.
41876  SDValue A, B;
41877  SmallVector<int, 16> LMask;
41878  GetShuffle(LHS, A, B, LMask);
41879
41880  // Likewise, view RHS in the form
41881  //   RHS = VECTOR_SHUFFLE C, D, RMask
41882  SDValue C, D;
41883  SmallVector<int, 16> RMask;
41884  GetShuffle(RHS, C, D, RMask);
41885
41886  // At least one of the operands should be a vector shuffle.
41887  unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
41888  if (NumShuffles == 0)
41889    return false;
41890
41891  if (LMask.empty()) {
41892    A = LHS;
41893    for (unsigned i = 0; i != NumElts; ++i)
41894      LMask.push_back(i);
41895  }
41896
41897  if (RMask.empty()) {
41898    C = RHS;
41899    for (unsigned i = 0; i != NumElts; ++i)
41900      RMask.push_back(i);
41901  }
41902
41903  // If A and B occur in reverse order in RHS, then canonicalize by commuting
41904  // RHS operands and shuffle mask.
41905  if (A != C) {
41906    std::swap(C, D);
41907    ShuffleVectorSDNode::commuteMask(RMask);
41908  }
41909  // Check that the shuffles are both shuffling the same vectors.
41910  if (!(A == C && B == D))
41911    return false;
41912
41913  // LHS and RHS are now:
41914  //   LHS = shuffle A, B, LMask
41915  //   RHS = shuffle A, B, RMask
41916  // Check that the masks correspond to performing a horizontal operation.
41917  // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
41918  // so we just repeat the inner loop if this is a 256-bit op.
41919  unsigned Num128BitChunks = VT.getSizeInBits() / 128;
41920  unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
41921  assert((NumEltsPer128BitChunk % 2 == 0) &&
41922         "Vector type should have an even number of elements in each lane");
41923  for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
41924    for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
41925      // Ignore undefined components.
41926      int LIdx = LMask[i + j], RIdx = RMask[i + j];
41927      if (LIdx < 0 || RIdx < 0 ||
41928          (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
41929          (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
41930        continue;
41931
41932      // The  low half of the 128-bit result must choose from A.
41933      // The high half of the 128-bit result must choose from B,
41934      // unless B is undef. In that case, we are always choosing from A.
41935      unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
41936      unsigned Src = B.getNode() ? i >= NumEltsPer64BitChunk : 0;
41937
41938      // Check that successive elements are being operated on. If not, this is
41939      // not a horizontal operation.
41940      int Index = 2 * (i % NumEltsPer64BitChunk) + NumElts * Src + j;
41941      if (!(LIdx == Index && RIdx == Index + 1) &&
41942          !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
41943        return false;
41944    }
41945  }
41946
41947  LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
41948  RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
41949
41950  if (!shouldUseHorizontalOp(LHS == RHS && NumShuffles < 2, DAG, Subtarget))
41951    return false;
41952
41953  LHS = DAG.getBitcast(VT, LHS);
41954  RHS = DAG.getBitcast(VT, RHS);
41955  return true;
41956}
41957
41958/// Do target-specific dag combines on floating-point adds/subs.
41959static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
41960                               const X86Subtarget &Subtarget) {
41961  EVT VT = N->getValueType(0);
41962  SDValue LHS = N->getOperand(0);
41963  SDValue RHS = N->getOperand(1);
41964  bool IsFadd = N->getOpcode() == ISD::FADD;
41965  auto HorizOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
41966  assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");
41967
41968  // Try to synthesize horizontal add/sub from adds/subs of shuffles.
41969  if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
41970       (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
41971      isHorizontalBinOp(LHS, RHS, DAG, Subtarget, IsFadd))
41972    return DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
41973
41974  return SDValue();
41975}
41976
41977/// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
41978/// the codegen.
41979/// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
41980/// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
41981///       anything that is guaranteed to be transformed by DAGCombiner.
41982static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
41983                                          const X86Subtarget &Subtarget,
41984                                          const SDLoc &DL) {
41985  assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
41986  SDValue Src = N->getOperand(0);
41987  unsigned SrcOpcode = Src.getOpcode();
41988  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41989
41990  EVT VT = N->getValueType(0);
41991  EVT SrcVT = Src.getValueType();
41992
41993  auto IsFreeTruncation = [VT](SDValue Op) {
41994    unsigned TruncSizeInBits = VT.getScalarSizeInBits();
41995
41996    // See if this has been extended from a smaller/equal size to
41997    // the truncation size, allowing a truncation to combine with the extend.
41998    unsigned Opcode = Op.getOpcode();
41999    if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
42000         Opcode == ISD::ZERO_EXTEND) &&
42001        Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
42002      return true;
42003
42004    // See if this is a single use constant which can be constant folded.
42005    // NOTE: We don't peek through bitcasts here because there is currently
42006    // no support for constant folding truncate+bitcast+vector_of_constants. So
42007    // we'll just end up with a truncate on both operands which will
42008    // get turned back into (truncate (binop)), causing an infinite loop.
42009    return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
42010  };
42011
42012  auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
42013    SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
42014    SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
42015    return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
42016  };
42017
42018  // Don't combine if the operation has other uses.
42019  if (!Src.hasOneUse())
42020    return SDValue();
42021
42022  // Only support vector truncation for now.
42023  // TODO: i64 scalar math would benefit as well.
42024  if (!VT.isVector())
42025    return SDValue();
42026
42027  // In most cases it's only worth pre-truncating if we're only facing the cost
42028  // of one truncation.
42029  // i.e. if one of the inputs will constant fold or the input is repeated.
42030  switch (SrcOpcode) {
42031  case ISD::AND:
42032  case ISD::XOR:
42033  case ISD::OR: {
42034    SDValue Op0 = Src.getOperand(0);
42035    SDValue Op1 = Src.getOperand(1);
42036    if (TLI.isOperationLegalOrPromote(SrcOpcode, VT) &&
42037        (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
42038      return TruncateArithmetic(Op0, Op1);
42039    break;
42040  }
42041
42042  case ISD::MUL:
42043    // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - it's
42044    // better to truncate if we have the chance.
42045    if (SrcVT.getScalarType() == MVT::i64 &&
42046        TLI.isOperationLegal(SrcOpcode, VT) &&
42047        !TLI.isOperationLegal(SrcOpcode, SrcVT))
42048      return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
42049    LLVM_FALLTHROUGH;
42050  case ISD::ADD: {
42051    SDValue Op0 = Src.getOperand(0);
42052    SDValue Op1 = Src.getOperand(1);
42053    if (TLI.isOperationLegal(SrcOpcode, VT) &&
42054        (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
42055      return TruncateArithmetic(Op0, Op1);
42056    break;
42057  }
42058  case ISD::SUB: {
42059    // TODO: ISD::SUB We are conservative and require both sides to be freely
42060    // truncatable to avoid interfering with combineSubToSubus.
42061    SDValue Op0 = Src.getOperand(0);
42062    SDValue Op1 = Src.getOperand(1);
42063    if (TLI.isOperationLegal(SrcOpcode, VT) &&
42064        (Op0 == Op1 || (IsFreeTruncation(Op0) && IsFreeTruncation(Op1))))
42065      return TruncateArithmetic(Op0, Op1);
42066    break;
42067  }
42068  }
42069
42070  return SDValue();
42071}
42072
42073/// Truncate using ISD::AND mask and X86ISD::PACKUS.
42074/// e.g. trunc <8 x i32> X to <8 x i16> -->
42075/// MaskX = X & 0xffff (clear high bits to prevent saturation)
42076/// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
42077static SDValue combineVectorTruncationWithPACKUS(SDNode *N, const SDLoc &DL,
42078                                                 const X86Subtarget &Subtarget,
42079                                                 SelectionDAG &DAG) {
42080  SDValue In = N->getOperand(0);
42081  EVT InVT = In.getValueType();
42082  EVT OutVT = N->getValueType(0);
42083
42084  APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
42085                                    OutVT.getScalarSizeInBits());
42086  In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
42087  return truncateVectorWithPACK(X86ISD::PACKUS, OutVT, In, DL, DAG, Subtarget);
42088}
42089
42090/// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
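/// e.g. (a sketch) trunc <8 x i32> X to <8 x i16> -->
/// SExtX = sign_extend_inreg X, i16 (so PACKSS does not saturate)
/// packss (extract_subv SExtX, 0), (extract_subv SExtX, 1)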
42091static SDValue combineVectorTruncationWithPACKSS(SDNode *N, const SDLoc &DL,
42092                                                 const X86Subtarget &Subtarget,
42093                                                 SelectionDAG &DAG) {
42094  SDValue In = N->getOperand(0);
42095  EVT InVT = In.getValueType();
42096  EVT OutVT = N->getValueType(0);
42097  In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, InVT, In,
42098                   DAG.getValueType(OutVT));
42099  return truncateVectorWithPACK(X86ISD::PACKSS, OutVT, In, DL, DAG, Subtarget);
42100}
42101
42102/// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
42103/// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
42104/// legalization the truncation will be translated into a BUILD_VECTOR whose
42105/// elements are each extracted from a vector and then truncated, and it is
42106/// difficult to perform this optimization on that form.
42107static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
42108                                       const X86Subtarget &Subtarget) {
42109  EVT OutVT = N->getValueType(0);
42110  if (!OutVT.isVector())
42111    return SDValue();
42112
42113  SDValue In = N->getOperand(0);
42114  if (!In.getValueType().isSimple())
42115    return SDValue();
42116
42117  EVT InVT = In.getValueType();
42118  unsigned NumElems = OutVT.getVectorNumElements();
42119
42120  // TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
42121  // SSE2, and we need to take care of it specially.
42122  // AVX512 provides vpmovdb.
42123  if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
42124    return SDValue();
42125
42126  EVT OutSVT = OutVT.getVectorElementType();
42127  EVT InSVT = InVT.getVectorElementType();
42128  if (!((InSVT == MVT::i32 || InSVT == MVT::i64) &&
42129        (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
42130        NumElems >= 8))
42131    return SDValue();
42132
42133  // SSSE3's pshufb results in fewer instructions in the cases below.
42134  if (Subtarget.hasSSSE3() && NumElems == 8 &&
42135      ((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
42136       (InSVT == MVT::i32 && OutSVT == MVT::i16)))
42137    return SDValue();
42138
42139  SDLoc DL(N);
42140  // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
42141  // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
42142  // truncate 2 x v4i32 to v8i16.
42143  if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
42144    return combineVectorTruncationWithPACKUS(N, DL, Subtarget, DAG);
42145  if (InSVT == MVT::i32)
42146    return combineVectorTruncationWithPACKSS(N, DL, Subtarget, DAG);
42147
42148  return SDValue();
42149}
42150
42151/// This function transforms vector truncation of 'extended sign-bits' or
42152/// 'extended zero-bits' values.
42153/// vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32 into X86ISD::PACKSS/PACKUS operations.
42154static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
42155                                               SelectionDAG &DAG,
42156                                               const X86Subtarget &Subtarget) {
42157  // Requires SSE2.
42158  if (!Subtarget.hasSSE2())
42159    return SDValue();
42160
42161  if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
42162    return SDValue();
42163
42164  SDValue In = N->getOperand(0);
42165  if (!In.getValueType().isSimple())
42166    return SDValue();
42167
42168  MVT VT = N->getValueType(0).getSimpleVT();
42169  MVT SVT = VT.getScalarType();
42170
42171  MVT InVT = In.getValueType().getSimpleVT();
42172  MVT InSVT = InVT.getScalarType();
42173
42174  // Check we have a truncation suited for PACKSS/PACKUS.
42175  if (!VT.is128BitVector() && !VT.is256BitVector())
42176    return SDValue();
42177  if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
42178    return SDValue();
42179  if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
42180    return SDValue();
42181
42182  // AVX512 has fast truncate, but if the input is already going to be split,
42183  // there's no harm in trying pack.
42184  if (Subtarget.hasAVX512() &&
42185      !(!Subtarget.useAVX512Regs() && VT.is256BitVector() &&
42186        InVT.is512BitVector()))
42187    return SDValue();
42188
42189  unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
42190  unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
42191
42192  // Use PACKUS if the input has zero-bits that extend all the way to the
42193  // packed/truncated value. e.g. masks, zext_in_reg, etc.
42194  KnownBits Known = DAG.computeKnownBits(In);
42195  unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
42196  if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
42197    return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);
42198
42199  // Use PACKSS if the input has sign-bits that extend all the way to the
42200  // packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
42201  unsigned NumSignBits = DAG.ComputeNumSignBits(In);
42202  if (NumSignBits > (InSVT.getSizeInBits() - NumPackedSignBits))
42203    return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);
42204
42205  return SDValue();
42206}
42207
42208// Try to form a MULHU or MULHS node by looking for
42209// (trunc (srl (mul ext, ext), 16))
42210// TODO: This is X86 specific because we want to be able to handle wide types
42211// before type legalization. But we can only do it if the vector will be
42212// legalized via widening/splitting. Type legalization can't handle promotion
42213// of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
42214// combiner.
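// For example (illustrative):
//   (v8i16 (trunc (srl (mul (zext v8i16 X to v8i32),
//                           (zext v8i16 Y to v8i32)), 16)))
// becomes (mulhu X, Y), which selects to PMULHUW (or PMULHW when the inputs
// are sign extended).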
42215static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
42216                            SelectionDAG &DAG, const X86Subtarget &Subtarget) {
42217  // First instruction should be a right shift of a multiply.
42218  if (Src.getOpcode() != ISD::SRL ||
42219      Src.getOperand(0).getOpcode() != ISD::MUL)
42220    return SDValue();
42221
42222  if (!Subtarget.hasSSE2())
42223    return SDValue();
42224
42225  // Only handle vXi16 types that are at least 128-bits unless they will be
42226  // widened.
42227  if (!VT.isVector() || VT.getVectorElementType() != MVT::i16)
42228    return SDValue();
42229
42230  // Input type should be vXi32.
42231  EVT InVT = Src.getValueType();
42232  if (InVT.getVectorElementType() != MVT::i32)
42233    return SDValue();
42234
42235  // Need a shift by 16.
42236  APInt ShiftAmt;
42237  if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
42238      ShiftAmt != 16)
42239    return SDValue();
42240
42241  SDValue LHS = Src.getOperand(0).getOperand(0);
42242  SDValue RHS = Src.getOperand(0).getOperand(1);
42243
42244  unsigned ExtOpc = LHS.getOpcode();
42245  if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
42246      RHS.getOpcode() != ExtOpc)
42247    return SDValue();
42248
42249  // Peek through the extends.
42250  LHS = LHS.getOperand(0);
42251  RHS = RHS.getOperand(0);
42252
42253  // Ensure the input types match.
42254  if (LHS.getValueType() != VT || RHS.getValueType() != VT)
42255    return SDValue();
42256
42257  unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
42258  return DAG.getNode(Opc, DL, VT, LHS, RHS);
42259}
42260
42261// Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
42262// from one vector with signed bytes from another vector, adds together
42263// adjacent pairs of 16-bit products, and saturates the result before
42264// truncating to 16-bits.
42265//
42266// Which looks something like this:
42267// (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
42268//                 (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
42269static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
42270                               const X86Subtarget &Subtarget,
42271                               const SDLoc &DL) {
42272  if (!VT.isVector() || !Subtarget.hasSSSE3())
42273    return SDValue();
42274
42275  unsigned NumElems = VT.getVectorNumElements();
42276  EVT ScalarVT = VT.getVectorElementType();
42277  if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
42278    return SDValue();
42279
42280  SDValue SSatVal = detectSSatPattern(In, VT);
42281  if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
42282    return SDValue();
42283
42284  // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
42285  // of multiplies from even/odd elements.
42286  SDValue N0 = SSatVal.getOperand(0);
42287  SDValue N1 = SSatVal.getOperand(1);
42288
42289  if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
42290    return SDValue();
42291
42292  SDValue N00 = N0.getOperand(0);
42293  SDValue N01 = N0.getOperand(1);
42294  SDValue N10 = N1.getOperand(0);
42295  SDValue N11 = N1.getOperand(1);
42296
42297  // TODO: Handle constant vectors and use knownbits/computenumsignbits?
42298  // Canonicalize zero_extend to LHS.
42299  if (N01.getOpcode() == ISD::ZERO_EXTEND)
42300    std::swap(N00, N01);
42301  if (N11.getOpcode() == ISD::ZERO_EXTEND)
42302    std::swap(N10, N11);
42303
42304  // Ensure we have a zero_extend and a sign_extend.
42305  if (N00.getOpcode() != ISD::ZERO_EXTEND ||
42306      N01.getOpcode() != ISD::SIGN_EXTEND ||
42307      N10.getOpcode() != ISD::ZERO_EXTEND ||
42308      N11.getOpcode() != ISD::SIGN_EXTEND)
42309    return SDValue();
42310
42311  // Peek through the extends.
42312  N00 = N00.getOperand(0);
42313  N01 = N01.getOperand(0);
42314  N10 = N10.getOperand(0);
42315  N11 = N11.getOperand(0);
42316
42317  // Ensure the extend is from vXi8.
42318  if (N00.getValueType().getVectorElementType() != MVT::i8 ||
42319      N01.getValueType().getVectorElementType() != MVT::i8 ||
42320      N10.getValueType().getVectorElementType() != MVT::i8 ||
42321      N11.getValueType().getVectorElementType() != MVT::i8)
42322    return SDValue();
42323
42324  // All inputs should be build_vectors.
42325  if (N00.getOpcode() != ISD::BUILD_VECTOR ||
42326      N01.getOpcode() != ISD::BUILD_VECTOR ||
42327      N10.getOpcode() != ISD::BUILD_VECTOR ||
42328      N11.getOpcode() != ISD::BUILD_VECTOR)
42329    return SDValue();
42330
42331  // N00/N10 are zero extended. N01/N11 are sign extended.
42332
42333  // For each element, we need to ensure we have an odd element from one vector
42334  // multiplied by the odd element of another vector and the even element from
42335  // one of the same vectors being multiplied by the even element from the
42336  // other vector. So we need to make sure that for each element i, this
42337  // operation is being performed:
42338  //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
42339  SDValue ZExtIn, SExtIn;
42340  for (unsigned i = 0; i != NumElems; ++i) {
42341    SDValue N00Elt = N00.getOperand(i);
42342    SDValue N01Elt = N01.getOperand(i);
42343    SDValue N10Elt = N10.getOperand(i);
42344    SDValue N11Elt = N11.getOperand(i);
42345    // TODO: Be more tolerant to undefs.
42346    if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
42347        N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
42348        N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
42349        N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
42350      return SDValue();
42351    auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
42352    auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
42353    auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
42354    auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
42355    if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
42356      return SDValue();
42357    unsigned IdxN00 = ConstN00Elt->getZExtValue();
42358    unsigned IdxN01 = ConstN01Elt->getZExtValue();
42359    unsigned IdxN10 = ConstN10Elt->getZExtValue();
42360    unsigned IdxN11 = ConstN11Elt->getZExtValue();
42361    // Add is commutative so indices can be reordered.
42362    if (IdxN00 > IdxN10) {
42363      std::swap(IdxN00, IdxN10);
42364      std::swap(IdxN01, IdxN11);
42365    }
42366    // N0 indices must be the even element. N1 indices must be the next odd element.
42367    if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
42368        IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
42369      return SDValue();
42370    SDValue N00In = N00Elt.getOperand(0);
42371    SDValue N01In = N01Elt.getOperand(0);
42372    SDValue N10In = N10Elt.getOperand(0);
42373    SDValue N11In = N11Elt.getOperand(0);
42374    // First time we find an input capture it.
42375    if (!ZExtIn) {
42376      ZExtIn = N00In;
42377      SExtIn = N01In;
42378    }
42379    if (ZExtIn != N00In || SExtIn != N01In ||
42380        ZExtIn != N10In || SExtIn != N11In)
42381      return SDValue();
42382  }
42383
42384  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
42385                         ArrayRef<SDValue> Ops) {
42386    // Shrink by adding truncate nodes and let DAGCombine fold with the
42387    // sources.
42388    EVT InVT = Ops[0].getValueType();
42389    assert(InVT.getScalarType() == MVT::i8 &&
42390           "Unexpected scalar element type");
42391    assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
42392    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
42393                                 InVT.getVectorNumElements() / 2);
42394    return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
42395  };
42396  return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
42397                          PMADDBuilder);
42398}
42399
42400static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
42401                               const X86Subtarget &Subtarget) {
42402  EVT VT = N->getValueType(0);
42403  SDValue Src = N->getOperand(0);
42404  SDLoc DL(N);
42405
42406  // Attempt to pre-truncate inputs to arithmetic ops instead.
42407  if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
42408    return V;
42409
42410  // Try to detect AVG pattern first.
42411  if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
42412    return Avg;
42413
42414  // Try to detect PMADD
42415  if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
42416    return PMAdd;
42417
42418  // Try to combine truncation with signed/unsigned saturation.
42419  if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
42420    return Val;
42421
42422  // Try to combine PMULHUW/PMULHW for vXi16.
42423  if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
42424    return V;
42425
42426  // The bitcast source is a direct mmx result.
42427  // Detect bitcasts between i32 to x86mmx
42428  if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
42429    SDValue BCSrc = Src.getOperand(0);
42430    if (BCSrc.getValueType() == MVT::x86mmx)
42431      return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
42432  }
42433
42434  // Try to truncate extended sign/zero bits with PACKSS/PACKUS.
42435  if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
42436    return V;
42437
42438  return combineVectorTruncation(N, DAG, Subtarget);
42439}
42440
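/// Fold an X86ISD::VTRUNC whose input is a saturation pattern into the
/// saturating truncate nodes. For example (illustrative), a value clamped to
/// [-128, 127] before a vXi8 VTRUNC can use X86ISD::VTRUNCS directly.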
42441static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG) {
42442  EVT VT = N->getValueType(0);
42443  SDValue In = N->getOperand(0);
42444  SDLoc DL(N);
42445
42446  if (auto SSatVal = detectSSatPattern(In, VT))
42447    return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
42448  if (auto USatVal = detectUSatPattern(In, VT, DAG, DL))
42449    return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
42450
42451  return SDValue();
42452}
42453
42454/// Returns the negated value if the node \p N flips sign of FP value.
42455///
42456/// FP-negation node may have different forms: FNEG(x), FXOR (x, 0x80000000)
42457/// or FSUB(-0.0, x).
42458/// AVX512F does not have FXOR, so FNEG is lowered as
42459/// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
42460/// In this case we go through all bitcasts.
42461/// This also recognizes splat of a negated value and returns the splat of that
42462/// value.
42463static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
42464  if (N->getOpcode() == ISD::FNEG)
42465    return N->getOperand(0);
42466
42467  // Don't recurse exponentially.
42468  if (Depth > SelectionDAG::MaxRecursionDepth)
42469    return SDValue();
42470
42471  unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();
42472
42473  SDValue Op = peekThroughBitcasts(SDValue(N, 0));
42474  EVT VT = Op->getValueType(0);
42475
42476  // Make sure the element size doesn't change.
42477  if (VT.getScalarSizeInBits() != ScalarSize)
42478    return SDValue();
42479
42480  unsigned Opc = Op.getOpcode();
42481  switch (Opc) {
42482  case ISD::VECTOR_SHUFFLE: {
42483    // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
42484    // of this is VECTOR_SHUFFLE(-VEC1, UNDEF).  The mask can be anything here.
42485    if (!Op.getOperand(1).isUndef())
42486      return SDValue();
42487    if (SDValue NegOp0 = isFNEG(DAG, Op.getOperand(0).getNode(), Depth + 1))
42488      if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
42489        return DAG.getVectorShuffle(VT, SDLoc(Op), NegOp0, DAG.getUNDEF(VT),
42490                                    cast<ShuffleVectorSDNode>(Op)->getMask());
42491    break;
42492  }
42493  case ISD::INSERT_VECTOR_ELT: {
42494    // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF,
42495    // -V, INDEX).
42496    SDValue InsVector = Op.getOperand(0);
42497    SDValue InsVal = Op.getOperand(1);
42498    if (!InsVector.isUndef())
42499      return SDValue();
42500    if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
42501      if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
42502        return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
42503                           NegInsVal, Op.getOperand(2));
42504    break;
42505  }
42506  case ISD::FSUB:
42507  case ISD::XOR:
42508  case X86ISD::FXOR: {
42509    SDValue Op1 = Op.getOperand(1);
42510    SDValue Op0 = Op.getOperand(0);
42511
42512    // For XOR and FXOR, we want to check if constant
42513    // bits of Op1 are sign bit masks. For FSUB, we
42514    // have to check if constant bits of Op0 are sign
42515    // bit masks and hence we swap the operands.
42516    if (Opc == ISD::FSUB)
42517      std::swap(Op0, Op1);
42518
42519    APInt UndefElts;
42520    SmallVector<APInt, 16> EltBits;
42521    // Extract constant bits and see if they are all
42522    // sign bit masks. Ignore the undef elements.
42523    if (getTargetConstantBitsFromNode(Op1, ScalarSize, UndefElts, EltBits,
42524                                      /* AllowWholeUndefs */ true,
42525                                      /* AllowPartialUndefs */ false)) {
42526      for (unsigned I = 0, E = EltBits.size(); I < E; I++)
42527        if (!UndefElts[I] && !EltBits[I].isSignMask())
42528          return SDValue();
42529
42530      return peekThroughBitcasts(Op0);
42531    }
42532  }
42533  }
42534
42535  return SDValue();
42536}
42537
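// The rewrites below follow these identities, sketched for the plain FMA
// forms (the _RND variants behave analogously):
//   NegMul: FMA(a,b,c) = a*b+c  -->  FNMADD(a,b,c) = -(a*b)+c
//   NegAcc: FMA(a,b,c) = a*b+c  -->  FMSUB(a,b,c)  = a*b-c
//   NegRes: FMA(a,b,c) = a*b+c  -->  FNMSUB(a,b,c) = -(a*b)-c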
42538static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc,
42539                                bool NegRes) {
42540  if (NegMul) {
42541    switch (Opcode) {
42542    default: llvm_unreachable("Unexpected opcode");
42543    case ISD::FMA:             Opcode = X86ISD::FNMADD;       break;
42544    case X86ISD::FMADD_RND:    Opcode = X86ISD::FNMADD_RND;   break;
42545    case X86ISD::FMSUB:        Opcode = X86ISD::FNMSUB;       break;
42546    case X86ISD::FMSUB_RND:    Opcode = X86ISD::FNMSUB_RND;   break;
42547    case X86ISD::FNMADD:       Opcode = ISD::FMA;             break;
42548    case X86ISD::FNMADD_RND:   Opcode = X86ISD::FMADD_RND;    break;
42549    case X86ISD::FNMSUB:       Opcode = X86ISD::FMSUB;        break;
42550    case X86ISD::FNMSUB_RND:   Opcode = X86ISD::FMSUB_RND;    break;
42551    }
42552  }
42553
42554  if (NegAcc) {
42555    switch (Opcode) {
42556    default: llvm_unreachable("Unexpected opcode");
42557    case ISD::FMA:             Opcode = X86ISD::FMSUB;        break;
42558    case X86ISD::FMADD_RND:    Opcode = X86ISD::FMSUB_RND;    break;
42559    case X86ISD::FMSUB:        Opcode = ISD::FMA;             break;
42560    case X86ISD::FMSUB_RND:    Opcode = X86ISD::FMADD_RND;    break;
42561    case X86ISD::FNMADD:       Opcode = X86ISD::FNMSUB;       break;
42562    case X86ISD::FNMADD_RND:   Opcode = X86ISD::FNMSUB_RND;   break;
42563    case X86ISD::FNMSUB:       Opcode = X86ISD::FNMADD;       break;
42564    case X86ISD::FNMSUB_RND:   Opcode = X86ISD::FNMADD_RND;   break;
42565    case X86ISD::FMADDSUB:     Opcode = X86ISD::FMSUBADD;     break;
42566    case X86ISD::FMADDSUB_RND: Opcode = X86ISD::FMSUBADD_RND; break;
42567    case X86ISD::FMSUBADD:     Opcode = X86ISD::FMADDSUB;     break;
42568    case X86ISD::FMSUBADD_RND: Opcode = X86ISD::FMADDSUB_RND; break;
42569    }
42570  }
42571
42572  if (NegRes) {
42573    switch (Opcode) {
42574    default: llvm_unreachable("Unexpected opcode");
42575    case ISD::FMA:             Opcode = X86ISD::FNMSUB;       break;
42576    case X86ISD::FMADD_RND:    Opcode = X86ISD::FNMSUB_RND;   break;
42577    case X86ISD::FMSUB:        Opcode = X86ISD::FNMADD;       break;
42578    case X86ISD::FMSUB_RND:    Opcode = X86ISD::FNMADD_RND;   break;
42579    case X86ISD::FNMADD:       Opcode = X86ISD::FMSUB;        break;
42580    case X86ISD::FNMADD_RND:   Opcode = X86ISD::FMSUB_RND;    break;
42581    case X86ISD::FNMSUB:       Opcode = ISD::FMA;             break;
42582    case X86ISD::FNMSUB_RND:   Opcode = X86ISD::FMADD_RND;    break;
42583    }
42584  }
42585
42586  return Opcode;
42587}
42588
42589/// Do target-specific dag combines on floating point negations.
42590static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
42591                           const X86Subtarget &Subtarget) {
42592  EVT OrigVT = N->getValueType(0);
42593  SDValue Arg = isFNEG(DAG, N);
42594  if (!Arg)
42595    return SDValue();
42596
42597  EVT VT = Arg.getValueType();
42598  EVT SVT = VT.getScalarType();
42599  SDLoc DL(N);
42600
42601  // Let legalize expand this if it isn't a legal type yet.
42602  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
42603    return SDValue();
42604
42605  // If we're negating an FMUL node on a target with FMA, then we can avoid the
42606  // use of a constant by performing (-0 - A*B) instead.
42607  // FIXME: Check rounding control flags as well once it becomes available.
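  // For example: fneg (fmul A, B) --> fnmsub A, B, 0.0, i.e. -(A*B) - 0.0.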
42608  if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
42609      Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
42610    SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
42611    SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
42612                                  Arg.getOperand(1), Zero);
42613    return DAG.getBitcast(OrigVT, NewNode);
42614  }
42615
42616  // If we're negating an FMA node, then we can adjust the
42617  // instruction to include the extra negation.
42618  if (Arg.hasOneUse() && Subtarget.hasAnyFMA()) {
42619    switch (Arg.getOpcode()) {
42620    case ISD::FMA:
42621    case X86ISD::FMSUB:
42622    case X86ISD::FNMADD:
42623    case X86ISD::FNMSUB:
42624    case X86ISD::FMADD_RND:
42625    case X86ISD::FMSUB_RND:
42626    case X86ISD::FNMADD_RND:
42627    case X86ISD::FNMSUB_RND: {
42628      // We can't handle a scalar intrinsic node here because it would only
42629      // invert one element and not the whole vector. But we could try to handle
42630      // a negation of the lower element only.
42631      unsigned NewOpcode = negateFMAOpcode(Arg.getOpcode(), false, false, true);
42632      return DAG.getBitcast(OrigVT, DAG.getNode(NewOpcode, DL, VT, Arg->ops()));
42633    }
42634    }
42635  }
42636
42637  return SDValue();
42638}
42639
42640char X86TargetLowering::isNegatibleForFree(SDValue Op, SelectionDAG &DAG,
42641                                           bool LegalOperations,
42642                                           bool ForCodeSize,
42643                                           unsigned Depth) const {
42644  // fneg patterns are removable even if they have multiple uses.
42645  if (isFNEG(DAG, Op.getNode(), Depth))
42646    return 2;
42647
42648  // Don't recurse exponentially.
42649  if (Depth > SelectionDAG::MaxRecursionDepth)
42650    return 0;
42651
42652  EVT VT = Op.getValueType();
42653  EVT SVT = VT.getScalarType();
42654  switch (Op.getOpcode()) {
42655  case ISD::FMA:
42656  case X86ISD::FMSUB:
42657  case X86ISD::FNMADD:
42658  case X86ISD::FNMSUB:
42659  case X86ISD::FMADD_RND:
42660  case X86ISD::FMSUB_RND:
42661  case X86ISD::FNMADD_RND:
42662  case X86ISD::FNMSUB_RND: {
42663    if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
42664        !(SVT == MVT::f32 || SVT == MVT::f64) || !LegalOperations)
42665      break;
42666
42667    // This is always negatible for free but we might be able to remove some
42668    // extra operand negations as well.
42669    for (int i = 0; i != 3; ++i) {
42670      char V = isNegatibleForFree(Op.getOperand(i), DAG, LegalOperations,
42671                                  ForCodeSize, Depth + 1);
42672      if (V == 2)
42673        return V;
42674    }
42675    return 1;
42676  }
42677  }
42678
42679  return TargetLowering::isNegatibleForFree(Op, DAG, LegalOperations,
42680                                            ForCodeSize, Depth);
42681}
42682
42683SDValue X86TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
42684                                                bool LegalOperations,
42685                                                bool ForCodeSize,
42686                                                unsigned Depth) const {
42687  // fneg patterns are removable even if they have multiple uses.
42688  if (SDValue Arg = isFNEG(DAG, Op.getNode(), Depth))
42689    return DAG.getBitcast(Op.getValueType(), Arg);
42690
42691  EVT VT = Op.getValueType();
42692  EVT SVT = VT.getScalarType();
42693  unsigned Opc = Op.getOpcode();
42694  switch (Opc) {
42695  case ISD::FMA:
42696  case X86ISD::FMSUB:
42697  case X86ISD::FNMADD:
42698  case X86ISD::FNMSUB:
42699  case X86ISD::FMADD_RND:
42700  case X86ISD::FMSUB_RND:
42701  case X86ISD::FNMADD_RND:
42702  case X86ISD::FNMSUB_RND: {
42703    if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
42704        !(SVT == MVT::f32 || SVT == MVT::f64) || !LegalOperations)
42705      break;
42706
42707    // This is always negatible for free but we might be able to remove some
42708    // extra operand negations as well.
42709    SmallVector<SDValue, 4> NewOps(Op.getNumOperands(), SDValue());
42710    for (int i = 0; i != 3; ++i) {
42711      char V = isNegatibleForFree(Op.getOperand(i), DAG, LegalOperations,
42712                                  ForCodeSize, Depth + 1);
42713      if (V == 2)
42714        NewOps[i] = getNegatedExpression(Op.getOperand(i), DAG, LegalOperations,
42715                                         ForCodeSize, Depth + 1);
42716    }
42717
42718    bool NegA = !!NewOps[0];
42719    bool NegB = !!NewOps[1];
42720    bool NegC = !!NewOps[2];
42721    unsigned NewOpc = negateFMAOpcode(Opc, NegA != NegB, NegC, true);
42722
42723    // Fill in the non-negated ops with the original values.
42724    for (int i = 0, e = Op.getNumOperands(); i != e; ++i)
42725      if (!NewOps[i])
42726        NewOps[i] = Op.getOperand(i);
42727    return DAG.getNode(NewOpc, SDLoc(Op), VT, NewOps);
42728  }
42729  }
42730
42731  return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
42732                                              ForCodeSize, Depth);
42733}
42734
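/// Replace an X86 FP logic op with the equivalent integer logic op on the
/// bitcast operands, e.g. (schematically):
///   (v4f32 FAND A, B)
///     --> (v4f32 bitcast (and (v4i32 bitcast A), (v4i32 bitcast B)))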
42735static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
42736                                 const X86Subtarget &Subtarget) {
42737  MVT VT = N->getSimpleValueType(0);
42738  // If we have integer vector types available, use the integer opcodes.
42739  if (!VT.isVector() || !Subtarget.hasSSE2())
42740    return SDValue();
42741
42742  SDLoc dl(N);
42743
42744  unsigned IntBits = VT.getScalarSizeInBits();
42745  MVT IntSVT = MVT::getIntegerVT(IntBits);
42746  MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);
42747
42748  SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
42749  SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
42750  unsigned IntOpcode;
42751  switch (N->getOpcode()) {
42752  default: llvm_unreachable("Unexpected FP logic op");
42753  case X86ISD::FOR:   IntOpcode = ISD::OR; break;
42754  case X86ISD::FXOR:  IntOpcode = ISD::XOR; break;
42755  case X86ISD::FAND:  IntOpcode = ISD::AND; break;
42756  case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
42757  }
42758  SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
42759  return DAG.getBitcast(VT, IntOp);
42760}
42761
42762
42763/// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
42764static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
42765  if (N->getOpcode() != ISD::XOR)
42766    return SDValue();
42767
42768  SDValue LHS = N->getOperand(0);
42769  if (!isOneConstant(N->getOperand(1)) || LHS->getOpcode() != X86ISD::SETCC)
42770    return SDValue();
42771
42772  X86::CondCode NewCC = X86::GetOppositeBranchCondition(
42773      X86::CondCode(LHS->getConstantOperandVal(0)));
42774  SDLoc DL(N);
42775  return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
42776}
42777
42778static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
42779                          TargetLowering::DAGCombinerInfo &DCI,
42780                          const X86Subtarget &Subtarget) {
42781  // If this is SSE1 only convert to FXOR to avoid scalarization.
42782  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() &&
42783      N->getValueType(0) == MVT::v4i32) {
42784    return DAG.getBitcast(
42785        MVT::v4i32, DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
42786                                DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
42787                                DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
42788  }
42789
42790  if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
42791    return Cmp;
42792
42793  if (DCI.isBeforeLegalizeOps())
42794    return SDValue();
42795
42796  if (SDValue SetCC = foldXor1SetCC(N, DAG))
42797    return SetCC;
42798
42799  if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
42800    return RV;
42801
42802  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
42803    return FPLogic;
42804
42805  return combineFneg(N, DAG, Subtarget);
42806}
42807
42808static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
42809                            TargetLowering::DAGCombinerInfo &DCI,
42810                            const X86Subtarget &Subtarget) {
42811  SDValue Op0 = N->getOperand(0);
42812  SDValue Op1 = N->getOperand(1);
42813  EVT VT = N->getValueType(0);
42814  unsigned NumBits = VT.getSizeInBits();
42815
42816  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42817
42818  // TODO - Constant Folding.
42819  if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
42820    // Reduce Cst1 to its bottom 16 bits.
42821    // NOTE: SimplifyDemandedBits won't do this for constants.
42822    const APInt &Val1 = Cst1->getAPIntValue();
42823    APInt MaskedVal1 = Val1 & 0xFFFF;
42824    if (MaskedVal1 != Val1)
42825      return DAG.getNode(X86ISD::BEXTR, SDLoc(N), VT, Op0,
42826                         DAG.getConstant(MaskedVal1, SDLoc(N), VT));
42827  }
42828
42829  // Only the bottom 16 bits of the control value are required.
42830  APInt DemandedMask(APInt::getLowBitsSet(NumBits, 16));
42831  if (TLI.SimplifyDemandedBits(Op1, DemandedMask, DCI))
42832    return SDValue(N, 0);
42833
42834  return SDValue();
42835}
42836
42837static bool isNullFPScalarOrVectorConst(SDValue V) {
42838  return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
42839}
42840
42841/// If a value is a scalar FP zero or a vector FP zero (potentially including
42842/// undefined elements), return a zero constant that may be used to fold away
42843/// that value. In the case of a vector, the returned constant will not contain
42844/// undefined elements even if the input parameter does. This makes it suitable
42845/// to be used as a replacement operand with operations (e.g., bitwise-and) where
42846/// an undef should not propagate.
42847static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
42848                                        const X86Subtarget &Subtarget) {
42849  if (!isNullFPScalarOrVectorConst(V))
42850    return SDValue();
42851
42852  if (V.getValueType().isVector())
42853    return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
42854
42855  return V;
42856}
42857
42858static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
42859                                      const X86Subtarget &Subtarget) {
42860  SDValue N0 = N->getOperand(0);
42861  SDValue N1 = N->getOperand(1);
42862  EVT VT = N->getValueType(0);
42863  SDLoc DL(N);
42864
42865  // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
42866  if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
42867        (VT == MVT::f64 && Subtarget.hasSSE2()) ||
42868        (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
42869    return SDValue();
42870
42871  auto isAllOnesConstantFP = [](SDValue V) {
42872    if (V.getSimpleValueType().isVector())
42873      return ISD::isBuildVectorAllOnes(V.getNode());
42874    auto *C = dyn_cast<ConstantFPSDNode>(V);
42875    return C && C->getConstantFPValue()->isAllOnesValue();
42876  };
42877
42878  // fand (fxor X, -1), Y --> fandn X, Y
42879  if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
42880    return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
42881
42882  // fand X, (fxor Y, -1) --> fandn Y, X
42883  if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
42884    return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
42885
42886  return SDValue();
42887}
42888
42889/// Do target-specific dag combines on X86ISD::FAND nodes.
42890static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
42891                           const X86Subtarget &Subtarget) {
42892  // FAND(0.0, x) -> 0.0
42893  if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
42894    return V;
42895
42896  // FAND(x, 0.0) -> 0.0
42897  if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
42898    return V;
42899
42900  if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
42901    return V;
42902
42903  return lowerX86FPLogicOp(N, DAG, Subtarget);
42904}
42905
42906/// Do target-specific dag combines on X86ISD::FANDN nodes.
42907static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
42908                            const X86Subtarget &Subtarget) {
42909  // FANDN(0.0, x) -> x
42910  if (isNullFPScalarOrVectorConst(N->getOperand(0)))
42911    return N->getOperand(1);
42912
42913  // FANDN(x, 0.0) -> 0.0
42914  if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
42915    return V;
42916
42917  return lowerX86FPLogicOp(N, DAG, Subtarget);
42918}
42919
42920/// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
42921static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
42922                          const X86Subtarget &Subtarget) {
42923  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
42924
42925  // F[X]OR(0.0, x) -> x
42926  if (isNullFPScalarOrVectorConst(N->getOperand(0)))
42927    return N->getOperand(1);
42928
42929  // F[X]OR(x, 0.0) -> x
42930  if (isNullFPScalarOrVectorConst(N->getOperand(1)))
42931    return N->getOperand(0);
42932
42933  if (SDValue NewVal = combineFneg(N, DAG, Subtarget))
42934    return NewVal;
42935
42936  return lowerX86FPLogicOp(N, DAG, Subtarget);
42937}
42938
42939/// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
42940static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
42941  assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
42942
42943  // FMIN/FMAX are commutative if no NaNs and no negative zeros are allowed.
42944  if (!DAG.getTarget().Options.NoNaNsFPMath ||
42945      !DAG.getTarget().Options.NoSignedZerosFPMath)
42946    return SDValue();
42947
42948  // With no NaNs and no signed zeros, convert the FMIN and FMAX nodes into
42949  // FMINC and FMAXC, which are commutative operations.
42950  unsigned NewOp = 0;
42951  switch (N->getOpcode()) {
42952    default: llvm_unreachable("unknown opcode");
42953    case X86ISD::FMIN:  NewOp = X86ISD::FMINC; break;
42954    case X86ISD::FMAX:  NewOp = X86ISD::FMAXC; break;
42955  }
42956
42957  return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
42958                     N->getOperand(0), N->getOperand(1));
42959}
42960
42961static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
42962                                     const X86Subtarget &Subtarget) {
42963  if (Subtarget.useSoftFloat())
42964    return SDValue();
42965
42966  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42967
42968  EVT VT = N->getValueType(0);
42969  if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
42970        (Subtarget.hasSSE2() && VT == MVT::f64) ||
42971        (VT.isVector() && TLI.isTypeLegal(VT))))
42972    return SDValue();
42973
42974  SDValue Op0 = N->getOperand(0);
42975  SDValue Op1 = N->getOperand(1);
42976  SDLoc DL(N);
42977  auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
42978
42979  // If we don't have to respect NaN inputs, this is a direct translation to x86
42980  // min/max instructions.
42981  if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
42982    return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
42983
42984  // If one of the operands is known non-NaN use the native min/max instructions
42985  // with the non-NaN input as second operand.
42986  if (DAG.isKnownNeverNaN(Op1))
42987    return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
42988  if (DAG.isKnownNeverNaN(Op0))
42989    return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());
42990
42991  // If we have to respect NaN inputs, this takes at least 3 instructions.
42992  // Favor a library call when operating on a scalar and minimizing code size.
42993  if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
42994    return SDValue();
42995
42996  EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
42997                                         VT);
42998
42999  // There are 4 possibilities involving NaN inputs, and these are the required
43000  // outputs:
43001  //                   Op1
43002  //               Num     NaN
43003  //            ----------------
43004  //       Num  |  Max  |  Op0 |
43005  // Op0        ----------------
43006  //       NaN  |  Op1  |  NaN |
43007  //            ----------------
43008  //
43009  // The SSE FP max/min instructions were not designed for this case, but rather
43010  // to implement:
43011  //   Min = Op1 < Op0 ? Op1 : Op0
43012  //   Max = Op1 > Op0 ? Op1 : Op0
43013  //
43014  // So they always return Op0 if either input is a NaN. However, we can still
43015  // use those instructions for fmaxnum by selecting away a NaN input.
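  // Schematically, for fmaxnum (fminnum is analogous):
  //   MinOrMax = FMAX Op1, Op0         (returns Op0 if either input is NaN)
  //   IsOp0Nan = setcc Op0, Op0, setuo (true iff Op0 is NaN)
  //   result   = select IsOp0Nan, Op1, MinOrMax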
43016
43017  // If either operand is NaN, the 2nd source operand (Op0) is passed through.
43018  SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
43019  SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
43020
43021  // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
43022  // are NaN, the NaN value of Op1 is the result.
43023  return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
43024}
43025
43026static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
43027                                   TargetLowering::DAGCombinerInfo &DCI) {
43028  EVT VT = N->getValueType(0);
43029  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43030
43031  APInt KnownUndef, KnownZero;
43032  APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
43033  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
43034                                     KnownZero, DCI))
43035    return SDValue(N, 0);
43036
43037  // Convert a full vector load into vzload when not all bits are needed.
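  // For example, a conversion producing 2 elements from a 4-element 128-bit
  // source only reads the low 64 bits, so the full load can be shrunk to a
  // 64-bit VZEXT_LOAD while keeping the chain users intact.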
43038  SDValue In = N->getOperand(0);
43039  MVT InVT = In.getSimpleValueType();
43040  if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
43041      ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
43042    assert(InVT.is128BitVector() && "Expected 128-bit input vector");
43043    LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
43044    // Unless the load is volatile or atomic.
43045    if (LN->isSimple()) {
43046      SDLoc dl(N);
43047      unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
43048      MVT MemVT = MVT::getIntegerVT(NumBits);
43049      MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
43050      SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
43051      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
43052      SDValue VZLoad =
43053          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
43054                                  LN->getPointerInfo(),
43055                                  LN->getAlignment(),
43056                                  LN->getMemOperand()->getFlags());
43057      SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
43058                                    DAG.getBitcast(InVT, VZLoad));
43059      DCI.CombineTo(N, Convert);
43060      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
43061      return SDValue(N, 0);
43062    }
43063  }
43064
43065  return SDValue();
43066}
43067
43068static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
43069                                     TargetLowering::DAGCombinerInfo &DCI) {
43070  // FIXME: Handle strict fp nodes.
43071  EVT VT = N->getValueType(0);
43072
43073  // Convert a full vector load into vzload when not all bits are needed.
43074  SDValue In = N->getOperand(0);
43075  MVT InVT = In.getSimpleValueType();
43076  if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
43077      ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
43078    assert(InVT.is128BitVector() && "Expected 128-bit input vector");
43079    LoadSDNode *LN = cast<LoadSDNode>(In);
43080    // Unless the load is volatile or atomic.
43081    if (LN->isSimple()) {
43082      SDLoc dl(N);
43083      unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
43084      MVT MemVT = MVT::getFloatingPointVT(NumBits);
43085      MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
43086      SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
43087      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
43088      SDValue VZLoad =
43089          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
43090                                  LN->getPointerInfo(),
43091                                  LN->getAlignment(),
43092                                  LN->getMemOperand()->getFlags());
43093      SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
43094                                    DAG.getBitcast(InVT, VZLoad));
43095      DCI.CombineTo(N, Convert);
43096      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
43097      return SDValue(N, 0);
43098    }
43099  }
43100
43101  return SDValue();
43102}
43103
43104/// Do target-specific dag combines on X86ISD::ANDNP nodes.
43105static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
43106                            TargetLowering::DAGCombinerInfo &DCI,
43107                            const X86Subtarget &Subtarget) {
43108  MVT VT = N->getSimpleValueType(0);
43109
43110  // ANDNP(0, x) -> x
43111  if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
43112    return N->getOperand(1);
43113
43114  // ANDNP(x, 0) -> 0
43115  if (ISD::isBuildVectorAllZeros(N->getOperand(1).getNode()))
43116    return DAG.getConstant(0, SDLoc(N), VT);
43117
43118  // Turn ANDNP back to AND if input is inverted.
43119  if (SDValue Not = IsNOT(N->getOperand(0), DAG))
43120    return DAG.getNode(ISD::AND, SDLoc(N), VT, DAG.getBitcast(VT, Not),
43121                       N->getOperand(1));
43122
43123  // Attempt to recursively combine a bitmask ANDNP with shuffles.
43124  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
43125    SDValue Op(N, 0);
43126    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
43127      return Res;
43128  }
43129
43130  return SDValue();
43131}
43132
43133static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
43134                         TargetLowering::DAGCombinerInfo &DCI) {
43135  SDValue N0 = N->getOperand(0);
43136  SDValue N1 = N->getOperand(1);
43137
43138  // BT ignores high bits in the bit index operand.
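  // For example, with an i32 bit-index operand only the low 5 bits matter, so
  // something like (bt X, (and Idx, 31)) can drop the mask.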
43139  unsigned BitWidth = N1.getValueSizeInBits();
43140  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
43141  if (SDValue DemandedN1 = DAG.GetDemandedBits(N1, DemandedMask))
43142    return DAG.getNode(X86ISD::BT, SDLoc(N), MVT::i32, N0, DemandedN1);
43143
43144  return SDValue();
43145}
43146
43147// Try to combine sext_in_reg of a cmov of constants by extending the constants.
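// For example (schematic, operand order abbreviated):
//   (sext_in_reg (i32 cmov C1, C2, cc), i8)
//     --> (i32 cmov (sext_in_reg C1, i8), (sext_in_reg C2, i8), cc)
// where the extensions of the constant operands fold away.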
43148static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
43149  assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
43150
43151  EVT DstVT = N->getValueType(0);
43152
43153  SDValue N0 = N->getOperand(0);
43154  SDValue N1 = N->getOperand(1);
43155  EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
43156
43157  if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
43158    return SDValue();
43159
43160  // Look through single use any_extends / truncs.
43161  SDValue IntermediateBitwidthOp;
43162  if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
43163      N0.hasOneUse()) {
43164    IntermediateBitwidthOp = N0;
43165    N0 = N0.getOperand(0);
43166  }
43167
43168  // See if we have a single use cmov.
43169  if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
43170    return SDValue();
43171
43172  SDValue CMovOp0 = N0.getOperand(0);
43173  SDValue CMovOp1 = N0.getOperand(1);
43174
43175  // Make sure both operands are constants.
43176  if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
43177      !isa<ConstantSDNode>(CMovOp1.getNode()))
43178    return SDValue();
43179
43180  SDLoc DL(N);
43181
43182  // If we looked through an any_extend/trunc above, apply it to the constants.
43183  if (IntermediateBitwidthOp) {
43184    unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
43185    CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
43186    CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
43187  }
43188
43189  CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
43190  CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);
43191
43192  EVT CMovVT = DstVT;
43193  // We do not want i16 CMOVs. Promote to i32 and truncate afterwards.
43194  if (DstVT == MVT::i16) {
43195    CMovVT = MVT::i32;
43196    CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
43197    CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
43198  }
43199
43200  SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
43201                             N0.getOperand(2), N0.getOperand(3));
43202
43203  if (CMovVT != DstVT)
43204    CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);
43205
43206  return CMov;
43207}
43208
43209static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
43210                                      const X86Subtarget &Subtarget) {
43211  assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
43212
43213  if (SDValue V = combineSextInRegCmov(N, DAG))
43214    return V;
43215
43216  EVT VT = N->getValueType(0);
43217  SDValue N0 = N->getOperand(0);
43218  SDValue N1 = N->getOperand(1);
43219  EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
43220  SDLoc dl(N);
43221
43222  // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and AVX2
43223  // since there is no sign-extended shift-right operation on a vector with
43224  // 64-bit elements.
43225  // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
43226  //   (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
43227  if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
43228      N0.getOpcode() == ISD::SIGN_EXTEND)) {
43229    SDValue N00 = N0.getOperand(0);
43230
43231    // EXTLOAD has a better solution on AVX2:
43232    // it may be replaced with an X86ISD::VSEXT node.
43233    if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
43234      if (!ISD::isNormalLoad(N00.getNode()))
43235        return SDValue();
43236
43237    if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
43238      SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
43239                                N00, N1);
43240      return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
43241    }
43242  }
43243  return SDValue();
43244}
43245
43246/// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
43247/// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
43248/// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
43249/// opportunities to combine math ops, use an LEA, or use a complex addressing
43250/// mode. This can eliminate extend, add, and shift instructions.
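/// For example: (i64 sext (i32 add_nsw X, 5)) --> (i64 add (i64 sext X), 5),
/// which can then feed an LEA or a complex addressing mode.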
43251static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
43252                                   const X86Subtarget &Subtarget) {
43253  if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
43254      Ext->getOpcode() != ISD::ZERO_EXTEND)
43255    return SDValue();
43256
43257  // TODO: This should be valid for other integer types.
43258  EVT VT = Ext->getValueType(0);
43259  if (VT != MVT::i64)
43260    return SDValue();
43261
43262  SDValue Add = Ext->getOperand(0);
43263  if (Add.getOpcode() != ISD::ADD)
43264    return SDValue();
43265
43266  bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
43267  bool NSW = Add->getFlags().hasNoSignedWrap();
43268  bool NUW = Add->getFlags().hasNoUnsignedWrap();
43269
43270  // We need an 'add nsw' feeding into the 'sext' or 'add nuw' feeding
43271  // into the 'zext'.
43272  if ((Sext && !NSW) || (!Sext && !NUW))
43273    return SDValue();
43274
43275  // Having a constant operand to the 'add' ensures that we are not increasing
43276  // the instruction count because the constant is extended for free below.
43277  // A constant operand can also become the displacement field of an LEA.
43278  auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
43279  if (!AddOp1)
43280    return SDValue();
43281
43282  // Don't make the 'add' bigger if there's no hope of combining it with some
43283  // other 'add' or 'shl' instruction.
43284  // TODO: It may be profitable to generate simpler LEA instructions in place
43285  // of single 'add' instructions, but the cost model for selecting an LEA
43286  // currently has a high threshold.
43287  bool HasLEAPotential = false;
43288  for (auto *User : Ext->uses()) {
43289    if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
43290      HasLEAPotential = true;
43291      break;
43292    }
43293  }
43294  if (!HasLEAPotential)
43295    return SDValue();
43296
43297  // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
43298  int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
43299  SDValue AddOp0 = Add.getOperand(0);
43300  SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
43301  SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);
43302
43303  // The wider add is guaranteed to not wrap because both operands are
43304  // sign- or zero-extended to match the nsw/nuw flag checked above.
43305  SDNodeFlags Flags;
43306  Flags.setNoSignedWrap(NSW);
43307  Flags.setNoUnsignedWrap(NUW);
43308  return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
43309}
43310
43311// If we face {ANY,SIGN,ZERO}_EXTEND that is applied to a CMOV with constant
43312// operands and the result of CMOV is not used anywhere else - promote CMOV
43313// itself instead of promoting its result. This could be beneficial, because:
43314//     1) X86TargetLowering::EmitLoweredSelect later can do merging of two
43315//        (or more) pseudo-CMOVs only when they go one-after-another and
43316//        getting rid of result extension code after CMOV will help that.
43317//     2) Promotion of constant CMOV arguments is free, hence the
43318//        {ANY,SIGN,ZERO}_EXTEND will just be deleted.
43319//     3) A 16-bit CMOV encoding is 4 bytes and a 32-bit CMOV is 3 bytes, so
43320//        this promotion is also good in terms of code-size.
43321//        (64-bit CMOV is 4 bytes, which is why we don't do 32-bit => 64-bit
43322//         promotion).
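// For example (schematic): (i32 zext (i16 cmov C1, C2, cc))
//   --> (i32 cmov (i32 zext C1), (i32 zext C2), cc)
// with the extensions of the constants folding away.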
43323static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
43324  SDValue CMovN = Extend->getOperand(0);
43325  if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
43326    return SDValue();
43327
43328  EVT TargetVT = Extend->getValueType(0);
43329  unsigned ExtendOpcode = Extend->getOpcode();
43330  SDLoc DL(Extend);
43331
43332  EVT VT = CMovN.getValueType();
43333  SDValue CMovOp0 = CMovN.getOperand(0);
43334  SDValue CMovOp1 = CMovN.getOperand(1);
43335
43336  if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
43337      !isa<ConstantSDNode>(CMovOp1.getNode()))
43338    return SDValue();
43339
43340  // Only extend to i32 or i64.
43341  if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
43342    return SDValue();
43343
43344  // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from i32
43345  // are free.
43346  if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
43347    return SDValue();
43348
43349  // If this is a zero extend to i64, we should only extend to i32 and use a free
43350  // zero extend to finish.
43351  EVT ExtendVT = TargetVT;
43352  if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
43353    ExtendVT = MVT::i32;
43354
43355  CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
43356  CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);
43357
43358  SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
43359                            CMovN.getOperand(2), CMovN.getOperand(3));
43360
43361  // Finish extending if needed.
43362  if (ExtendVT != TargetVT)
43363    Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);
43364
43365  return Res;
43366}
43367
43368// Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
43369// This is more or less the reverse of combineBitcastvxi1.
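// For example (sketch): (v16i8 sext (v16i1 bitcast (i16 X))) is rebuilt by
// broadcasting X, AND'ing each element with its bit mask, comparing against
// that mask and sign-extending the result (plus a shift for zero-extension).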
43370static SDValue
43371combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG,
43372                               TargetLowering::DAGCombinerInfo &DCI,
43373                               const X86Subtarget &Subtarget) {
43374  unsigned Opcode = N->getOpcode();
43375  if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
43376      Opcode != ISD::ANY_EXTEND)
43377    return SDValue();
43378  if (!DCI.isBeforeLegalizeOps())
43379    return SDValue();
43380  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
43381    return SDValue();
43382
43383  SDValue N0 = N->getOperand(0);
43384  EVT VT = N->getValueType(0);
43385  EVT SVT = VT.getScalarType();
43386  EVT InSVT = N0.getValueType().getScalarType();
43387  unsigned EltSizeInBits = SVT.getSizeInBits();
43388
43389  // Input type must be extending a bool vector (bit-casted from a scalar
43390  // integer) to legal integer types.
43391  if (!VT.isVector())
43392    return SDValue();
43393  if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
43394    return SDValue();
43395  if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
43396    return SDValue();
43397
43398  SDValue N00 = N0.getOperand(0);
43399  EVT SclVT = N0.getOperand(0).getValueType();
43400  if (!SclVT.isScalarInteger())
43401    return SDValue();
43402
43403  SDLoc DL(N);
43404  SDValue Vec;
43405  SmallVector<int, 32> ShuffleMask;
43406  unsigned NumElts = VT.getVectorNumElements();
43407  assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
43408
43409  // Broadcast the scalar integer to the vector elements.
43410  if (NumElts > EltSizeInBits) {
43411    // If the scalar integer is greater than the vector element size, then we
43412    // must split it down into sub-sections for broadcasting. For example:
43413    //   i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
43414    //   i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
43415    assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
43416    unsigned Scale = NumElts / EltSizeInBits;
43417    EVT BroadcastVT =
43418        EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
43419    Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
43420    Vec = DAG.getBitcast(VT, Vec);
43421
43422    for (unsigned i = 0; i != Scale; ++i)
43423      ShuffleMask.append(EltSizeInBits, i);
43424  } else {
43425    // For a smaller scalar integer, we can simply any-extend it to the vector
43426    // element size (we don't care about the upper bits) and broadcast it to all
43427    // elements.
43428    SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
43429    Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
43430    ShuffleMask.append(NumElts, 0);
43431  }
43432  Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
43433
43434  // Now, mask the relevant bit in each element.
43435  SmallVector<SDValue, 32> Bits;
43436  for (unsigned i = 0; i != NumElts; ++i) {
43437    int BitIdx = (i % EltSizeInBits);
43438    APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
43439    Bits.push_back(DAG.getConstant(Bit, DL, SVT));
43440  }
43441  SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
43442  Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
43443
43444  // Compare against the bitmask and extend the result.
43445  EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
43446  Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
43447  Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
43448
43449  // For SEXT, this is now done, otherwise shift the result down for
43450  // zero-extension.
43451  if (Opcode == ISD::SIGN_EXTEND)
43452    return Vec;
43453  return DAG.getNode(ISD::SRL, DL, VT, Vec,
43454                     DAG.getConstant(EltSizeInBits - 1, DL, VT));
43455}
43456
43457// Attempt to combine a (sext/zext (setcc)) to a setcc with an xmm/ymm/zmm
43458// result type.
43459static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
43460                               const X86Subtarget &Subtarget) {
43461  SDValue N0 = N->getOperand(0);
43462  EVT VT = N->getValueType(0);
43463  SDLoc dl(N);
43464
43465  // Only do this combine with AVX512 for vector extends.
43466  if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
43467    return SDValue();
43468
43469  // Only combine legal element types.
43470  EVT SVT = VT.getVectorElementType();
43471  if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
43472      SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
43473    return SDValue();
43474
43475  // We can only do this if the vector size is 256 bits or less.
43476  unsigned Size = VT.getSizeInBits();
43477  if (Size > 256)
43478    return SDValue();
43479
43480  // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
43481  // those are the only integer compares we have.
43482  ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
43483  if (ISD::isUnsignedIntSetCC(CC))
43484    return SDValue();
43485
43486  // Only do this combine if the extension will be fully consumed by the setcc.
43487  EVT N00VT = N0.getOperand(0).getValueType();
43488  EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
43489  if (Size != MatchingVecType.getSizeInBits())
43490    return SDValue();
43491
43492  SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
43493
43494  if (N->getOpcode() == ISD::ZERO_EXTEND)
43495    Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType().getScalarType());
43496
43497  return Res;
43498}
43499
43500static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
43501                           TargetLowering::DAGCombinerInfo &DCI,
43502                           const X86Subtarget &Subtarget) {
43503  SDValue N0 = N->getOperand(0);
43504  EVT VT = N->getValueType(0);
43505  EVT InVT = N0.getValueType();
43506  SDLoc DL(N);
43507
43508  if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
43509    return NewCMov;
43510
43511  if (!DCI.isBeforeLegalizeOps())
43512    return SDValue();
43513
43514  if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
43515    return V;
43516
43517  if (InVT == MVT::i1 && N0.getOpcode() == ISD::XOR &&
43518      isAllOnesConstant(N0.getOperand(1)) && N0.hasOneUse()) {
43519    // Inverting and sign-extending a boolean is the same as zero-extending
43520    // it and subtracting 1, because 0 becomes -1 and 1 becomes 0. The subtract
43521    // is efficiently lowered with an LEA or a DEC. Same as: select Bool, 0, -1.
43522    // sext (xor Bool, -1) --> sub (zext Bool), 1
43523    SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
43524    return DAG.getNode(ISD::SUB, DL, VT, Zext, DAG.getConstant(1, DL, VT));
43525  }
43526
43527  if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
43528    return V;
43529
43530  if (VT.isVector())
43531    if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
43532      return R;
43533
43534  if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
43535    return NewAdd;
43536
43537  return SDValue();
43538}
43539
43540static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
43541                          TargetLowering::DAGCombinerInfo &DCI,
43542                          const X86Subtarget &Subtarget) {
43543  SDLoc dl(N);
43544  EVT VT = N->getValueType(0);
43545
43546  // Let legalize expand this if it isn't a legal type yet.
43547  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43548  if (!TLI.isTypeLegal(VT))
43549    return SDValue();
43550
43551  EVT ScalarVT = VT.getScalarType();
43552  if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA())
43553    return SDValue();
43554
43555  SDValue A = N->getOperand(0);
43556  SDValue B = N->getOperand(1);
43557  SDValue C = N->getOperand(2);
43558
43559  auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) {
43560    bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
43561    bool LegalOperations = !DCI.isBeforeLegalizeOps();
43562    if (TLI.isNegatibleForFree(V, DAG, LegalOperations, CodeSize) == 2) {
43563      V = TLI.getNegatedExpression(V, DAG, LegalOperations, CodeSize);
43564      return true;
43565    }
43566    // Look through extract_vector_elts. If it comes from an FNEG, create a
43567    // new extract from the FNEG input.
43568    if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
43569        isNullConstant(V.getOperand(1))) {
43570      SDValue Vec = V.getOperand(0);
43571      if (TLI.isNegatibleForFree(Vec, DAG, LegalOperations, CodeSize) == 2) {
43572        SDValue NegVal =
43573            TLI.getNegatedExpression(Vec, DAG, LegalOperations, CodeSize);
43574        V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
43575                        NegVal, V.getOperand(1));
43576        return true;
43577      }
43578    }
43579
43580    return false;
43581  };
43582
43583  // Do not convert the passthru input of scalar intrinsics.
43584  // FIXME: We could allow negations of the lower element only.
43585  bool NegA = invertIfNegative(A);
43586  bool NegB = invertIfNegative(B);
43587  bool NegC = invertIfNegative(C);
43588
43589  if (!NegA && !NegB && !NegC)
43590    return SDValue();
43591
43592  unsigned NewOpcode =
43593      negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC, false);
43594
43595  if (N->getNumOperands() == 4)
43596    return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
43597  return DAG.getNode(NewOpcode, dl, VT, A, B, C);
43598}
43599
43600// Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
43601// Combine FMSUBADD(A, B, FNEG(C)) -> FMADDSUB(A, B, C)
43602static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
43603                               TargetLowering::DAGCombinerInfo &DCI) {
43604  SDLoc dl(N);
43605  EVT VT = N->getValueType(0);
43606  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43607  bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
43608  bool LegalOperations = !DCI.isBeforeLegalizeOps();
43609
43610  SDValue N2 = N->getOperand(2);
43611  if (TLI.isNegatibleForFree(N2, DAG, LegalOperations, CodeSize) != 2)
43612    return SDValue();
43613
43614  SDValue NegN2 = TLI.getNegatedExpression(N2, DAG, LegalOperations, CodeSize);
43615  unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), false, true, false);
43616
43617  if (N->getNumOperands() == 4)
43618    return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
43619                       NegN2, N->getOperand(3));
43620  return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
43621                     NegN2);
43622}
43623
43624static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
43625                           TargetLowering::DAGCombinerInfo &DCI,
43626                           const X86Subtarget &Subtarget) {
43627  // (i32 zext (and (i8  x86isd::setcc_carry), 1)) ->
43628  //           (and (i32 x86isd::setcc_carry), 1)
43629  // This eliminates the zext. This transformation is necessary because
43630  // ISD::SETCC is always legalized to i8.
43631  SDLoc dl(N);
43632  SDValue N0 = N->getOperand(0);
43633  EVT VT = N->getValueType(0);
43634
43635  if (N0.getOpcode() == ISD::AND &&
43636      N0.hasOneUse() &&
43637      N0.getOperand(0).hasOneUse()) {
43638    SDValue N00 = N0.getOperand(0);
43639    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
43640      if (!isOneConstant(N0.getOperand(1)))
43641        return SDValue();
43642      return DAG.getNode(ISD::AND, dl, VT,
43643                         DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
43644                                     N00.getOperand(0), N00.getOperand(1)),
43645                         DAG.getConstant(1, dl, VT));
43646    }
43647  }
43648
43649  if (N0.getOpcode() == ISD::TRUNCATE &&
43650      N0.hasOneUse() &&
43651      N0.getOperand(0).hasOneUse()) {
43652    SDValue N00 = N0.getOperand(0);
43653    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
43654      return DAG.getNode(ISD::AND, dl, VT,
43655                         DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
43656                                     N00.getOperand(0), N00.getOperand(1)),
43657                         DAG.getConstant(1, dl, VT));
43658    }
43659  }
43660
43661  if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
43662    return NewCMov;
43663
43664  if (DCI.isBeforeLegalizeOps())
43665    if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
43666      return V;
43667
43668  if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
43669    return V;
43670
43671  if (VT.isVector())
43672    if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
43673      return R;
43674
43675  if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
43676    return NewAdd;
43677
43678  if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
43679    return R;
43680
43681  // TODO: Combine with any target/faux shuffle.
43682  if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
43683      VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
43684    SDValue N00 = N0.getOperand(0);
43685    SDValue N01 = N0.getOperand(1);
43686    unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
43687    APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
43688    if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
43689        (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
43690      return concatSubVectors(N00, N01, DAG, dl);
43691    }
43692  }
43693
43694  return SDValue();
43695}
43696
43697/// Recursive helper for combineVectorSizedSetCCEquality() to see if we have a
43698/// recognizable memcmp expansion.
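/// For example, a 32-byte memcmp expanded with two i128 loads produces
///   (or (xor (load A0), (load B0)), (xor (load A1), (load B1)))
/// which is zero iff all bytes compare equal.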
43699static bool isOrXorXorTree(SDValue X, bool Root = true) {
43700  if (X.getOpcode() == ISD::OR)
43701    return isOrXorXorTree(X.getOperand(0), false) &&
43702           isOrXorXorTree(X.getOperand(1), false);
43703  if (Root)
43704    return false;
43705  return X.getOpcode() == ISD::XOR;
43706}
43707
43708/// Recursive helper for combineVectorSizedSetCCEquality() to emit the memcmp
43709/// expansion.
43710template<typename F>
43711static SDValue emitOrXorXorTree(SDValue X, SDLoc &DL, SelectionDAG &DAG,
43712                                EVT VecVT, EVT CmpVT, bool HasPT, F SToV) {
43713  SDValue Op0 = X.getOperand(0);
43714  SDValue Op1 = X.getOperand(1);
43715  if (X.getOpcode() == ISD::OR) {
43716    SDValue A = emitOrXorXorTree(Op0, DL, DAG, VecVT, CmpVT, HasPT, SToV);
43717    SDValue B = emitOrXorXorTree(Op1, DL, DAG, VecVT, CmpVT, HasPT, SToV);
43718    if (VecVT != CmpVT)
43719      return DAG.getNode(ISD::OR, DL, CmpVT, A, B);
43720    if (HasPT)
43721      return DAG.getNode(ISD::OR, DL, VecVT, A, B);
43722    return DAG.getNode(ISD::AND, DL, CmpVT, A, B);
43723  } else if (X.getOpcode() == ISD::XOR) {
43724    SDValue A = SToV(Op0);
43725    SDValue B = SToV(Op1);
43726    if (VecVT != CmpVT)
43727      return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETNE);
43728    if (HasPT)
43729      return DAG.getNode(ISD::XOR, DL, VecVT, A, B);
43730    return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
43731  }
43732  llvm_unreachable("Impossible");
43733}
43734
43735/// Try to map a 128-bit or larger integer comparison to vector instructions
43736/// before type legalization splits it up into chunks.
43737static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
43738                                               const X86Subtarget &Subtarget) {
43739  ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
43740  assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
43741
43742  // We're looking for an oversized integer equality comparison.
43743  SDValue X = SetCC->getOperand(0);
43744  SDValue Y = SetCC->getOperand(1);
43745  EVT OpVT = X.getValueType();
43746  unsigned OpSize = OpVT.getSizeInBits();
43747  if (!OpVT.isScalarInteger() || OpSize < 128)
43748    return SDValue();
43749
43750  // Ignore a comparison with zero because that gets special treatment in
43751  // EmitTest(). But make an exception for the special case of a pair of
43752  // logically-combined vector-sized operands compared to zero. This pattern may
43753  // be generated by the memcmp expansion pass with oversized integer compares
43754  // (see PR33325).
43755  bool IsOrXorXorTreeCCZero = isNullConstant(Y) && isOrXorXorTree(X);
43756  if (isNullConstant(Y) && !IsOrXorXorTreeCCZero)
43757    return SDValue();
43758
43759  // Don't perform this combine if constructing the vector will be expensive.
43760  auto IsVectorBitCastCheap = [](SDValue X) {
43761    X = peekThroughBitcasts(X);
43762    return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
43763           X.getOpcode() == ISD::LOAD;
43764  };
43765  if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
43766      !IsOrXorXorTreeCCZero)
43767    return SDValue();
43768
43769  EVT VT = SetCC->getValueType(0);
43770  SDLoc DL(SetCC);
43771  bool HasAVX = Subtarget.hasAVX();
43772
43773  // Use XOR (plus OR) and PTEST after SSE4.1 for 128/256-bit operands.
43774  // Use PCMPNEQ (plus OR) and KORTEST for 512-bit operands.
43775  // Otherwise use PCMPEQ (plus AND) and mask testing.
43776  if ((OpSize == 128 && Subtarget.hasSSE2()) ||
43777      (OpSize == 256 && HasAVX) ||
43778      (OpSize == 512 && Subtarget.useAVX512Regs())) {
43779    bool HasPT = Subtarget.hasSSE41();
43780
43781    // PTEST and MOVMSK are slow on Knights Landing and Knights Mill, and widened
43782    // vector registers are essentially free. (Technically, widening registers
43783    // prevents load folding, but the tradeoff is worth it.)
43784    bool PreferKOT = Subtarget.preferMaskRegisters();
43785    bool NeedZExt = PreferKOT && !Subtarget.hasVLX() && OpSize != 512;
43786
43787    EVT VecVT = MVT::v16i8;
43788    EVT CmpVT = PreferKOT ? MVT::v16i1 : VecVT;
43789    if (OpSize == 256) {
43790      VecVT = MVT::v32i8;
43791      CmpVT = PreferKOT ? MVT::v32i1 : VecVT;
43792    }
43793    EVT CastVT = VecVT;
43794    bool NeedsAVX512FCast = false;
43795    if (OpSize == 512 || NeedZExt) {
43796      if (Subtarget.hasBWI()) {
43797        VecVT = MVT::v64i8;
43798        CmpVT = MVT::v64i1;
43799        if (OpSize == 512)
43800          CastVT = VecVT;
43801      } else {
43802        VecVT = MVT::v16i32;
43803        CmpVT = MVT::v16i1;
43804        CastVT = OpSize == 512 ? VecVT :
43805                 OpSize == 256 ? MVT::v8i32 : MVT::v4i32;
43806        NeedsAVX512FCast = true;
43807      }
43808    }
43809
43810    auto ScalarToVector = [&](SDValue X) -> SDValue {
43811      bool TmpZext = false;
43812      EVT TmpCastVT = CastVT;
43813      if (X.getOpcode() == ISD::ZERO_EXTEND) {
43814        SDValue OrigX = X.getOperand(0);
43815        unsigned OrigSize = OrigX.getScalarValueSizeInBits();
43816        if (OrigSize < OpSize) {
43817          if (OrigSize == 128) {
43818            TmpCastVT = NeedsAVX512FCast ? MVT::v4i32 : MVT::v16i8;
43819            X = OrigX;
43820            TmpZext = true;
43821          } else if (OrigSize == 256) {
43822            TmpCastVT = NeedsAVX512FCast ? MVT::v8i32 : MVT::v32i8;
43823            X = OrigX;
43824            TmpZext = true;
43825          }
43826        }
43827      }
43828      X = DAG.getBitcast(TmpCastVT, X);
43829      if (!NeedZExt && !TmpZext)
43830        return X;
43831      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43832      MVT VecIdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
43833      return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT,
43834                         DAG.getConstant(0, DL, VecVT), X,
43835                         DAG.getConstant(0, DL, VecIdxVT));
43836    };
43837
43838    SDValue Cmp;
43839    if (IsOrXorXorTreeCCZero) {
43840      // This is a bitwise-combined equality comparison of 2 pairs of vectors:
43841      // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
43842      // Use 2 vector equality compares and 'and' the results before doing a
43843      // MOVMSK.
43844      Cmp = emitOrXorXorTree(X, DL, DAG, VecVT, CmpVT, HasPT, ScalarToVector);
43845    } else {
43846      SDValue VecX = ScalarToVector(X);
43847      SDValue VecY = ScalarToVector(Y);
43848      if (VecVT != CmpVT) {
43849        Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETNE);
43850      } else if (HasPT) {
43851        Cmp = DAG.getNode(ISD::XOR, DL, VecVT, VecX, VecY);
43852      } else {
43853        Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
43854      }
43855    }
43856    // AVX512 should emit a setcc that will lower to kortest.
43857    if (VecVT != CmpVT) {
43858      EVT KRegVT = CmpVT == MVT::v64i1 ? MVT::i64 :
43859                   CmpVT == MVT::v32i1 ? MVT::i32 : MVT::i16;
43860      return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp),
43861                          DAG.getConstant(0, DL, KRegVT), CC);
43862    }
43863    if (HasPT) {
43864      SDValue BCCmp = DAG.getBitcast(OpSize == 256 ? MVT::v4i64 : MVT::v2i64,
43865                                     Cmp);
43866      SDValue PT = DAG.getNode(X86ISD::PTEST, DL, MVT::i32, BCCmp, BCCmp);
43867      X86::CondCode X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
43868      SDValue SetCC = getSETCC(X86CC, PT, DL, DAG);
43869      return DAG.getNode(ISD::TRUNCATE, DL, VT, SetCC.getValue(0));
43870    }
43871    // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
43872    // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
43873    // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
43874    // setcc i256 X, Y, eq --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, eq
43875    // setcc i256 X, Y, ne --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, ne
43876    SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
43877    SDValue FFFFs = DAG.getConstant(OpSize == 128 ? 0xFFFF : 0xFFFFFFFF, DL,
43878                                    MVT::i32);
43879    return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
43880  }
43881
43882  return SDValue();
43883}
43884
43885static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
43886                            const X86Subtarget &Subtarget) {
43887  const ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
43888  const SDValue LHS = N->getOperand(0);
43889  const SDValue RHS = N->getOperand(1);
43890  EVT VT = N->getValueType(0);
43891  EVT OpVT = LHS.getValueType();
43892  SDLoc DL(N);
43893
43894  if (CC == ISD::SETNE || CC == ISD::SETEQ) {
43895    // 0-x == y --> x+y == 0
43896    // 0-x != y --> x+y != 0
43897    if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
43898        LHS.hasOneUse()) {
43899      SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, RHS, LHS.getOperand(1));
43900      return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
43901    }
43902    // x == 0-y --> x+y == 0
43903    // x != 0-y --> x+y != 0
43904    if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
43905        RHS.hasOneUse()) {
43906      SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
43907      return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
43908    }
43909
43910    if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget))
43911      return V;
43912  }
43913
43914  if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
43915      (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
43916    // Using temporaries to avoid messing up operand ordering for later
43917    // transformations if this doesn't work.
43918    SDValue Op0 = LHS;
43919    SDValue Op1 = RHS;
43920    ISD::CondCode TmpCC = CC;
43921    // Put build_vector on the right.
43922    if (Op0.getOpcode() == ISD::BUILD_VECTOR) {
43923      std::swap(Op0, Op1);
43924      TmpCC = ISD::getSetCCSwappedOperands(TmpCC);
43925    }
43926
43927    bool IsSEXT0 =
43928        (Op0.getOpcode() == ISD::SIGN_EXTEND) &&
43929        (Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
43930    bool IsVZero1 = ISD::isBuildVectorAllZeros(Op1.getNode());
43931
43932    if (IsSEXT0 && IsVZero1) {
43933      assert(VT == Op0.getOperand(0).getValueType() &&
43934             "Unexpected operand type");
43935      if (TmpCC == ISD::SETGT)
43936        return DAG.getConstant(0, DL, VT);
43937      if (TmpCC == ISD::SETLE)
43938        return DAG.getConstant(1, DL, VT);
43939      if (TmpCC == ISD::SETEQ || TmpCC == ISD::SETGE)
43940        return DAG.getNOT(DL, Op0.getOperand(0), VT);
43941
43942      assert((TmpCC == ISD::SETNE || TmpCC == ISD::SETLT) &&
43943             "Unexpected condition code!");
43944      return Op0.getOperand(0);
43945    }
43946  }
43947
43948  // If we have AVX512, but not BWI and this is a vXi16/vXi8 setcc, just
43949  // pre-promote its result type since vXi1 vectors don't get promoted
43950  // during type legalization.
43951  // NOTE: The element count check is to ignore operand types that need to
43952  // go through type promotion to a 128-bit vector.
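  // For example (illustrative): (v16i1 (setcc v16i8 X, Y, eq)) becomes
  // (v16i1 (truncate (v16i8 (setcc v16i8 X, Y, eq)))).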
43953  if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
43954      VT.getVectorElementType() == MVT::i1 &&
43955      (OpVT.getVectorElementType() == MVT::i8 ||
43956       OpVT.getVectorElementType() == MVT::i16)) {
43957    SDValue Setcc = DAG.getSetCC(DL, OpVT, LHS, RHS, CC);
43958    return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
43959  }
43960
43961  // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
43962  // to avoid scalarization via legalization because v4i32 is not a legal type.
43963  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
43964      LHS.getValueType() == MVT::v4f32)
43965    return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
43966
43967  return SDValue();
43968}
43969
43970static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
43971                             TargetLowering::DAGCombinerInfo &DCI,
43972                             const X86Subtarget &Subtarget) {
43973  SDValue Src = N->getOperand(0);
43974  MVT SrcVT = Src.getSimpleValueType();
43975  MVT VT = N->getSimpleValueType(0);
43976  unsigned NumBits = VT.getScalarSizeInBits();
43977  unsigned NumElts = SrcVT.getVectorNumElements();
43978
43979  // Perform constant folding.
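  // For example (illustrative), movmsk (v4i32 <-1, 0, -1, 0>) folds to the
  // constant i32 5 (0b0101): bit i is set iff element i has its sign bit set.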
43980  if (ISD::isBuildVectorOfConstantSDNodes(Src.getNode())) {
43981    assert(VT == MVT::i32 && "Unexpected result type");
43982    APInt Imm(32, 0);
43983    for (unsigned Idx = 0, e = Src.getNumOperands(); Idx < e; ++Idx) {
43984      if (!Src.getOperand(Idx).isUndef() &&
43985          Src.getConstantOperandAPInt(Idx).isNegative())
43986        Imm.setBit(Idx);
43987    }
43988    return DAG.getConstant(Imm, SDLoc(N), VT);
43989  }
43990
43991  // Look through int->fp bitcasts that don't change the element width.
43992  unsigned EltWidth = SrcVT.getScalarSizeInBits();
43993  if (Subtarget.hasSSE2() && Src.getOpcode() == ISD::BITCAST &&
43994      Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
43995    return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));
43996
43997  // Fold movmsk(not(x)) -> not(movmsk) to improve folding of movmsk results
43998  // with scalar comparisons.
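  // For example (illustrative), for v4i32:
  //   movmsk (xor X, <-1, -1, -1, -1>) --> xor (movmsk X), 0xF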
43999  if (SDValue NotSrc = IsNOT(Src, DAG)) {
44000    SDLoc DL(N);
44001    APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
44002    NotSrc = DAG.getBitcast(SrcVT, NotSrc);
44003    return DAG.getNode(ISD::XOR, DL, VT,
44004                       DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
44005                       DAG.getConstant(NotMask, DL, VT));
44006  }
44007
44008  // Simplify the inputs.
44009  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44010  APInt DemandedMask(APInt::getAllOnesValue(NumBits));
44011  if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
44012    return SDValue(N, 0);
44013
44014  return SDValue();
44015}
44016
44017static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
44018                                       TargetLowering::DAGCombinerInfo &DCI) {
44019  // With vector masks we only demand the upper bit of the mask.
44020  SDValue Mask = cast<X86MaskedGatherScatterSDNode>(N)->getMask();
44021  if (Mask.getScalarValueSizeInBits() != 1) {
44022    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44023    APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
44024    if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
44025      return SDValue(N, 0);
44026  }
44027
44028  return SDValue();
44029}
44030
44031static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
44032                                    TargetLowering::DAGCombinerInfo &DCI) {
44033  SDLoc DL(N);
44034  auto *GorS = cast<MaskedGatherScatterSDNode>(N);
44035  SDValue Chain = GorS->getChain();
44036  SDValue Index = GorS->getIndex();
44037  SDValue Mask = GorS->getMask();
44038  SDValue Base = GorS->getBasePtr();
44039  SDValue Scale = GorS->getScale();
44040
44041  if (DCI.isBeforeLegalize()) {
44042    unsigned IndexWidth = Index.getScalarValueSizeInBits();
44043
44044    // Shrink constant indices if they are larger than 32-bits.
44045    // Only do this before legalize types since v2i64 could become v2i32.
44046    // FIXME: We could check that the type is legal if we're after legalize
44047    // types, but then we would need to construct test cases where that happens.
44048    // FIXME: We could support more than just constant vectors, but we need to
44049    // be careful with costing. A truncate that can be optimized out would be fine.
44050    // Otherwise we might only want to create a truncate if it avoids a split.
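    // For example (illustrative), a v2i64 constant index <0, 8> has more than
    // 32 sign bits per element and can be truncated to a v2i32 index <0, 8>.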
44051    if (auto *BV = dyn_cast<BuildVectorSDNode>(Index)) {
44052      if (BV->isConstant() && IndexWidth > 32 &&
44053          DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
44054        unsigned NumElts = Index.getValueType().getVectorNumElements();
44055        EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
44056        Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
44057        if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
44058          SDValue Ops[] = { Chain, Gather->getPassThru(),
44059                            Mask, Base, Index, Scale };
44060          return DAG.getMaskedGather(Gather->getVTList(),
44061                                     Gather->getMemoryVT(), DL, Ops,
44062                                     Gather->getMemOperand(),
44063                                     Gather->getIndexType());
44064        }
44065        auto *Scatter = cast<MaskedScatterSDNode>(GorS);
44066        SDValue Ops[] = { Chain, Scatter->getValue(),
44067                          Mask, Base, Index, Scale };
44068        return DAG.getMaskedScatter(Scatter->getVTList(),
44069                                    Scatter->getMemoryVT(), DL,
44070                                    Ops, Scatter->getMemOperand(),
44071                                    Scatter->getIndexType());
44072      }
44073    }
44074
44075    // Shrink any sign/zero extends from 32 or smaller to larger than 32 if
44076    // there are sufficient sign bits. Only do this before legalize types to
44077    // avoid creating illegal types in truncate.
44078    if ((Index.getOpcode() == ISD::SIGN_EXTEND ||
44079         Index.getOpcode() == ISD::ZERO_EXTEND) &&
44080        IndexWidth > 32 &&
44081        Index.getOperand(0).getScalarValueSizeInBits() <= 32 &&
44082        DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
44083      unsigned NumElts = Index.getValueType().getVectorNumElements();
44084      EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
44085      Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
44086      if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
44087        SDValue Ops[] = { Chain, Gather->getPassThru(),
44088                          Mask, Base, Index, Scale };
44089        return DAG.getMaskedGather(Gather->getVTList(),
44090                                   Gather->getMemoryVT(), DL, Ops,
44091                                   Gather->getMemOperand(),
44092                                   Gather->getIndexType());
44093      }
44094      auto *Scatter = cast<MaskedScatterSDNode>(GorS);
44095      SDValue Ops[] = { Chain, Scatter->getValue(),
44096                        Mask, Base, Index, Scale };
44097      return DAG.getMaskedScatter(Scatter->getVTList(),
44098                                  Scatter->getMemoryVT(), DL,
44099                                  Ops, Scatter->getMemOperand(),
44100                                  Scatter->getIndexType());
44101    }
44102  }
44103
44104  if (DCI.isBeforeLegalizeOps()) {
44105    unsigned IndexWidth = Index.getScalarValueSizeInBits();
44106
44107    // Make sure the index is either i32 or i64
44108    if (IndexWidth != 32 && IndexWidth != 64) {
44109      MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
44110      EVT IndexVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
44111                                   Index.getValueType().getVectorNumElements());
44112      Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
44113      if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
44114        SDValue Ops[] = { Chain, Gather->getPassThru(),
44115                          Mask, Base, Index, Scale };
44116        return DAG.getMaskedGather(Gather->getVTList(),
44117                                   Gather->getMemoryVT(), DL, Ops,
44118                                   Gather->getMemOperand(),
44119                                   Gather->getIndexType());
44120      }
44121      auto *Scatter = cast<MaskedScatterSDNode>(GorS);
44122      SDValue Ops[] = { Chain, Scatter->getValue(),
44123                        Mask, Base, Index, Scale };
44124      return DAG.getMaskedScatter(Scatter->getVTList(),
44125                                  Scatter->getMemoryVT(), DL,
44126                                  Ops, Scatter->getMemOperand(),
44127                                  Scatter->getIndexType());
44128    }
44129  }
44130
44131  // With vector masks we only demand the upper bit of the mask.
44132  if (Mask.getScalarValueSizeInBits() != 1) {
44133    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44134    APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
44135    if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
44136      return SDValue(N, 0);
44137  }
44138
44139  return SDValue();
44140}
44141
44142// Optimize  RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
44143static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
44144                               const X86Subtarget &Subtarget) {
44145  SDLoc DL(N);
44146  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
44147  SDValue EFLAGS = N->getOperand(1);
44148
44149  // Try to simplify the EFLAGS and condition code operands.
44150  if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
44151    return getSETCC(CC, Flags, DL, DAG);
44152
44153  return SDValue();
44154}
44155
44156/// Optimize branch condition evaluation.
44157static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
44158                             const X86Subtarget &Subtarget) {
44159  SDLoc DL(N);
44160  SDValue EFLAGS = N->getOperand(3);
44161  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
44162
44163  // Try to simplify the EFLAGS and condition code operands.
44164  // Make sure to not keep references to operands, as combineSetCCEFLAGS can
44165  // RAUW them under us.
44166  if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
44167    SDValue Cond = DAG.getTargetConstant(CC, DL, MVT::i8);
44168    return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
44169                       N->getOperand(1), Cond, Flags);
44170  }
44171
44172  return SDValue();
44173}
44174
44175static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
44176                                                  SelectionDAG &DAG) {
44177  // Take advantage of vector comparisons producing 0 or -1 in each lane to
44178  // optimize away operation when it's from a constant.
44179  //
44180  // The general transformation is:
44181  //    UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
44182  //       AND(VECTOR_CMP(x,y), constant2)
44183  //    constant2 = UNARYOP(constant)
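  //
  // For example (illustrative), with UNARYOP = sint_to_fp and constant
  //    <4 x i32> <1, 1, 1, 1>, constant2 is <4 x float> <1.0, 1.0, 1.0, 1.0>
  //    and the AND is performed on the integer bitcast of constant2.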
44184
44185  // Early exit if this isn't a vector operation, the operand of the
44186  // unary operation isn't a bitwise AND, or if the sizes of the operations
44187  // aren't the same.
44188  EVT VT = N->getValueType(0);
44189  bool IsStrict = N->isStrictFPOpcode();
44190  SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
44191  if (!VT.isVector() || Op0->getOpcode() != ISD::AND ||
44192      Op0->getOperand(0)->getOpcode() != ISD::SETCC ||
44193      VT.getSizeInBits() != Op0.getValueSizeInBits())
44194    return SDValue();
44195
44196  // Now check that the other operand of the AND is a constant. We could
44197  // make the transformation for non-constant splats as well, but it's unclear
44198  // that would be a benefit as it would not eliminate any operations, just
44199  // perform one more step in scalar code before moving to the vector unit.
44200  if (auto *BV = dyn_cast<BuildVectorSDNode>(Op0.getOperand(1))) {
44201    // Bail out if the vector isn't a constant.
44202    if (!BV->isConstant())
44203      return SDValue();
44204
44205    // Everything checks out. Build up the new and improved node.
44206    SDLoc DL(N);
44207    EVT IntVT = BV->getValueType(0);
44208    // Create a new constant of the appropriate type for the transformed
44209    // DAG.
44210    SDValue SourceConst;
44211    if (IsStrict)
44212      SourceConst = DAG.getNode(N->getOpcode(), DL, {VT, MVT::Other},
44213                                {N->getOperand(0), SDValue(BV, 0)});
44214    else
44215      SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
44216    // The AND node needs bitcasts to/from an integer vector type around it.
44217    SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
44218    SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT, Op0->getOperand(0),
44219                                 MaskConst);
44220    SDValue Res = DAG.getBitcast(VT, NewAnd);
44221    if (IsStrict)
44222      return DAG.getMergeValues({Res, SourceConst.getValue(1)}, DL);
44223    return Res;
44224  }
44225
44226  return SDValue();
44227}
44228
44229/// If we are converting a value to floating-point, try to replace scalar
44230/// truncate of an extracted vector element with a bitcast. This tries to keep
44231/// the sequence on XMM registers rather than moving between vector and GPRs.
44232static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
44233  // TODO: This is currently only used by combineSIntToFP, but it is generalized
44234  //       to allow being called by any similar cast opcode.
44235  // TODO: Consider merging this into lowering: vectorizeExtractedCast().
44236  SDValue Trunc = N->getOperand(0);
44237  if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
44238    return SDValue();
44239
44240  SDValue ExtElt = Trunc.getOperand(0);
44241  if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
44242      !isNullConstant(ExtElt.getOperand(1)))
44243    return SDValue();
44244
44245  EVT TruncVT = Trunc.getValueType();
44246  EVT SrcVT = ExtElt.getValueType();
44247  unsigned DestWidth = TruncVT.getSizeInBits();
44248  unsigned SrcWidth = SrcVT.getSizeInBits();
44249  if (SrcWidth % DestWidth != 0)
44250    return SDValue();
44251
44252  // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
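  // For example (illustrative):
  //   sitofp (i32 (trunc (i64 (extractelt v2i64 X, 0)))) -->
  //   sitofp (i32 (extractelt (v4i32 (bitcast X)), 0))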
44253  EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
44254  unsigned VecWidth = SrcVecVT.getSizeInBits();
44255  unsigned NumElts = VecWidth / DestWidth;
44256  EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
44257  SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
44258  SDLoc DL(N);
44259  SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
44260                                  BitcastVec, ExtElt.getOperand(1));
44261  return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
44262}
44263
44264static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
44265                               const X86Subtarget &Subtarget) {
44266  bool IsStrict = N->isStrictFPOpcode();
44267  SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
44268  EVT VT = N->getValueType(0);
44269  EVT InVT = Op0.getValueType();
44270
44271  // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
44272  // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
44273  // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
44274  if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
44275    SDLoc dl(N);
44276    EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
44277                                 InVT.getVectorNumElements());
44278    SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
44279
44280    // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
44281    if (IsStrict)
44282      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
44283                         {N->getOperand(0), P});
44284    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
44285  }
44286
44287  // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
44288  // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
44289  // the optimization here.
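  // For example (illustrative), (uint_to_fp (and X, 0x7fff)) has a known-zero
  // sign bit and can be converted as (sint_to_fp (and X, 0x7fff)).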
44290  if (DAG.SignBitIsZero(Op0)) {
44291    if (IsStrict)
44292      return DAG.getNode(ISD::STRICT_SINT_TO_FP, SDLoc(N), {VT, MVT::Other},
44293                         {N->getOperand(0), Op0});
44294    return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
44295  }
44296
44297  return SDValue();
44298}
44299
44300static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
44301                               TargetLowering::DAGCombinerInfo &DCI,
44302                               const X86Subtarget &Subtarget) {
44303  // First try to optimize away the conversion entirely when it's
44304  // conditionally from a constant. Vectors only.
44305  bool IsStrict = N->isStrictFPOpcode();
44306  if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
44307    return Res;
44308
44309  // Now move on to more general possibilities.
44310  SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
44311  EVT VT = N->getValueType(0);
44312  EVT InVT = Op0.getValueType();
44313
44314  // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
44315  // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
44316  // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
44317  if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
44318    SDLoc dl(N);
44319    EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
44320                                 InVT.getVectorNumElements());
44321    SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
44322    if (IsStrict)
44323      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
44324                         {N->getOperand(0), P});
44325    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
44326  }
44327
44328  // Without AVX512DQ we only support i64 to float scalar conversion. For both
44329  // vectors and scalars, see if we know that the upper bits are all the sign
44330  // bit, in which case we can truncate the input to i32 and convert from that.
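  // For example (illustrative), (sint_to_fp (i64 (sext i32 A))) has at least
  // 33 sign bits, so it is converted via an i32 truncation of the input.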
44331  if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
44332    unsigned BitWidth = InVT.getScalarSizeInBits();
44333    unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
44334    if (NumSignBits >= (BitWidth - 31)) {
44335      EVT TruncVT = MVT::i32;
44336      if (InVT.isVector())
44337        TruncVT = EVT::getVectorVT(*DAG.getContext(), TruncVT,
44338                                   InVT.getVectorNumElements());
44339      SDLoc dl(N);
44340      if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
44341        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
44342        if (IsStrict)
44343          return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
44344                             {N->getOperand(0), Trunc});
44345        return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
44346      }
44347      // If we're after legalize and the type is v2i32 we need to shuffle and
44348      // use CVTSI2P.
44349      assert(InVT == MVT::v2i64 && "Unexpected VT!");
44350      SDValue Cast = DAG.getBitcast(MVT::v4i32, Op0);
44351      SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Cast, Cast,
44352                                          { 0, 2, -1, -1 });
44353      if (IsStrict)
44354        return DAG.getNode(X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
44355                           {N->getOperand(0), Shuf});
44356      return DAG.getNode(X86ISD::CVTSI2P, dl, VT, Shuf);
44357    }
44358  }
44359
44360  // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
44361  // a 32-bit target where SSE doesn't support i64->FP operations.
44362  if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
44363      Op0.getOpcode() == ISD::LOAD) {
44364    LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
44365    EVT LdVT = Ld->getValueType(0);
44366
44367    // This transformation is not supported if the result type is f16 or f128.
44368    if (VT == MVT::f16 || VT == MVT::f128)
44369      return SDValue();
44370
44371    // If we have AVX512DQ we can use packed conversion instructions unless
44372    // the VT is f80.
44373    if (Subtarget.hasDQI() && VT != MVT::f80)
44374      return SDValue();
44375
44376    if (Ld->isSimple() && !VT.isVector() &&
44377        ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
44378        !Subtarget.is64Bit() && LdVT == MVT::i64) {
44379      std::pair<SDValue, SDValue> Tmp = Subtarget.getTargetLowering()->BuildFILD(
44380          SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG);
44381      DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Tmp.second);
44382      return Tmp.first;
44383    }
44384  }
44385
44386  if (IsStrict)
44387    return SDValue();
44388
44389  if (SDValue V = combineToFPTruncExtElt(N, DAG))
44390    return V;
44391
44392  return SDValue();
44393}
44394
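/// Return true if any user of this EFLAGS value reads a condition that
/// depends on the carry, overflow, or signed-comparison flags. Unknown users
/// are treated conservatively as needing them.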
44395static bool needCarryOrOverflowFlag(SDValue Flags) {
44396  assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
44397
44398  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
44399         UI != UE; ++UI) {
44400    SDNode *User = *UI;
44401
44402    X86::CondCode CC;
44403    switch (User->getOpcode()) {
44404    default:
44405      // Be conservative.
44406      return true;
44407    case X86ISD::SETCC:
44408    case X86ISD::SETCC_CARRY:
44409      CC = (X86::CondCode)User->getConstantOperandVal(0);
44410      break;
44411    case X86ISD::BRCOND:
44412      CC = (X86::CondCode)User->getConstantOperandVal(2);
44413      break;
44414    case X86ISD::CMOV:
44415      CC = (X86::CondCode)User->getConstantOperandVal(2);
44416      break;
44417    }
44418
44419    switch (CC) {
44420    default: break;
44421    case X86::COND_A: case X86::COND_AE:
44422    case X86::COND_B: case X86::COND_BE:
44423    case X86::COND_O: case X86::COND_NO:
44424    case X86::COND_G: case X86::COND_GE:
44425    case X86::COND_L: case X86::COND_LE:
44426      return true;
44427    }
44428  }
44429
44430  return false;
44431}
44432
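/// Return true if every user of this EFLAGS value only tests the zero flag
/// (i.e. uses only COND_E or COND_NE).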
44433static bool onlyZeroFlagUsed(SDValue Flags) {
44434  assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
44435
44436  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
44437         UI != UE; ++UI) {
44438    SDNode *User = *UI;
44439
44440    unsigned CCOpNo;
44441    switch (User->getOpcode()) {
44442    default:
44443      // Be conservative.
44444      return false;
44445    case X86ISD::SETCC:       CCOpNo = 0; break;
44446    case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
44447    case X86ISD::BRCOND:      CCOpNo = 2; break;
44448    case X86ISD::CMOV:        CCOpNo = 2; break;
44449    }
44450
44451    X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
44452    if (CC != X86::COND_E && CC != X86::COND_NE)
44453      return false;
44454  }
44455
44456  return true;
44457}
44458
44459static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) {
44460  // Only handle test patterns.
44461  if (!isNullConstant(N->getOperand(1)))
44462    return SDValue();
44463
44464  // If we have a CMP of a truncated binop, see if we can make a smaller binop
44465  // and use its flags directly.
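  // For example (illustrative), when the carry and overflow flags are unused:
  //   (cmp (i8 (trunc (i32 (add A, B)))), 0)
  //     --> flags of (X86ISD::ADD (i8 (trunc A)), (i8 (trunc B)))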
44466  // TODO: Maybe we should try promoting compares that only use the zero flag
44467  // first if we can prove the upper bits with computeKnownBits?
44468  SDLoc dl(N);
44469  SDValue Op = N->getOperand(0);
44470  EVT VT = Op.getValueType();
44471
44472  // If we have a constant logical shift that's only used in a comparison
44473  // against zero, turn it into an equivalent AND. This allows turning it into
44474  // a TEST instruction later.
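  // For example (illustrative), with a 32-bit operand:
  //   (cmp (srl X, 8), 0) --> (cmp (and X, 0xFFFFFF00), 0)
  //   (cmp (shl X, 8), 0) --> (cmp (and X, 0x00FFFFFF), 0)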
44475  if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
44476      Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
44477      onlyZeroFlagUsed(SDValue(N, 0))) {
44478    unsigned BitWidth = VT.getSizeInBits();
44479    const APInt &ShAmt = Op.getConstantOperandAPInt(1);
44480    if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
44481      unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
44482      APInt Mask = Op.getOpcode() == ISD::SRL
44483                       ? APInt::getHighBitsSet(BitWidth, MaskBits)
44484                       : APInt::getLowBitsSet(BitWidth, MaskBits);
44485      if (Mask.isSignedIntN(32)) {
44486        Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
44487                         DAG.getConstant(Mask, dl, VT));
44488        return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
44489                           DAG.getConstant(0, dl, VT));
44490      }
44491    }
44492  }
44493
44494  // Look for a truncate with a single use.
44495  if (Op.getOpcode() != ISD::TRUNCATE || !Op.hasOneUse())
44496    return SDValue();
44497
44498  Op = Op.getOperand(0);
44499
44500  // Arithmetic op can only have one use.
44501  if (!Op.hasOneUse())
44502    return SDValue();
44503
44504  unsigned NewOpc;
44505  switch (Op.getOpcode()) {
44506  default: return SDValue();
44507  case ISD::AND:
44508    // Skip AND with a constant operand. We have special handling for AND with
44509    // an immediate during isel to generate TEST instructions.
44510    if (isa<ConstantSDNode>(Op.getOperand(1)))
44511      return SDValue();
44512    NewOpc = X86ISD::AND;
44513    break;
44514  case ISD::OR:  NewOpc = X86ISD::OR;  break;
44515  case ISD::XOR: NewOpc = X86ISD::XOR; break;
44516  case ISD::ADD:
44517    // If the carry or overflow flag is used, we can't truncate.
44518    if (needCarryOrOverflowFlag(SDValue(N, 0)))
44519      return SDValue();
44520    NewOpc = X86ISD::ADD;
44521    break;
44522  case ISD::SUB:
44523    // If the carry or overflow flag is used, we can't truncate.
44524    if (needCarryOrOverflowFlag(SDValue(N, 0)))
44525      return SDValue();
44526    NewOpc = X86ISD::SUB;
44527    break;
44528  }
44529
44530  // We found an op we can narrow. Truncate its inputs.
44531  SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
44532  SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
44533
44534  // Use a X86 specific opcode to avoid DAG combine messing with it.
44535  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
44536  Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
44537
44538  // For AND, keep a CMP so that we can match the test pattern.
44539  if (NewOpc == X86ISD::AND)
44540    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
44541                       DAG.getConstant(0, dl, VT));
44542
44543  // Return the flags.
44544  return Op.getValue(1);
44545}
44546
44547static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
44548                                TargetLowering::DAGCombinerInfo &DCI) {
44549  assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
44550         "Expected X86ISD::ADD or X86ISD::SUB");
44551
44552  SDLoc DL(N);
44553  SDValue LHS = N->getOperand(0);
44554  SDValue RHS = N->getOperand(1);
44555  MVT VT = LHS.getSimpleValueType();
44556  unsigned GenericOpc = X86ISD::ADD == N->getOpcode() ? ISD::ADD : ISD::SUB;
44557
44558  // If we don't use the flag result, simplify back to a generic ADD/SUB.
44559  if (!N->hasAnyUseOfValue(1)) {
44560    SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
44561    return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
44562  }
44563
44564  // Fold any similar generic ADD/SUB opcodes to reuse this node.
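  // For example (illustrative), if both (X86ISD::SUB X, Y) and (ISD::SUB X, Y)
  // exist, the generic node is replaced with value 0 of this node, and a
  // matching (ISD::SUB Y, X) is replaced with its negation.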
44565  auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
44566    SDValue Ops[] = {N0, N1};
44567    SDVTList VTs = DAG.getVTList(N->getValueType(0));
44568    if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops)) {
44569      SDValue Op(N, 0);
44570      if (Negate)
44571        Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
44572      DCI.CombineTo(GenericAddSub, Op);
44573    }
44574  };
44575  MatchGeneric(LHS, RHS, false);
44576  MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());
44577
44578  return SDValue();
44579}
44580
44581static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
44582  if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
44583    MVT VT = N->getSimpleValueType(0);
44584    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
44585    return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs,
44586                       N->getOperand(0), N->getOperand(1),
44587                       Flags);
44588  }
44589
44590  // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
44591  // iff the flag result is dead.
44592  SDValue Op0 = N->getOperand(0);
44593  SDValue Op1 = N->getOperand(1);
44594  if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op1) &&
44595      !N->hasAnyUseOfValue(1))
44596    return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), Op0.getOperand(0),
44597                       Op0.getOperand(1), N->getOperand(2));
44598
44599  return SDValue();
44600}
44601
44602// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
44603static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
44604                          TargetLowering::DAGCombinerInfo &DCI) {
44605  // If the LHS and RHS of the ADC node are zero, then it can't overflow and
44606  // the result is either zero or one (depending on the input carry bit).
44607  // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
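  // For example (illustrative), (adc 0, 0, EFLAGS) produces just the carry
  // bit, so it becomes (and (setcc_carry COND_B, EFLAGS), 1).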
44608  if (X86::isZeroNode(N->getOperand(0)) &&
44609      X86::isZeroNode(N->getOperand(1)) &&
44610      // We don't have a good way to replace an EFLAGS use, so only do this when
44611      // dead right now.
44612      SDValue(N, 1).use_empty()) {
44613    SDLoc DL(N);
44614    EVT VT = N->getValueType(0);
44615    SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
44616    SDValue Res1 =
44617        DAG.getNode(ISD::AND, DL, VT,
44618                    DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
44619                                DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
44620                                N->getOperand(2)),
44621                    DAG.getConstant(1, DL, VT));
44622    return DCI.CombineTo(N, Res1, CarryOut);
44623  }
44624
44625  if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
44626    MVT VT = N->getSimpleValueType(0);
44627    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
44628    return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs,
44629                       N->getOperand(0), N->getOperand(1),
44630                       Flags);
44631  }
44632
44633  return SDValue();
44634}
44635
44636/// If this is an add or subtract where one operand is produced by a cmp+setcc,
44637/// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
44638/// with CMP+{ADC, SBB}.
44639static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
44640  bool IsSub = N->getOpcode() == ISD::SUB;
44641  SDValue X = N->getOperand(0);
44642  SDValue Y = N->getOperand(1);
44643
44644  // If this is an add, canonicalize a zext operand to the RHS.
44645  // TODO: Incomplete? What if both sides are zexts?
44646  if (!IsSub && X.getOpcode() == ISD::ZERO_EXTEND &&
44647      Y.getOpcode() != ISD::ZERO_EXTEND)
44648    std::swap(X, Y);
44649
44650  // Look through a one-use zext.
44651  bool PeekedThroughZext = false;
44652  if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse()) {
44653    Y = Y.getOperand(0);
44654    PeekedThroughZext = true;
44655  }
44656
44657  // If this is an add, canonicalize a setcc operand to the RHS.
44658  // TODO: Incomplete? What if both sides are setcc?
44659  // TODO: Should we allow peeking through a zext of the other operand?
44660  if (!IsSub && !PeekedThroughZext && X.getOpcode() == X86ISD::SETCC &&
44661      Y.getOpcode() != X86ISD::SETCC)
44662    std::swap(X, Y);
44663
44664  if (Y.getOpcode() != X86ISD::SETCC || !Y.hasOneUse())
44665    return SDValue();
44666
44667  SDLoc DL(N);
44668  EVT VT = N->getValueType(0);
44669  X86::CondCode CC = (X86::CondCode)Y.getConstantOperandVal(0);
44670
44671  // If X is -1 or 0, then we have an opportunity to avoid constants required in
44672  // the general case below.
44673  auto *ConstantX = dyn_cast<ConstantSDNode>(X);
44674  if (ConstantX) {
44675    if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnesValue()) ||
44676        (IsSub && CC == X86::COND_B && ConstantX->isNullValue())) {
44677      // This is a complicated way to get -1 or 0 from the carry flag:
44678      // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
44679      //  0 - SETB  -->  0 -  (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
44680      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
44681                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
44682                         Y.getOperand(1));
44683    }
44684
44685    if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnesValue()) ||
44686        (IsSub && CC == X86::COND_A && ConstantX->isNullValue())) {
44687      SDValue EFLAGS = Y->getOperand(1);
44688      if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
44689          EFLAGS.getValueType().isInteger() &&
44690          !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
44691        // Swap the operands of a SUB, and we have the same pattern as above.
44692        // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
44693        //  0 - SETA  (SUB A, B) -->  0 - SETB  (SUB B, A) --> SUB + SBB
44694        SDValue NewSub = DAG.getNode(
44695            X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
44696            EFLAGS.getOperand(1), EFLAGS.getOperand(0));
44697        SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
44698        return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
44699                           DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
44700                           NewEFLAGS);
44701      }
44702    }
44703  }
44704
44705  if (CC == X86::COND_B) {
44706    // X + SETB Z --> adc X, 0
44707    // X - SETB Z --> sbb X, 0
44708    return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
44709                       DAG.getVTList(VT, MVT::i32), X,
44710                       DAG.getConstant(0, DL, VT), Y.getOperand(1));
44711  }
44712
44713  if (CC == X86::COND_A) {
44714    SDValue EFLAGS = Y->getOperand(1);
44715    // Try to convert COND_A into COND_B in an attempt to facilitate
44716    // materializing "setb reg".
44717    //
44718    // Do not flip "e > c", where "c" is a constant, because Cmp instruction
44719    // cannot take an immediate as its first operand.
44720    //
44721    if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
44722        EFLAGS.getValueType().isInteger() &&
44723        !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
44724      SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
44725                                   EFLAGS.getNode()->getVTList(),
44726                                   EFLAGS.getOperand(1), EFLAGS.getOperand(0));
44727      SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
44728      return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
44729                         DAG.getVTList(VT, MVT::i32), X,
44730                         DAG.getConstant(0, DL, VT), NewEFLAGS);
44731    }
44732  }
44733
44734  if (CC != X86::COND_E && CC != X86::COND_NE)
44735    return SDValue();
44736
44737  SDValue Cmp = Y.getOperand(1);
44738  if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
44739      !X86::isZeroNode(Cmp.getOperand(1)) ||
44740      !Cmp.getOperand(0).getValueType().isInteger())
44741    return SDValue();
44742
44743  SDValue Z = Cmp.getOperand(0);
44744  EVT ZVT = Z.getValueType();
44745
44746  // If X is -1 or 0, then we have an opportunity to avoid constants required in
44747  // the general case below.
44748  if (ConstantX) {
44749    // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
44750    // fake operands:
44751    //  0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
44752    // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
44753    if ((IsSub && CC == X86::COND_NE && ConstantX->isNullValue()) ||
44754        (!IsSub && CC == X86::COND_E && ConstantX->isAllOnesValue())) {
44755      SDValue Zero = DAG.getConstant(0, DL, ZVT);
44756      SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
44757      SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
44758      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
44759                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
44760                         SDValue(Neg.getNode(), 1));
44761    }
44762
44763    // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
44764    // with fake operands:
44765    //  0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
44766    // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
44767    if ((IsSub && CC == X86::COND_E && ConstantX->isNullValue()) ||
44768        (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnesValue())) {
44769      SDValue One = DAG.getConstant(1, DL, ZVT);
44770      SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
44771      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
44772                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cmp1);
44773    }
44774  }
44775
44776  // (cmp Z, 1) sets the carry flag if Z is 0.
44777  SDValue One = DAG.getConstant(1, DL, ZVT);
44778  SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
44779
44780  // Add the flags type for ADC/SBB nodes.
44781  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
44782
44783  // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
44784  // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
44785  if (CC == X86::COND_NE)
44786    return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
44787                       DAG.getConstant(-1ULL, DL, VT), Cmp1);
44788
44789  // X - (Z == 0) --> sub X, (zext(sete  Z, 0)) --> sbb X, 0, (cmp Z, 1)
44790  // X + (Z == 0) --> add X, (zext(sete  Z, 0)) --> adc X, 0, (cmp Z, 1)
44791  return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
44792                     DAG.getConstant(0, DL, VT), Cmp1);
44793}
44794
44795static SDValue combineLoopMAddPattern(SDNode *N, SelectionDAG &DAG,
44796                                      const X86Subtarget &Subtarget) {
44797  if (!Subtarget.hasSSE2())
44798    return SDValue();
44799
44800  EVT VT = N->getValueType(0);
44801
44802  // If the vector size is less than 128, or greater than the supported RegSize,
44803  // do not use PMADD.
44804  if (!VT.isVector() || VT.getVectorNumElements() < 8)
44805    return SDValue();
44806
44807  SDValue Op0 = N->getOperand(0);
44808  SDValue Op1 = N->getOperand(1);
44809
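  // Check whether Op is a multiply whose operands can be shrunk to 16 bits in
  // a form that the signed 16-bit multiply-add of PMADDWD can consume.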
44810  auto UsePMADDWD = [&](SDValue Op) {
44811    ShrinkMode Mode;
44812    return Op.getOpcode() == ISD::MUL &&
44813           canReduceVMulWidth(Op.getNode(), DAG, Mode) &&
44814           Mode != ShrinkMode::MULU16 &&
44815           (!Subtarget.hasSSE41() ||
44816            (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
44817             Op->isOnlyUserOf(Op.getOperand(1).getNode())));
44818  };
44819
44820  SDValue MulOp, OtherOp;
44821  if (UsePMADDWD(Op0)) {
44822    MulOp = Op0;
44823    OtherOp = Op1;
44824  } else if (UsePMADDWD(Op1)) {
44825    MulOp = Op1;
44826    OtherOp = Op0;
44827  } else
44828    return SDValue();
44829
44830  SDLoc DL(N);
44831  EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
44832                                   VT.getVectorNumElements());
44833  EVT MAddVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
44834                                VT.getVectorNumElements() / 2);
44835
44836  // Shrink the operands of mul.
44837  SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, MulOp->getOperand(0));
44838  SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, MulOp->getOperand(1));
44839
44840  // Madd vector size is half of the original vector size
44841  auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
44842                           ArrayRef<SDValue> Ops) {
44843    MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
44844    return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
44845  };
44846  SDValue Madd = SplitOpsAndApply(DAG, Subtarget, DL, MAddVT, { N0, N1 },
44847                                  PMADDWDBuilder);
44848  // Fill the rest of the output with 0
44849  SDValue Zero = DAG.getConstant(0, DL, Madd.getSimpleValueType());
44850  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Madd, Zero);
44851
44852  // Preserve the reduction flag on the ADD. We may need to revisit for the
44853  // other operand.
44854  SDNodeFlags Flags;
44855  Flags.setVectorReduction(true);
44856  return DAG.getNode(ISD::ADD, DL, VT, Concat, OtherOp, Flags);
44857}
44858
44859static SDValue combineLoopSADPattern(SDNode *N, SelectionDAG &DAG,
44860                                     const X86Subtarget &Subtarget) {
44861  if (!Subtarget.hasSSE2())
44862    return SDValue();
44863
44864  SDLoc DL(N);
44865  EVT VT = N->getValueType(0);
44866
44867  // TODO: There's nothing special about i32, any integer type above i16 should
44868  // work just as well.
44869  if (!VT.isVector() || !VT.isSimple() ||
44870      !(VT.getVectorElementType() == MVT::i32))
44871    return SDValue();
44872
44873  unsigned RegSize = 128;
44874  if (Subtarget.useBWIRegs())
44875    RegSize = 512;
44876  else if (Subtarget.hasAVX())
44877    RegSize = 256;
44878
44879  // We only handle v16i32 for SSE2 / v32i32 for AVX / v64i32 for AVX512.
44880  // TODO: We should be able to handle larger vectors by splitting them before
44881  // feeding them into several SADs, and then reducing over those.
44882  if (VT.getSizeInBits() / 4 > RegSize)
44883    return SDValue();
44884
44885  // We know N is a reduction add. To match SAD, we need one of the operands to
44886  // be an ABS.
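  // Roughly (illustrative): (add (abs (sub (zext A), (zext B))), Acc) becomes
  // (add (psadbw A, B), Acc), padded or extracted to match the reduction width.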
44887  SDValue AbsOp = N->getOperand(0);
44888  SDValue OtherOp = N->getOperand(1);
44889  if (AbsOp.getOpcode() != ISD::ABS)
44890    std::swap(AbsOp, OtherOp);
44891  if (AbsOp.getOpcode() != ISD::ABS)
44892    return SDValue();
44893
44894  // Check whether we have an abs-diff pattern feeding into the add.
44895  SDValue SadOp0, SadOp1;
44896  if (!detectZextAbsDiff(AbsOp, SadOp0, SadOp1))
44897    return SDValue();
44898
44899  // SAD pattern detected. Now build a SAD instruction and an addition for
44900  // reduction. Note that the number of elements of the result of SAD is less
44901  // than the number of elements of its input. Therefore, we can only update
44902  // part of the elements in the reduction vector.
44903  SDValue Sad = createPSADBW(DAG, SadOp0, SadOp1, DL, Subtarget);
44904
44905  // The output of PSADBW is a vector of i64.
44906  // We need to turn the vector of i64 into a vector of i32.
44907  // If the reduction vector is at least as wide as the psadbw result, just
44908  // bitcast. If it's narrower, which can only occur for v2i32, bits 127:16 of
44909  // the PSADBW will be zero. If we promote/narrow vectors, truncate the v2i64
44910  // result to v2i32, which will be removed by type legalization. If we widen
44911  // narrow vectors, then we bitcast to v4i32 and extract v2i32.
44912  MVT ResVT = MVT::getVectorVT(MVT::i32, Sad.getValueSizeInBits() / 32);
44913  Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad);
44914
44915  if (VT.getSizeInBits() > ResVT.getSizeInBits()) {
44916    // Fill the upper elements with zero to match the add width.
44917    assert(VT.getSizeInBits() % ResVT.getSizeInBits() == 0 && "Unexpected VTs");
44918    unsigned NumConcats = VT.getSizeInBits() / ResVT.getSizeInBits();
44919    SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, DL, ResVT));
44920    Ops[0] = Sad;
44921    Sad = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Ops);
44922  } else if (VT.getSizeInBits() < ResVT.getSizeInBits()) {
44923    Sad = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Sad,
44924                      DAG.getIntPtrConstant(0, DL));
44925  }
44926
44927  // Preserve the reduction flag on the ADD. We may need to revisit for the
44928  // other operand.
44929  SDNodeFlags Flags;
44930  Flags.setVectorReduction(true);
44931  return DAG.getNode(ISD::ADD, DL, VT, Sad, OtherOp, Flags);
44932}
44933
44934static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
44935                            const SDLoc &DL, EVT VT,
44936                            const X86Subtarget &Subtarget) {
44937  // Example of pattern we try to detect:
44938  // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
44939  //(add (build_vector (extract_elt t, 0),
44940  //                   (extract_elt t, 2),
44941  //                   (extract_elt t, 4),
44942  //                   (extract_elt t, 6)),
44943  //     (build_vector (extract_elt t, 1),
44944  //                   (extract_elt t, 3),
44945  //                   (extract_elt t, 5),
44946  //                   (extract_elt t, 7)))
44947
44948  if (!Subtarget.hasSSE2())
44949    return SDValue();
44950
44951  if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
44952      Op1.getOpcode() != ISD::BUILD_VECTOR)
44953    return SDValue();
44954
44955  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
44956      VT.getVectorNumElements() < 4 ||
44957      !isPowerOf2_32(VT.getVectorNumElements()))
44958    return SDValue();
44959
44960  // Check if one of Op0,Op1 is of the form:
44961  // (build_vector (extract_elt Mul, 0),
44962  //               (extract_elt Mul, 2),
44963  //               (extract_elt Mul, 4),
44964  //                   ...
44965  // the other is of the form:
44966  // (build_vector (extract_elt Mul, 1),
44967  //               (extract_elt Mul, 3),
44968  //               (extract_elt Mul, 5),
44969  //                   ...
44970  // and identify Mul.
44971  SDValue Mul;
44972  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
44973    SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
44974            Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
44975    // TODO: Be more tolerant of undefs.
44976    if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
44977        Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
44978        Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
44979        Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
44980      return SDValue();
44981    auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
44982    auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
44983    auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
44984    auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
44985    if (!Const0L || !Const1L || !Const0H || !Const1H)
44986      return SDValue();
44987    unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
44988             Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
44989    // Commutativity of mul allows factors of a product to reorder.
44990    if (Idx0L > Idx1L)
44991      std::swap(Idx0L, Idx1L);
44992    if (Idx0H > Idx1H)
44993      std::swap(Idx0H, Idx1H);
44994    // Commutativity of add allows pairs of factors to reorder.
44995    if (Idx0L > Idx0H) {
44996      std::swap(Idx0L, Idx0H);
44997      std::swap(Idx1L, Idx1H);
44998    }
44999    if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
45000        Idx1H != 2 * i + 3)
45001      return SDValue();
45002    if (!Mul) {
45003      // First time an extract_elt's source vector is visited. Must be a MUL
45004      // with twice as many vector elements as the BUILD_VECTOR.
45005      // Both extracts must be from the same MUL.
45006      Mul = Op0L->getOperand(0);
45007      if (Mul->getOpcode() != ISD::MUL ||
45008          Mul.getValueType().getVectorNumElements() != 2 * e)
45009        return SDValue();
45010    }
45011    // Check that the extract is from the same MUL previously seen.
45012    if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
45013        Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
45014      return SDValue();
45015  }
45016
45017  // Check if the Mul source can be safely shrunk.
45018  ShrinkMode Mode;
45019  if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) ||
45020      Mode == ShrinkMode::MULU16)
45021    return SDValue();
45022
45023  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
45024                         ArrayRef<SDValue> Ops) {
45025    // Shrink by adding truncate nodes and let DAGCombine fold with the
45026    // sources.
45027    EVT InVT = Ops[0].getValueType();
45028    assert(InVT.getScalarType() == MVT::i32 &&
45029           "Unexpected scalar element type");
45030    assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
45031    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
45032                                 InVT.getVectorNumElements() / 2);
45033    EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
45034                                   InVT.getVectorNumElements());
45035    return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
45036                       DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[0]),
45037                       DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[1]));
45038  };
45039  return SplitOpsAndApply(DAG, Subtarget, DL, VT,
45040                          { Mul.getOperand(0), Mul.getOperand(1) },
45041                          PMADDBuilder);
45042}
45043
45044// Attempt to turn this pattern into PMADDWD.
45045// (mul (add (sext (build_vector)), (sext (build_vector))),
45046//      (add (sext (build_vector)), (sext (build_vector)))
45047static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
45048                              const SDLoc &DL, EVT VT,
45049                              const X86Subtarget &Subtarget) {
45050  if (!Subtarget.hasSSE2())
45051    return SDValue();
45052
45053  if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
45054    return SDValue();
45055
45056  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
45057      VT.getVectorNumElements() < 4 ||
45058      !isPowerOf2_32(VT.getVectorNumElements()))
45059    return SDValue();
45060
45061  SDValue N00 = N0.getOperand(0);
45062  SDValue N01 = N0.getOperand(1);
45063  SDValue N10 = N1.getOperand(0);
45064  SDValue N11 = N1.getOperand(1);
45065
45066  // All inputs need to be sign extends.
45067  // TODO: Support ZERO_EXTEND from known positive?
45068  if (N00.getOpcode() != ISD::SIGN_EXTEND ||
45069      N01.getOpcode() != ISD::SIGN_EXTEND ||
45070      N10.getOpcode() != ISD::SIGN_EXTEND ||
45071      N11.getOpcode() != ISD::SIGN_EXTEND)
45072    return SDValue();
45073
45074  // Peek through the extends.
45075  N00 = N00.getOperand(0);
45076  N01 = N01.getOperand(0);
45077  N10 = N10.getOperand(0);
45078  N11 = N11.getOperand(0);
45079
45080  // Must be extending from vXi16.
45081  EVT InVT = N00.getValueType();
45082  if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
45083      N10.getValueType() != InVT || N11.getValueType() != InVT)
45084    return SDValue();
45085
45086  // All inputs should be build_vectors.
45087  if (N00.getOpcode() != ISD::BUILD_VECTOR ||
45088      N01.getOpcode() != ISD::BUILD_VECTOR ||
45089      N10.getOpcode() != ISD::BUILD_VECTOR ||
45090      N11.getOpcode() != ISD::BUILD_VECTOR)
45091    return SDValue();
45092
45093  // For each result element we need the even element of one source vector
45094  // multiplied by the even element of the other, added to the product of the
45095  // corresponding odd elements, with both products drawn from the same pair of
45096  // source vectors. So we need to make sure for each element i, this operation
45097  // is being performed:
45098  //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
45099  SDValue In0, In1;
45100  for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
45101    SDValue N00Elt = N00.getOperand(i);
45102    SDValue N01Elt = N01.getOperand(i);
45103    SDValue N10Elt = N10.getOperand(i);
45104    SDValue N11Elt = N11.getOperand(i);
45105    // TODO: Be more tolerant to undefs.
45106    if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
45107        N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
45108        N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
45109        N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
45110      return SDValue();
45111    auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
45112    auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
45113    auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
45114    auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
45115    if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
45116      return SDValue();
45117    unsigned IdxN00 = ConstN00Elt->getZExtValue();
45118    unsigned IdxN01 = ConstN01Elt->getZExtValue();
45119    unsigned IdxN10 = ConstN10Elt->getZExtValue();
45120    unsigned IdxN11 = ConstN11Elt->getZExtValue();
45121    // Add is commutative so indices can be reordered.
45122    if (IdxN00 > IdxN10) {
45123      std::swap(IdxN00, IdxN10);
45124      std::swap(IdxN01, IdxN11);
45125    }
45126    // N0 indices must be the even element. N1 indices must be the next odd element.
45127    if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
45128        IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
45129      return SDValue();
45130    SDValue N00In = N00Elt.getOperand(0);
45131    SDValue N01In = N01Elt.getOperand(0);
45132    SDValue N10In = N10Elt.getOperand(0);
45133    SDValue N11In = N11Elt.getOperand(0);
45134    // The first time we find an input, capture it.
45135    if (!In0) {
45136      In0 = N00In;
45137      In1 = N01In;
45138    }
45139    // Mul is commutative so the input vectors can be in any order.
45140    // Canonicalize to make the compares easier.
45141    if (In0 != N00In)
45142      std::swap(N00In, N01In);
45143    if (In0 != N10In)
45144      std::swap(N10In, N11In);
45145    if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
45146      return SDValue();
45147  }
45148
45149  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
45150                         ArrayRef<SDValue> Ops) {
45151    // The inputs are already vXi16, so build the VPMADDWD node directly from
45152    // them.
45153    EVT OpVT = Ops[0].getValueType();
45154    assert(OpVT.getScalarType() == MVT::i16 &&
45155           "Unexpected scalar element type");
45156    assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
45157    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
45158                                 OpVT.getVectorNumElements() / 2);
45159    return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
45160  };
45161  return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
45162                          PMADDBuilder);
45163}
45164
45165static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
45166                          TargetLowering::DAGCombinerInfo &DCI,
45167                          const X86Subtarget &Subtarget) {
45168  const SDNodeFlags Flags = N->getFlags();
45169  if (Flags.hasVectorReduction()) {
45170    if (SDValue Sad = combineLoopSADPattern(N, DAG, Subtarget))
45171      return Sad;
45172    if (SDValue MAdd = combineLoopMAddPattern(N, DAG, Subtarget))
45173      return MAdd;
45174  }
45175  EVT VT = N->getValueType(0);
45176  SDValue Op0 = N->getOperand(0);
45177  SDValue Op1 = N->getOperand(1);
45178
45179  if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
45180    return MAdd;
45181  if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
45182    return MAdd;
45183
45184  // Try to synthesize horizontal adds from adds of shuffles.
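  // For example (illustrative), one form this matches for v4i32 X and Y:
  //   (add (shuffle X, Y, <0,2,4,6>), (shuffle X, Y, <1,3,5,7>)) -> (hadd X, Y)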
45185  if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
45186       VT == MVT::v8i32) &&
45187      Subtarget.hasSSSE3() &&
45188      isHorizontalBinOp(Op0, Op1, DAG, Subtarget, true)) {
45189    auto HADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
45190                          ArrayRef<SDValue> Ops) {
45191      return DAG.getNode(X86ISD::HADD, DL, Ops[0].getValueType(), Ops);
45192    };
45193    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
45194                            HADDBuilder);
45195  }
45196
45197  // If vectors of i1 are legal, turn (add (zext (vXi1 X)), Y) into
45198  // (sub Y, (sext (vXi1 X))).
45199  // FIXME: We have the (sub Y, (zext (vXi1 X))) -> (add (sext (vXi1 X)), Y) in
45200  // generic DAG combine without a legal type check, but adding this there
45201  // caused regressions.
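  // For example (illustrative):
  //   (add (zext (v4i1 M) to v4i32), Y) -> (sub Y, (sext (v4i1 M) to v4i32))
  // since, per element, zext(i1 x) == -sext(i1 x).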
45202  if (VT.isVector()) {
45203    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45204    if (Op0.getOpcode() == ISD::ZERO_EXTEND &&
45205        Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
45206        TLI.isTypeLegal(Op0.getOperand(0).getValueType())) {
45207      SDLoc DL(N);
45208      SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op0.getOperand(0));
45209      return DAG.getNode(ISD::SUB, DL, VT, Op1, SExt);
45210    }
45211
45212    if (Op1.getOpcode() == ISD::ZERO_EXTEND &&
45213        Op1.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
45214        TLI.isTypeLegal(Op1.getOperand(0).getValueType())) {
45215      SDLoc DL(N);
45216      SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op1.getOperand(0));
45217      return DAG.getNode(ISD::SUB, DL, VT, Op0, SExt);
45218    }
45219  }
45220
45221  return combineAddOrSubToADCOrSBB(N, DAG);
45222}
45223
45224static SDValue combineSubToSubus(SDNode *N, SelectionDAG &DAG,
45225                                 const X86Subtarget &Subtarget) {
45226  SDValue Op0 = N->getOperand(0);
45227  SDValue Op1 = N->getOperand(1);
45228  EVT VT = N->getValueType(0);
45229
45230  if (!VT.isVector())
45231    return SDValue();
45232
45233  // PSUBUS is supported starting from SSE2, but truncation for v8i32
45234  // is only worth it with SSSE3 (PSHUFB).
45235  EVT EltVT = VT.getVectorElementType();
45236  if (!(Subtarget.hasSSE2() && (EltVT == MVT::i8 || EltVT == MVT::i16)) &&
45237      !(Subtarget.hasSSSE3() && (VT == MVT::v8i32 || VT == MVT::v8i64)) &&
45238      !(Subtarget.useBWIRegs() && (VT == MVT::v16i32)))
45239    return SDValue();
45240
45241  SDValue SubusLHS, SubusRHS;
45242  // Try to find umax(a,b) - b or a - umin(a,b) patterns;
45243  // they may be converted to subus(a,b).
45244  // TODO: Need to add IR canonicalization for this code.
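  // For example (illustrative, in DAG notation):
  //   (sub (umax a, b), b)  ->  (usubsat a, b)
  //   (sub a, (umin a, b))  ->  (usubsat a, b)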
45245  if (Op0.getOpcode() == ISD::UMAX) {
45246    SubusRHS = Op1;
45247    SDValue MaxLHS = Op0.getOperand(0);
45248    SDValue MaxRHS = Op0.getOperand(1);
45249    if (MaxLHS == Op1)
45250      SubusLHS = MaxRHS;
45251    else if (MaxRHS == Op1)
45252      SubusLHS = MaxLHS;
45253    else
45254      return SDValue();
45255  } else if (Op1.getOpcode() == ISD::UMIN) {
45256    SubusLHS = Op0;
45257    SDValue MinLHS = Op1.getOperand(0);
45258    SDValue MinRHS = Op1.getOperand(1);
45259    if (MinLHS == Op0)
45260      SubusRHS = MinRHS;
45261    else if (MinRHS == Op0)
45262      SubusRHS = MinLHS;
45263    else
45264      return SDValue();
45265  } else
45266    return SDValue();
45267
45268  // PSUBUS doesn't support v8i32/v8i64/v16i32, but it can be enabled with
45269  // special preprocessing in some cases.
45270  if (EltVT == MVT::i8 || EltVT == MVT::i16)
45271    return DAG.getNode(ISD::USUBSAT, SDLoc(N), VT, SubusLHS, SubusRHS);
45272
45273  assert((VT == MVT::v8i32 || VT == MVT::v16i32 || VT == MVT::v8i64) &&
45274         "Unexpected VT!");
45275
45276  // The special preprocessing can only be applied
45277  // if the value was zero extended from 16 bits,
45278  // so we require the upper 16 bits to be zero for 32-bit
45279  // values, or the upper 48 bits for 64-bit values.
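  // For instance (illustrative): for v8i32 inputs whose upper 16 bits are known
  // zero, we clamp SubusRHS with umin(SubusRHS, 0xFFFF), truncate both operands
  // to v8i16, emit a v8i16 usubsat, and zero extend the result back to v8i32.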
45280  KnownBits Known = DAG.computeKnownBits(SubusLHS);
45281  unsigned NumZeros = Known.countMinLeadingZeros();
45282  if ((VT == MVT::v8i64 && NumZeros < 48) || NumZeros < 16)
45283    return SDValue();
45284
45285  EVT ExtType = SubusLHS.getValueType();
45286  EVT ShrinkedType;
45287  if (VT == MVT::v8i32 || VT == MVT::v8i64)
45288    ShrinkedType = MVT::v8i16;
45289  else
45290    ShrinkedType = NumZeros >= 24 ? MVT::v16i8 : MVT::v16i16;
45291
45292  // If SubusLHS is zero extended, clamp SubusRHS to the same narrow width
45293  // first: SubusRHS = umin(0xFFF.., SubusRHS).
45294  SDValue SaturationConst =
45295      DAG.getConstant(APInt::getLowBitsSet(ExtType.getScalarSizeInBits(),
45296                                           ShrinkedType.getScalarSizeInBits()),
45297                      SDLoc(SubusLHS), ExtType);
45298  SDValue UMin = DAG.getNode(ISD::UMIN, SDLoc(SubusLHS), ExtType, SubusRHS,
45299                             SaturationConst);
45300  SDValue NewSubusLHS =
45301      DAG.getZExtOrTrunc(SubusLHS, SDLoc(SubusLHS), ShrinkedType);
45302  SDValue NewSubusRHS = DAG.getZExtOrTrunc(UMin, SDLoc(SubusRHS), ShrinkedType);
45303  SDValue Psubus = DAG.getNode(ISD::USUBSAT, SDLoc(N), ShrinkedType,
45304                               NewSubusLHS, NewSubusRHS);
45305
45306  // Zero extend the result; it may be used somewhere as 32 bit. If not, the
45307  // zext and a following trunc will be combined away.
45308  return DAG.getZExtOrTrunc(Psubus, SDLoc(N), ExtType);
45309}
45310
45311static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
45312                          TargetLowering::DAGCombinerInfo &DCI,
45313                          const X86Subtarget &Subtarget) {
45314  SDValue Op0 = N->getOperand(0);
45315  SDValue Op1 = N->getOperand(1);
45316
45317  // X86 can't encode an immediate LHS of a sub. See if we can push the
45318  // negation into a preceding instruction.
45319  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
45320    // If the RHS of the sub is a XOR with one use and a constant, invert the
45321    // immediate. Then add one to the LHS of the sub so we can turn
45322    // X-Y -> X+~Y+1, saving one register.
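    // For example (illustrative): (sub 5, (xor X, 2)) -> (add (xor X, ~2), 6),
    // since C - (X ^ K) == (X ^ ~K) + (C + 1).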
45323    if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
45324        isa<ConstantSDNode>(Op1.getOperand(1))) {
45325      const APInt &XorC = Op1.getConstantOperandAPInt(1);
45326      EVT VT = Op0.getValueType();
45327      SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
45328                                   Op1.getOperand(0),
45329                                   DAG.getConstant(~XorC, SDLoc(Op1), VT));
45330      return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
45331                         DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
45332    }
45333  }
45334
45335  // Try to synthesize horizontal subs from subs of shuffles.
45336  EVT VT = N->getValueType(0);
45337  if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
45338       VT == MVT::v8i32) &&
45339      Subtarget.hasSSSE3() &&
45340      isHorizontalBinOp(Op0, Op1, DAG, Subtarget, false)) {
45341    auto HSUBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
45342                          ArrayRef<SDValue> Ops) {
45343      return DAG.getNode(X86ISD::HSUB, DL, Ops[0].getValueType(), Ops);
45344    };
45345    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
45346                            HSUBBuilder);
45347  }
45348
45349  // Try to create PSUBUS if SUB's argument is max/min
45350  if (SDValue V = combineSubToSubus(N, DAG, Subtarget))
45351    return V;
45352
45353  return combineAddOrSubToADCOrSBB(N, DAG);
45354}
45355
45356static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
45357                                    const X86Subtarget &Subtarget) {
45358  MVT VT = N->getSimpleValueType(0);
45359  SDLoc DL(N);
45360
45361  if (N->getOperand(0) == N->getOperand(1)) {
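  // Comparing a value against itself folds: PCMPEQ(X, X) is all-ones and
  // PCMPGT(X, X) (signed greater-than) is all-zeros.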
45362    if (N->getOpcode() == X86ISD::PCMPEQ)
45363      return DAG.getConstant(-1, DL, VT);
45364    if (N->getOpcode() == X86ISD::PCMPGT)
45365      return DAG.getConstant(0, DL, VT);
45366  }
45367
45368  return SDValue();
45369}
45370
45371/// Helper that combines an array of subvector ops as if they were the operands
45372/// of a ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
45373/// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
45374static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
45375                                      ArrayRef<SDValue> Ops, SelectionDAG &DAG,
45376                                      TargetLowering::DAGCombinerInfo &DCI,
45377                                      const X86Subtarget &Subtarget) {
45378  assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
45379
45380  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
45381    return DAG.getUNDEF(VT);
45382
45383  if (llvm::all_of(Ops, [](SDValue Op) {
45384        return ISD::isBuildVectorAllZeros(Op.getNode());
45385      }))
45386    return getZeroVector(VT, Subtarget, DAG, DL);
45387
45388  SDValue Op0 = Ops[0];
45389
45390  // Fold subvector loads into one.
45391  // If needed, look through bitcasts to get to the load.
45392  if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
45393    bool Fast;
45394    const X86TargetLowering *TLI = Subtarget.getTargetLowering();
45395    if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
45396                                *FirstLd->getMemOperand(), &Fast) &&
45397        Fast) {
45398      if (SDValue Ld =
45399              EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
45400        return Ld;
45401    }
45402  }
45403
45404  // Repeated subvectors.
45405  if (llvm::all_of(Ops, [Op0](SDValue Op) { return Op == Op0; })) {
45406    // If this broadcast/subv_broadcast is inserted into both halves, use a
45407    // larger broadcast/subv_broadcast.
45408    if (Op0.getOpcode() == X86ISD::VBROADCAST ||
45409        Op0.getOpcode() == X86ISD::SUBV_BROADCAST)
45410      return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));
45411
45412    // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
45413    if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
45414        (Subtarget.hasAVX2() || MayFoldLoad(Op0.getOperand(0))))
45415      return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
45416                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
45417                                     Op0.getOperand(0),
45418                                     DAG.getIntPtrConstant(0, DL)));
45419
45420    // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
45421    if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
45422        (Subtarget.hasAVX2() ||
45423         (VT.getScalarSizeInBits() >= 32 && MayFoldLoad(Op0.getOperand(0)))) &&
45424        Op0.getOperand(0).getValueType() == VT.getScalarType())
45425      return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
45426  }
45427
45428  bool IsSplat = llvm::all_of(Ops, [&Op0](SDValue Op) { return Op == Op0; });
45429
45430  // Repeated opcode.
45431  // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
45432  // but it currently struggles with different vector widths.
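  // For example (illustrative), for the PSHUFD case below:
  //   concat(pshufd(X, imm), pshufd(Y, imm)) -> pshufd(concat(X, Y), imm)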
45433  if (llvm::all_of(Ops, [Op0](SDValue Op) {
45434        return Op.getOpcode() == Op0.getOpcode();
45435      })) {
45436    unsigned NumOps = Ops.size();
45437    switch (Op0.getOpcode()) {
45438    case X86ISD::PSHUFHW:
45439    case X86ISD::PSHUFLW:
45440    case X86ISD::PSHUFD:
45441      if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
45442          Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
45443        SmallVector<SDValue, 2> Src;
45444        for (unsigned i = 0; i != NumOps; ++i)
45445          Src.push_back(Ops[i].getOperand(0));
45446        return DAG.getNode(Op0.getOpcode(), DL, VT,
45447                           DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
45448                           Op0.getOperand(1));
45449      }
45450      LLVM_FALLTHROUGH;
45451    case X86ISD::VPERMILPI:
45452      // TODO - add support for vXf64/vXi64 shuffles.
45453      if (!IsSplat && NumOps == 2 && (VT == MVT::v8f32 || VT == MVT::v8i32) &&
45454          Subtarget.hasAVX() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
45455        SmallVector<SDValue, 2> Src;
45456        for (unsigned i = 0; i != NumOps; ++i)
45457          Src.push_back(DAG.getBitcast(MVT::v4f32, Ops[i].getOperand(0)));
45458        SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8f32, Src);
45459        Res = DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, Res,
45460                          Op0.getOperand(1));
45461        return DAG.getBitcast(VT, Res);
45462      }
45463      break;
45464    case X86ISD::PACKUS:
45465      if (NumOps == 2 && VT.is256BitVector() && Subtarget.hasInt256()) {
45466        SmallVector<SDValue, 2> LHS, RHS;
45467        for (unsigned i = 0; i != NumOps; ++i) {
45468          LHS.push_back(Ops[i].getOperand(0));
45469          RHS.push_back(Ops[i].getOperand(1));
45470        }
45471        MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
45472        SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
45473                                 NumOps * SrcVT.getVectorNumElements());
45474        return DAG.getNode(Op0.getOpcode(), DL, VT,
45475                           DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, LHS),
45476                           DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, RHS));
45477      }
45478      break;
45479    }
45480  }
45481
45482  return SDValue();
45483}
45484
45485static SDValue combineConcatVectors(SDNode *N, SelectionDAG &DAG,
45486                                    TargetLowering::DAGCombinerInfo &DCI,
45487                                    const X86Subtarget &Subtarget) {
45488  EVT VT = N->getValueType(0);
45489  EVT SrcVT = N->getOperand(0).getValueType();
45490  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45491
45492  // Don't do anything for i1 vectors.
45493  if (VT.getVectorElementType() == MVT::i1)
45494    return SDValue();
45495
45496  if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
45497    SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
45498    if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
45499                                           DCI, Subtarget))
45500      return R;
45501  }
45502
45503  return SDValue();
45504}
45505
45506static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
45507                                      TargetLowering::DAGCombinerInfo &DCI,
45508                                      const X86Subtarget &Subtarget) {
45509  if (DCI.isBeforeLegalizeOps())
45510    return SDValue();
45511
45512  MVT OpVT = N->getSimpleValueType(0);
45513
45514  bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;
45515
45516  SDLoc dl(N);
45517  SDValue Vec = N->getOperand(0);
45518  SDValue SubVec = N->getOperand(1);
45519
45520  uint64_t IdxVal = N->getConstantOperandVal(2);
45521  MVT SubVecVT = SubVec.getSimpleValueType();
45522
45523  if (Vec.isUndef() && SubVec.isUndef())
45524    return DAG.getUNDEF(OpVT);
45525
45526  // Inserting undefs/zeros into zeros/undefs gives a zero vector.
45527  if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
45528      (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
45529    return getZeroVector(OpVT, Subtarget, DAG, dl);
45530
45531  if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
45532    // If we're inserting into a zero vector and then into a larger zero vector,
45533    // just insert into the larger zero vector directly.
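    // For example (illustrative):
    //   insert_subvector(zeros, insert_subvector(zeros, X, I2), I1)
    //     -> insert_subvector(zeros, X, I1 + I2)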
45534    if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
45535        ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
45536      uint64_t Idx2Val = SubVec.getConstantOperandVal(2);
45537      return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
45538                         getZeroVector(OpVT, Subtarget, DAG, dl),
45539                         SubVec.getOperand(1),
45540                         DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
45541    }
45542
45543    // If we're inserting into a zero vector, our input was extracted from an
45544    // insert into a zero vector of the same type, and the extraction was at
45545    // least as large as the original insertion, just insert the original
45546    // subvector into a zero vector.
45547    if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
45548        isNullConstant(SubVec.getOperand(1)) &&
45549        SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
45550      SDValue Ins = SubVec.getOperand(0);
45551      if (isNullConstant(Ins.getOperand(2)) &&
45552          ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
45553          Ins.getOperand(1).getValueSizeInBits() <= SubVecVT.getSizeInBits())
45554        return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
45555                           getZeroVector(OpVT, Subtarget, DAG, dl),
45556                           Ins.getOperand(1), N->getOperand(2));
45557    }
45558  }
45559
45560  // Stop here if this is an i1 vector.
45561  if (IsI1Vector)
45562    return SDValue();
45563
45564  // If this is an insert of an extract, combine to a shuffle. Don't do this
45565  // if the insert or extract can be represented with a subregister operation.
45566  if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
45567      SubVec.getOperand(0).getSimpleValueType() == OpVT &&
45568      (IdxVal != 0 || !Vec.isUndef())) {
45569    int ExtIdxVal = SubVec.getConstantOperandVal(1);
45570    if (ExtIdxVal != 0) {
45571      int VecNumElts = OpVT.getVectorNumElements();
45572      int SubVecNumElts = SubVecVT.getVectorNumElements();
45573      SmallVector<int, 64> Mask(VecNumElts);
45574      // First create an identity shuffle mask.
45575      for (int i = 0; i != VecNumElts; ++i)
45576        Mask[i] = i;
45577      // Now insert the extracted portion.
45578      for (int i = 0; i != SubVecNumElts; ++i)
45579        Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
45580
45581      return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
45582    }
45583  }
45584
45585  // Match concat_vector style patterns.
45586  SmallVector<SDValue, 2> SubVectorOps;
45587  if (collectConcatOps(N, SubVectorOps)) {
45588    if (SDValue Fold =
45589            combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
45590      return Fold;
45591
45592    // If we're inserting all zeros into the upper half, change this to
45593    // a concat with zero. We will match this to a move
45594    // with implicit upper bit zeroing during isel.
45595    // We do this here because we don't want combineConcatVectorOps to
45596    // create INSERT_SUBVECTOR from CONCAT_VECTORS.
45597    if (SubVectorOps.size() == 2 &&
45598        ISD::isBuildVectorAllZeros(SubVectorOps[1].getNode()))
45599      return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
45600                         getZeroVector(OpVT, Subtarget, DAG, dl),
45601                         SubVectorOps[0], DAG.getIntPtrConstant(0, dl));
45602  }
45603
45604  // If this is a broadcast insert into an upper undef, use a larger broadcast.
45605  if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
45606    return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));
45607
45608  // If this is a broadcast load inserted into an upper undef, use a larger
45609  // broadcast load.
45610  if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
45611      SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
45612    auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
45613    SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
45614    SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
45615    SDValue BcastLd =
45616        DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
45617                                MemIntr->getMemoryVT(),
45618                                MemIntr->getMemOperand());
45619    DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
45620    return BcastLd;
45621  }
45622
45623  return SDValue();
45624}
45625
45626/// If we are extracting a subvector of a vector select and the select condition
45627/// is composed of concatenated vectors, try to narrow the select width. This
45628/// is a common pattern for AVX1 integer code because 256-bit selects may be
45629/// legal, but there is almost no integer math/logic available for 256-bit.
45630/// This function should only be called with legal types (otherwise, the calls
45631/// to get simple value types will assert).
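/// For example (illustrative), with v8i32 operands and a 128-bit extract at
/// element index 4, where the select condition is a concatenation:
///   (extract_subvector (vselect (concat C0, C1), T, F), 4)
///     -> (vselect C1, (extract_subvector T, 4), (extract_subvector F, 4))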
45632static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
45633  SDValue Sel = peekThroughBitcasts(Ext->getOperand(0));
45634  SmallVector<SDValue, 4> CatOps;
45635  if (Sel.getOpcode() != ISD::VSELECT ||
45636      !collectConcatOps(Sel.getOperand(0).getNode(), CatOps))
45637    return SDValue();
45638
45639  // Note: We assume simple value types because this should only be called with
45640  //       legal operations/types.
45641  // TODO: This can be extended to handle extraction to 256-bits.
45642  MVT VT = Ext->getSimpleValueType(0);
45643  if (!VT.is128BitVector())
45644    return SDValue();
45645
45646  MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
45647  if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
45648    return SDValue();
45649
45650  MVT WideVT = Ext->getOperand(0).getSimpleValueType();
45651  MVT SelVT = Sel.getSimpleValueType();
45652  assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
45653         "Unexpected vector type with legal operations");
45654
45655  unsigned SelElts = SelVT.getVectorNumElements();
45656  unsigned CastedElts = WideVT.getVectorNumElements();
45657  unsigned ExtIdx = cast<ConstantSDNode>(Ext->getOperand(1))->getZExtValue();
45658  if (SelElts % CastedElts == 0) {
45659    // The select has the same or more (narrower) elements than the extract
45660    // operand. The extraction index gets scaled by that factor.
45661    ExtIdx *= (SelElts / CastedElts);
45662  } else if (CastedElts % SelElts == 0) {
45663    // The select has fewer (wider) elements than the extract operand. Make sure
45664    // that the extraction index can be divided evenly.
45665    unsigned IndexDivisor = CastedElts / SelElts;
45666    if (ExtIdx % IndexDivisor != 0)
45667      return SDValue();
45668    ExtIdx /= IndexDivisor;
45669  } else {
45670    llvm_unreachable("Element count of simple vector types are not divisible?");
45671  }
45672
45673  unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
45674  unsigned NarrowElts = SelElts / NarrowingFactor;
45675  MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
45676  SDLoc DL(Ext);
45677  SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
45678  SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
45679  SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
45680  SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
45681  return DAG.getBitcast(VT, NarrowSel);
45682}
45683
45684static SDValue combineExtractSubvector(SDNode *N, SelectionDAG &DAG,
45685                                       TargetLowering::DAGCombinerInfo &DCI,
45686                                       const X86Subtarget &Subtarget) {
45687  // For AVX1 only, if we are extracting from a 256-bit and+not (which will
45688  // eventually get combined/lowered into ANDNP) with a concatenated operand,
45689  // split the 'and' into 128-bit ops to avoid the concatenate and extract.
45690  // We let generic combining take over from there to simplify the
45691  // insert/extract and 'not'.
45692  // This pattern emerges during AVX1 legalization. We handle it before lowering
45693  // to avoid complications like splitting constant vector loads.
45694
45695  // Capture the original wide type in the likely case that we need to bitcast
45696  // back to this type.
45697  if (!N->getValueType(0).isSimple())
45698    return SDValue();
45699
45700  MVT VT = N->getSimpleValueType(0);
45701  SDValue InVec = N->getOperand(0);
45702  SDValue InVecBC = peekThroughBitcasts(InVec);
45703  EVT InVecVT = InVec.getValueType();
45704  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45705
45706  if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
45707      TLI.isTypeLegal(InVecVT) &&
45708      InVecVT.getSizeInBits() == 256 && InVecBC.getOpcode() == ISD::AND) {
45709    auto isConcatenatedNot = [] (SDValue V) {
45710      V = peekThroughBitcasts(V);
45711      if (!isBitwiseNot(V))
45712        return false;
45713      SDValue NotOp = V->getOperand(0);
45714      return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
45715    };
45716    if (isConcatenatedNot(InVecBC.getOperand(0)) ||
45717        isConcatenatedNot(InVecBC.getOperand(1))) {
45718      // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
45719      SDValue Concat = split256IntArith(InVecBC, DAG);
45720      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
45721                         DAG.getBitcast(InVecVT, Concat), N->getOperand(1));
45722    }
45723  }
45724
45725  if (DCI.isBeforeLegalizeOps())
45726    return SDValue();
45727
45728  if (SDValue V = narrowExtractedVectorSelect(N, DAG))
45729    return V;
45730
45731  unsigned IdxVal = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
45732
45733  if (ISD::isBuildVectorAllZeros(InVec.getNode()))
45734    return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
45735
45736  if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
45737    if (VT.getScalarType() == MVT::i1)
45738      return DAG.getConstant(1, SDLoc(N), VT);
45739    return getOnesVector(VT, DAG, SDLoc(N));
45740  }
45741
45742  if (InVec.getOpcode() == ISD::BUILD_VECTOR)
45743    return DAG.getBuildVector(
45744        VT, SDLoc(N),
45745        InVec.getNode()->ops().slice(IdxVal, VT.getVectorNumElements()));
45746
45747  // If we are extracting from an insert into a zero vector, replace with a
45748  // smaller insert into zero if the extracted piece is at least as large as the
45749  // original inserted subvector. Don't do this for i1 vectors.
45750  if (VT.getVectorElementType() != MVT::i1 &&
45751      InVec.getOpcode() == ISD::INSERT_SUBVECTOR && IdxVal == 0 &&
45752      InVec.hasOneUse() && isNullConstant(InVec.getOperand(2)) &&
45753      ISD::isBuildVectorAllZeros(InVec.getOperand(0).getNode()) &&
45754      InVec.getOperand(1).getValueSizeInBits() <= VT.getSizeInBits()) {
45755    SDLoc DL(N);
45756    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
45757                       getZeroVector(VT, Subtarget, DAG, DL),
45758                       InVec.getOperand(1), InVec.getOperand(2));
45759  }
45760
45761  // If we're extracting from a broadcast then we're better off just
45762  // broadcasting to the smaller type directly, assuming this is the only use.
45763  // As it's a broadcast we don't care about the extraction index.
45764  if (InVec.getOpcode() == X86ISD::VBROADCAST && InVec.hasOneUse() &&
45765      InVec.getOperand(0).getValueSizeInBits() <= VT.getSizeInBits())
45766    return DAG.getNode(X86ISD::VBROADCAST, SDLoc(N), VT, InVec.getOperand(0));
45767
45768  if (InVec.getOpcode() == X86ISD::VBROADCAST_LOAD && InVec.hasOneUse()) {
45769    auto *MemIntr = cast<MemIntrinsicSDNode>(InVec);
45770    if (MemIntr->getMemoryVT().getSizeInBits() <= VT.getSizeInBits()) {
45771      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
45772      SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
45773      SDValue BcastLd =
45774          DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
45775                                  MemIntr->getMemoryVT(),
45776                                  MemIntr->getMemOperand());
45777      DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
45778      return BcastLd;
45779    }
45780  }
45781
45782  // If we're extracting the lowest subvector and we're the only user,
45783  // we may be able to perform this with a smaller vector width.
45784  if (IdxVal == 0 && InVec.hasOneUse()) {
45785    unsigned InOpcode = InVec.getOpcode();
45786    if (VT == MVT::v2f64 && InVecVT == MVT::v4f64) {
45787      // v2f64 CVTDQ2PD(v4i32).
45788      if (InOpcode == ISD::SINT_TO_FP &&
45789          InVec.getOperand(0).getValueType() == MVT::v4i32) {
45790        return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
45791      }
45792      // v2f64 CVTUDQ2PD(v4i32).
45793      if (InOpcode == ISD::UINT_TO_FP && Subtarget.hasVLX() &&
45794          InVec.getOperand(0).getValueType() == MVT::v4i32) {
45795        return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
45796      }
45797      // v2f64 CVTPS2PD(v4f32).
45798      if (InOpcode == ISD::FP_EXTEND &&
45799          InVec.getOperand(0).getValueType() == MVT::v4f32) {
45800        return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
45801      }
45802    }
45803    if ((InOpcode == ISD::ANY_EXTEND ||
45804         InOpcode == ISD::ANY_EXTEND_VECTOR_INREG ||
45805         InOpcode == ISD::ZERO_EXTEND ||
45806         InOpcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
45807         InOpcode == ISD::SIGN_EXTEND ||
45808         InOpcode == ISD::SIGN_EXTEND_VECTOR_INREG) &&
45809        VT.is128BitVector() &&
45810        InVec.getOperand(0).getSimpleValueType().is128BitVector()) {
45811      unsigned ExtOp = getOpcode_EXTEND_VECTOR_INREG(InOpcode);
45812      return DAG.getNode(ExtOp, SDLoc(N), VT, InVec.getOperand(0));
45813    }
45814    if (InOpcode == ISD::VSELECT &&
45815        InVec.getOperand(0).getValueType().is256BitVector() &&
45816        InVec.getOperand(1).getValueType().is256BitVector() &&
45817        InVec.getOperand(2).getValueType().is256BitVector()) {
45818      SDLoc DL(N);
45819      SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
45820      SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
45821      SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
45822      return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
45823    }
45824  }
45825
45826  return SDValue();
45827}
45828
45829static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
45830  EVT VT = N->getValueType(0);
45831  SDValue Src = N->getOperand(0);
45832  SDLoc DL(N);
45833
45834  // If this is a scalar to vector to v1i1 from an AND with 1, bypass the and.
45835  // This occurs frequently in our masked scalar intrinsic code and our
45836  // floating point select lowering with AVX512.
45837  // TODO: SimplifyDemandedBits instead?
45838  if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse())
45839    if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
45840      if (C->getAPIntValue().isOneValue())
45841        return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1,
45842                           Src.getOperand(0));
45843
45844  // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
45845  if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
45846      Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
45847      Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
45848    if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
45849      if (C->isNullValue())
45850        return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
45851                           Src.getOperand(1));
45852
45853  // Reduce v2i64 to v4i32 if we don't need the upper bits.
45854  // TODO: Move to DAGCombine?
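  // For example (illustrative), with an i32 source X:
  //   (v2i64 scalar_to_vector (i64 any_extend X))
  //     -> (v2i64 bitcast (v4i32 scalar_to_vector X))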
45855  if (VT == MVT::v2i64 && Src.getOpcode() == ISD::ANY_EXTEND &&
45856      Src.getValueType() == MVT::i64 && Src.hasOneUse() &&
45857      Src.getOperand(0).getScalarValueSizeInBits() <= 32)
45858    return DAG.getBitcast(
45859        VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
45860                        DAG.getAnyExtOrTrunc(Src.getOperand(0), DL, MVT::i32)));
45861
45862  return SDValue();
45863}
45864
45865// Simplify PMULDQ and PMULUDQ operations.
45866static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
45867                             TargetLowering::DAGCombinerInfo &DCI,
45868                             const X86Subtarget &Subtarget) {
45869  SDValue LHS = N->getOperand(0);
45870  SDValue RHS = N->getOperand(1);
45871
45872  // Canonicalize constant to RHS.
45873  if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
45874      !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
45875    return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
45876
45877  // Multiply by zero.
45878  // Don't return RHS as it may contain UNDEFs.
45879  if (ISD::isBuildVectorAllZeros(RHS.getNode()))
45880    return DAG.getConstant(0, SDLoc(N), N->getValueType(0));
45881
45882  // PMULDQ/PMULUDQ only use the lower 32 bits of each vector element.
45883  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45884  if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnesValue(64), DCI))
45885    return SDValue(N, 0);
45886
45887  // If the input is an extend_invec and the SimplifyDemandedBits call didn't
45888  // convert it to any_extend_invec, due to the LegalOperations check, do the
45889  // conversion directly to a vector shuffle manually. This exposes combine
45890  // opportunities missed by combineExtInVec not calling
45891  // combineX86ShufflesRecursively on SSE4.1 targets.
45892  // FIXME: This is basically a hack around several other issues related to
45893  // ANY_EXTEND_VECTOR_INREG.
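  // For example (illustrative):
  //   (pmuludq (v2i64 zext_invec (v4i32 X)), Y)
  //     -> (pmuludq (v2i64 bitcast (shuffle X, X, <0,undef,1,undef>)), Y)
  // which is valid because pmuludq only reads the low 32 bits of each element.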
45894  if (N->getValueType(0) == MVT::v2i64 && LHS.hasOneUse() &&
45895      (LHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
45896       LHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
45897      LHS.getOperand(0).getValueType() == MVT::v4i32) {
45898    SDLoc dl(N);
45899    LHS = DAG.getVectorShuffle(MVT::v4i32, dl, LHS.getOperand(0),
45900                               LHS.getOperand(0), { 0, -1, 1, -1 });
45901    LHS = DAG.getBitcast(MVT::v2i64, LHS);
45902    return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
45903  }
45904  if (N->getValueType(0) == MVT::v2i64 && RHS.hasOneUse() &&
45905      (RHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
45906       RHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
45907      RHS.getOperand(0).getValueType() == MVT::v4i32) {
45908    SDLoc dl(N);
45909    RHS = DAG.getVectorShuffle(MVT::v4i32, dl, RHS.getOperand(0),
45910                               RHS.getOperand(0), { 0, -1, 1, -1 });
45911    RHS = DAG.getBitcast(MVT::v2i64, RHS);
45912    return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
45913  }
45914
45915  return SDValue();
45916}
45917
45918static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG,
45919                               TargetLowering::DAGCombinerInfo &DCI,
45920                               const X86Subtarget &Subtarget) {
45921  EVT VT = N->getValueType(0);
45922  SDValue In = N->getOperand(0);
45923  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45924
45925  // Try to merge vector loads and extend_inreg to an extload.
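  // For example (illustrative):
  //   (v4i32 sext_invec (v16i8 load ptr))
  //     -> v4i32 extending load of ptr with a v4i8 memory type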
45926  if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
45927      In.hasOneUse()) {
45928    auto *Ld = cast<LoadSDNode>(In);
45929    if (Ld->isSimple()) {
45930      MVT SVT = In.getSimpleValueType().getVectorElementType();
45931      ISD::LoadExtType Ext = N->getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
45932      EVT MemVT = EVT::getVectorVT(*DAG.getContext(), SVT,
45933                                   VT.getVectorNumElements());
45934      if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
45935        SDValue Load =
45936            DAG.getExtLoad(Ext, SDLoc(N), VT, Ld->getChain(), Ld->getBasePtr(),
45937                           Ld->getPointerInfo(), MemVT, Ld->getAlignment(),
45938                           Ld->getMemOperand()->getFlags());
45939        DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
45940        return Load;
45941      }
45942    }
45943  }
45944
45945  // Attempt to combine as a shuffle.
45946  // TODO: SSE41 support
45947  if (Subtarget.hasAVX() && N->getOpcode() != ISD::SIGN_EXTEND_VECTOR_INREG) {
45948    SDValue Op(N, 0);
45949    if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
45950      if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
45951        return Res;
45952  }
45953
45954  return SDValue();
45955}
45956
45957static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
45958                             TargetLowering::DAGCombinerInfo &DCI) {
45959  EVT VT = N->getValueType(0);
45960
45961  if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
45962    return DAG.getConstant(0, SDLoc(N), VT);
45963
45964  APInt KnownUndef, KnownZero;
45965  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45966  APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
45967  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
45968                                     KnownZero, DCI))
45969    return SDValue(N, 0);
45970
45971  return SDValue();
45972}
45973
45974SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
45975                                             DAGCombinerInfo &DCI) const {
45976  SelectionDAG &DAG = DCI.DAG;
45977  switch (N->getOpcode()) {
45978  default: break;
45979  case ISD::SCALAR_TO_VECTOR:
45980    return combineScalarToVector(N, DAG);
45981  case ISD::EXTRACT_VECTOR_ELT:
45982  case X86ISD::PEXTRW:
45983  case X86ISD::PEXTRB:
45984    return combineExtractVectorElt(N, DAG, DCI, Subtarget);
45985  case ISD::CONCAT_VECTORS:
45986    return combineConcatVectors(N, DAG, DCI, Subtarget);
45987  case ISD::INSERT_SUBVECTOR:
45988    return combineInsertSubvector(N, DAG, DCI, Subtarget);
45989  case ISD::EXTRACT_SUBVECTOR:
45990    return combineExtractSubvector(N, DAG, DCI, Subtarget);
45991  case ISD::VSELECT:
45992  case ISD::SELECT:
45993  case X86ISD::BLENDV:      return combineSelect(N, DAG, DCI, Subtarget);
45994  case ISD::BITCAST:        return combineBitcast(N, DAG, DCI, Subtarget);
45995  case X86ISD::CMOV:        return combineCMov(N, DAG, DCI, Subtarget);
45996  case X86ISD::CMP:         return combineCMP(N, DAG);
45997  case ISD::ADD:            return combineAdd(N, DAG, DCI, Subtarget);
45998  case ISD::SUB:            return combineSub(N, DAG, DCI, Subtarget);
45999  case X86ISD::ADD:
46000  case X86ISD::SUB:         return combineX86AddSub(N, DAG, DCI);
46001  case X86ISD::SBB:         return combineSBB(N, DAG);
46002  case X86ISD::ADC:         return combineADC(N, DAG, DCI);
46003  case ISD::MUL:            return combineMul(N, DAG, DCI, Subtarget);
46004  case ISD::SHL:            return combineShiftLeft(N, DAG);
46005  case ISD::SRA:            return combineShiftRightArithmetic(N, DAG);
46006  case ISD::SRL:            return combineShiftRightLogical(N, DAG, DCI);
46007  case ISD::AND:            return combineAnd(N, DAG, DCI, Subtarget);
46008  case ISD::OR:             return combineOr(N, DAG, DCI, Subtarget);
46009  case ISD::XOR:            return combineXor(N, DAG, DCI, Subtarget);
46010  case X86ISD::BEXTR:       return combineBEXTR(N, DAG, DCI, Subtarget);
46011  case ISD::LOAD:           return combineLoad(N, DAG, DCI, Subtarget);
46012  case ISD::MLOAD:          return combineMaskedLoad(N, DAG, DCI, Subtarget);
46013  case ISD::STORE:          return combineStore(N, DAG, DCI, Subtarget);
46014  case ISD::MSTORE:         return combineMaskedStore(N, DAG, DCI, Subtarget);
46015  case ISD::SINT_TO_FP:
46016  case ISD::STRICT_SINT_TO_FP:
46017    return combineSIntToFP(N, DAG, DCI, Subtarget);
46018  case ISD::UINT_TO_FP:
46019  case ISD::STRICT_UINT_TO_FP:
46020    return combineUIntToFP(N, DAG, Subtarget);
46021  case ISD::FADD:
46022  case ISD::FSUB:           return combineFaddFsub(N, DAG, Subtarget);
46023  case ISD::FNEG:           return combineFneg(N, DAG, Subtarget);
46024  case ISD::TRUNCATE:       return combineTruncate(N, DAG, Subtarget);
46025  case X86ISD::VTRUNC:      return combineVTRUNC(N, DAG);
46026  case X86ISD::ANDNP:       return combineAndnp(N, DAG, DCI, Subtarget);
46027  case X86ISD::FAND:        return combineFAnd(N, DAG, Subtarget);
46028  case X86ISD::FANDN:       return combineFAndn(N, DAG, Subtarget);
46029  case X86ISD::FXOR:
46030  case X86ISD::FOR:         return combineFOr(N, DAG, Subtarget);
46031  case X86ISD::FMIN:
46032  case X86ISD::FMAX:        return combineFMinFMax(N, DAG);
46033  case ISD::FMINNUM:
46034  case ISD::FMAXNUM:        return combineFMinNumFMaxNum(N, DAG, Subtarget);
46035  case X86ISD::CVTSI2P:
46036  case X86ISD::CVTUI2P:     return combineX86INT_TO_FP(N, DAG, DCI);
46037  case X86ISD::CVTP2SI:
46038  case X86ISD::CVTP2UI:
46039  case X86ISD::CVTTP2SI:
46040  case X86ISD::CVTTP2UI:    return combineCVTP2I_CVTTP2I(N, DAG, DCI);
46041  case X86ISD::BT:          return combineBT(N, DAG, DCI);
46042  case ISD::ANY_EXTEND:
46043  case ISD::ZERO_EXTEND:    return combineZext(N, DAG, DCI, Subtarget);
46044  case ISD::SIGN_EXTEND:    return combineSext(N, DAG, DCI, Subtarget);
46045  case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
46046  case ISD::ANY_EXTEND_VECTOR_INREG:
46047  case ISD::SIGN_EXTEND_VECTOR_INREG:
46048  case ISD::ZERO_EXTEND_VECTOR_INREG: return combineExtInVec(N, DAG, DCI,
46049                                                             Subtarget);
46050  case ISD::SETCC:          return combineSetCC(N, DAG, Subtarget);
46051  case X86ISD::SETCC:       return combineX86SetCC(N, DAG, Subtarget);
46052  case X86ISD::BRCOND:      return combineBrCond(N, DAG, Subtarget);
46053  case X86ISD::PACKSS:
46054  case X86ISD::PACKUS:      return combineVectorPack(N, DAG, DCI, Subtarget);
46055  case X86ISD::VSHL:
46056  case X86ISD::VSRA:
46057  case X86ISD::VSRL:
46058    return combineVectorShiftVar(N, DAG, DCI, Subtarget);
46059  case X86ISD::VSHLI:
46060  case X86ISD::VSRAI:
46061  case X86ISD::VSRLI:
46062    return combineVectorShiftImm(N, DAG, DCI, Subtarget);
46063  case X86ISD::PINSRB:
46064  case X86ISD::PINSRW:      return combineVectorInsert(N, DAG, DCI, Subtarget);
46065  case X86ISD::SHUFP:       // Handle all target specific shuffles
46066  case X86ISD::INSERTPS:
46067  case X86ISD::EXTRQI:
46068  case X86ISD::INSERTQI:
46069  case X86ISD::PALIGNR:
46070  case X86ISD::VSHLDQ:
46071  case X86ISD::VSRLDQ:
46072  case X86ISD::BLENDI:
46073  case X86ISD::UNPCKH:
46074  case X86ISD::UNPCKL:
46075  case X86ISD::MOVHLPS:
46076  case X86ISD::MOVLHPS:
46077  case X86ISD::PSHUFB:
46078  case X86ISD::PSHUFD:
46079  case X86ISD::PSHUFHW:
46080  case X86ISD::PSHUFLW:
46081  case X86ISD::MOVSHDUP:
46082  case X86ISD::MOVSLDUP:
46083  case X86ISD::MOVDDUP:
46084  case X86ISD::MOVSS:
46085  case X86ISD::MOVSD:
46086  case X86ISD::VBROADCAST:
46087  case X86ISD::VPPERM:
46088  case X86ISD::VPERMI:
46089  case X86ISD::VPERMV:
46090  case X86ISD::VPERMV3:
46091  case X86ISD::VPERMIL2:
46092  case X86ISD::VPERMILPI:
46093  case X86ISD::VPERMILPV:
46094  case X86ISD::VPERM2X128:
46095  case X86ISD::SHUF128:
46096  case X86ISD::VZEXT_MOVL:
46097  case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI,Subtarget);
46098  case X86ISD::FMADD_RND:
46099  case X86ISD::FMSUB:
46100  case X86ISD::FMSUB_RND:
46101  case X86ISD::FNMADD:
46102  case X86ISD::FNMADD_RND:
46103  case X86ISD::FNMSUB:
46104  case X86ISD::FNMSUB_RND:
46105  case ISD::FMA: return combineFMA(N, DAG, DCI, Subtarget);
46106  case X86ISD::FMADDSUB_RND:
46107  case X86ISD::FMSUBADD_RND:
46108  case X86ISD::FMADDSUB:
46109  case X86ISD::FMSUBADD:    return combineFMADDSUB(N, DAG, DCI);
46110  case X86ISD::MOVMSK:      return combineMOVMSK(N, DAG, DCI, Subtarget);
46111  case X86ISD::MGATHER:
46112  case X86ISD::MSCATTER:    return combineX86GatherScatter(N, DAG, DCI);
46113  case ISD::MGATHER:
46114  case ISD::MSCATTER:       return combineGatherScatter(N, DAG, DCI);
46115  case X86ISD::PCMPEQ:
46116  case X86ISD::PCMPGT:      return combineVectorCompare(N, DAG, Subtarget);
46117  case X86ISD::PMULDQ:
46118  case X86ISD::PMULUDQ:     return combinePMULDQ(N, DAG, DCI, Subtarget);
46119  case X86ISD::KSHIFTL:
46120  case X86ISD::KSHIFTR:     return combineKSHIFT(N, DAG, DCI);
46121  }
46122
46123  return SDValue();
46124}
46125
46126bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
46127  if (!isTypeLegal(VT))
46128    return false;
46129
46130  // There are no vXi8 shifts.
46131  if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
46132    return false;
46133
46134  // TODO: Almost no 8-bit ops are desirable because they have no actual
46135  //       size/speed advantages vs. 32-bit ops, but they do have a major
46136  //       potential disadvantage by causing partial register stalls.
46137  //
46138  // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
46139  // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
46140  // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
46141  // check for a constant operand to the multiply.
46142  if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
46143    return false;
46144
46145  // i16 instruction encodings are longer and some i16 instructions are slow,
46146  // so those are not desirable.
46147  if (VT == MVT::i16) {
46148    switch (Opc) {
46149    default:
46150      break;
46151    case ISD::LOAD:
46152    case ISD::SIGN_EXTEND:
46153    case ISD::ZERO_EXTEND:
46154    case ISD::ANY_EXTEND:
46155    case ISD::SHL:
46156    case ISD::SRA:
46157    case ISD::SRL:
46158    case ISD::SUB:
46159    case ISD::ADD:
46160    case ISD::MUL:
46161    case ISD::AND:
46162    case ISD::OR:
46163    case ISD::XOR:
46164      return false;
46165    }
46166  }
46167
46168  // Any legal type not explicitly accounted for above here is desirable.
46169  return true;
46170}
46171
46172SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl,
46173                                                  SDValue Value, SDValue Addr,
46174                                                  SelectionDAG &DAG) const {
46175  const Module *M = DAG.getMachineFunction().getMMI().getModule();
46176  Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
46177  if (IsCFProtectionSupported) {
46178    // In case control-flow branch protection is enabled, we need to add a
46179    // notrack prefix to the indirect branch.
46180    // In order to do that we create an NT_BRIND SDNode.
46181    // Upon ISel, the pattern will convert it to a jmp with a NoTrack prefix.
46182    return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr);
46183  }
46184
46185  return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
46186}
46187
46188bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
46189  EVT VT = Op.getValueType();
46190  bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
46191                             isa<ConstantSDNode>(Op.getOperand(1));
46192
46193  // i16 is legal, but undesirable since i16 instruction encodings are longer
46194  // and some i16 instructions are slow.
46195  // 8-bit multiply-by-constant can usually be expanded to something cheaper
46196  // using LEA and/or other ALU ops.
46197  if (VT != MVT::i16 && !Is8BitMulByConstant)
46198    return false;
46199
46200  auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
46201    if (!Op.hasOneUse())
46202      return false;
46203    SDNode *User = *Op->use_begin();
46204    if (!ISD::isNormalStore(User))
46205      return false;
46206    auto *Ld = cast<LoadSDNode>(Load);
46207    auto *St = cast<StoreSDNode>(User);
46208    return Ld->getBasePtr() == St->getBasePtr();
46209  };
46210
46211  auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
46212    if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
46213      return false;
46214    if (!Op.hasOneUse())
46215      return false;
46216    SDNode *User = *Op->use_begin();
46217    if (User->getOpcode() != ISD::ATOMIC_STORE)
46218      return false;
46219    auto *Ld = cast<AtomicSDNode>(Load);
46220    auto *St = cast<AtomicSDNode>(User);
46221    return Ld->getBasePtr() == St->getBasePtr();
46222  };
46223
46224  bool Commute = false;
46225  switch (Op.getOpcode()) {
46226  default: return false;
46227  case ISD::SIGN_EXTEND:
46228  case ISD::ZERO_EXTEND:
46229  case ISD::ANY_EXTEND:
46230    break;
46231  case ISD::SHL:
46232  case ISD::SRA:
46233  case ISD::SRL: {
46234    SDValue N0 = Op.getOperand(0);
46235    // Look out for (store (shl (load), x)).
46236    if (MayFoldLoad(N0) && IsFoldableRMW(N0, Op))
46237      return false;
46238    break;
46239  }
46240  case ISD::ADD:
46241  case ISD::MUL:
46242  case ISD::AND:
46243  case ISD::OR:
46244  case ISD::XOR:
46245    Commute = true;
46246    LLVM_FALLTHROUGH;
46247  case ISD::SUB: {
46248    SDValue N0 = Op.getOperand(0);
46249    SDValue N1 = Op.getOperand(1);
46250    // Avoid disabling potential load folding opportunities.
46251    if (MayFoldLoad(N1) &&
46252        (!Commute || !isa<ConstantSDNode>(N0) ||
46253         (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
46254      return false;
46255    if (MayFoldLoad(N0) &&
46256        ((Commute && !isa<ConstantSDNode>(N1)) ||
46257         (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
46258      return false;
46259    if (IsFoldableAtomicRMW(N0, Op) ||
46260        (Commute && IsFoldableAtomicRMW(N1, Op)))
46261      return false;
46262  }
46263  }
46264
46265  PVT = MVT::i32;
46266  return true;
46267}
46268
46269bool X86TargetLowering::
46270    isDesirableToCombineBuildVectorToShuffleTruncate(
46271        ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const {
46272
46273  assert(SrcVT.getVectorNumElements() == ShuffleMask.size() &&
46274         "Element count mismatch");
46275  assert(
46276      Subtarget.getTargetLowering()->isShuffleMaskLegal(ShuffleMask, SrcVT) &&
46277      "Shuffle Mask expected to be legal");
46278
46279  // For 32-bit elements VPERMD is better than shuffle+truncate.
46280  // TODO: After we improve lowerBuildVector, add an exception for VPERMW.
46281  if (SrcVT.getScalarSizeInBits() == 32 || !Subtarget.hasAVX2())
46282    return false;
46283
46284  if (is128BitLaneCrossingShuffleMask(SrcVT.getSimpleVT(), ShuffleMask))
46285    return false;
46286
46287  return true;
46288}
46289
46290//===----------------------------------------------------------------------===//
46291//                           X86 Inline Assembly Support
46292//===----------------------------------------------------------------------===//
46293
46294// Helper to match a string against a sequence of pieces separated by whitespace.
46295static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
46296  S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
46297
46298  for (StringRef Piece : Pieces) {
46299    if (!S.startswith(Piece)) // Check if the piece matches.
46300      return false;
46301
46302    S = S.substr(Piece.size());
46303    StringRef::size_type Pos = S.find_first_not_of(" \t");
46304    if (Pos == 0) // We matched a prefix.
46305      return false;
46306
46307    S = S.substr(Pos);
46308  }
46309
46310  return S.empty();
46311}
46312
46313static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
46314
46315  if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
46316    if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
46317        std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
46318        std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
46319
46320      if (AsmPieces.size() == 3)
46321        return true;
46322      else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
46323        return true;
46324    }
46325  }
46326  return false;
46327}
46328
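// Try to replace simple byte-swap inline-asm idioms with the llvm.bswap
// intrinsic so later passes can reason about (and better lower) them.
// For example, the common idiom
//   %r = call i32 asm "bswap $0", "=r,0"(i32 %x)
// becomes
//   %r = call i32 @llvm.bswap.i32(i32 %x)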
46329bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
46330  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
46331
46332  const std::string &AsmStr = IA->getAsmString();
46333
46334  IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
46335  if (!Ty || Ty->getBitWidth() % 16 != 0)
46336    return false;
46337
46338  // TODO: should remove alternatives from the asm string: "foo {a|b}" -> "foo a"
46339  SmallVector<StringRef, 4> AsmPieces;
46340  SplitString(AsmStr, AsmPieces, ";\n");
46341
46342  switch (AsmPieces.size()) {
46343  default: return false;
46344  case 1:
46345    // FIXME: this should verify that we are targeting a 486 or better.  If not,
46346    // we will turn this bswap into something that will be lowered to logical
46347    // ops instead of emitting the bswap asm.  For now, we don't support 486 or
46348    // lower so don't worry about this.
46349    // bswap $0
46350    if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
46351        matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
46352        matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
46353        matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
46354        matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
46355        matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
46356      // No need to check constraints, nothing other than the equivalent of
46357      // "=r,0" would be valid here.
46358      return IntrinsicLowering::LowerToByteSwap(CI);
46359    }
46360
46361    // rorw $$8, ${0:w}  -->  llvm.bswap.i16
46362    if (CI->getType()->isIntegerTy(16) &&
46363        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
46364        (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
46365         matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
46366      AsmPieces.clear();
46367      StringRef ConstraintsStr = IA->getConstraintString();
46368      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
46369      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
46370      if (clobbersFlagRegisters(AsmPieces))
46371        return IntrinsicLowering::LowerToByteSwap(CI);
46372    }
46373    break;
46374  case 3:
46375    if (CI->getType()->isIntegerTy(32) &&
46376        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
46377        matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
46378        matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
46379        matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
46380      AsmPieces.clear();
46381      StringRef ConstraintsStr = IA->getConstraintString();
46382      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
46383      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
46384      if (clobbersFlagRegisters(AsmPieces))
46385        return IntrinsicLowering::LowerToByteSwap(CI);
46386    }
46387
46388    if (CI->getType()->isIntegerTy(64)) {
46389      InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
46390      if (Constraints.size() >= 2 &&
46391          Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
46392          Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
46393        // bswap %eax / bswap %edx / xchgl %eax, %edx  -> llvm.bswap.i64
46394        if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
46395            matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
46396            matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
46397          return IntrinsicLowering::LowerToByteSwap(CI);
46398      }
46399    }
46400    break;
46401  }
46402  return false;
46403}
46404
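// Map a flag-output constraint of the form "{@cc<cond>}" to the corresponding
// X86 condition code, or COND_INVALID if it is not a recognized flag output.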
46405static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
46406  X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
46407                           .Case("{@cca}", X86::COND_A)
46408                           .Case("{@ccae}", X86::COND_AE)
46409                           .Case("{@ccb}", X86::COND_B)
46410                           .Case("{@ccbe}", X86::COND_BE)
46411                           .Case("{@ccc}", X86::COND_B)
46412                           .Case("{@cce}", X86::COND_E)
46413                           .Case("{@ccz}", X86::COND_E)
46414                           .Case("{@ccg}", X86::COND_G)
46415                           .Case("{@ccge}", X86::COND_GE)
46416                           .Case("{@ccl}", X86::COND_L)
46417                           .Case("{@ccle}", X86::COND_LE)
46418                           .Case("{@ccna}", X86::COND_BE)
46419                           .Case("{@ccnae}", X86::COND_B)
46420                           .Case("{@ccnb}", X86::COND_AE)
46421                           .Case("{@ccnbe}", X86::COND_A)
46422                           .Case("{@ccnc}", X86::COND_AE)
46423                           .Case("{@ccne}", X86::COND_NE)
46424                           .Case("{@ccnz}", X86::COND_NE)
46425                           .Case("{@ccng}", X86::COND_LE)
46426                           .Case("{@ccnge}", X86::COND_L)
46427                           .Case("{@ccnl}", X86::COND_GE)
46428                           .Case("{@ccnle}", X86::COND_G)
46429                           .Case("{@ccno}", X86::COND_NO)
46430                           .Case("{@ccnp}", X86::COND_P)
46431                           .Case("{@ccns}", X86::COND_NS)
46432                           .Case("{@cco}", X86::COND_O)
46433                           .Case("{@ccp}", X86::COND_P)
46434                           .Case("{@ccs}", X86::COND_S)
46435                           .Default(X86::COND_INVALID);
46436  return Cond;
46437}
46438
46439/// Given a constraint letter, return the type of constraint for this target.
46440X86TargetLowering::ConstraintType
46441X86TargetLowering::getConstraintType(StringRef Constraint) const {
46442  if (Constraint.size() == 1) {
46443    switch (Constraint[0]) {
46444    case 'R':
46445    case 'q':
46446    case 'Q':
46447    case 'f':
46448    case 't':
46449    case 'u':
46450    case 'y':
46451    case 'x':
46452    case 'v':
46453    case 'Y':
46454    case 'l':
46455    case 'k': // AVX512 masking registers.
46456      return C_RegisterClass;
46457    case 'a':
46458    case 'b':
46459    case 'c':
46460    case 'd':
46461    case 'S':
46462    case 'D':
46463    case 'A':
46464      return C_Register;
46465    case 'I':
46466    case 'J':
46467    case 'K':
46468    case 'N':
46469    case 'G':
46470    case 'L':
46471    case 'M':
46472      return C_Immediate;
46473    case 'C':
46474    case 'e':
46475    case 'Z':
46476      return C_Other;
46477    default:
46478      break;
46479    }
46480  }
46481  else if (Constraint.size() == 2) {
46482    switch (Constraint[0]) {
46483    default:
46484      break;
46485    case 'Y':
46486      switch (Constraint[1]) {
46487      default:
46488        break;
46489      case 'z':
46490      case '0':
46491        return C_Register;
46492      case 'i':
46493      case 'm':
46494      case 'k':
46495      case 't':
46496      case '2':
46497        return C_RegisterClass;
46498      }
46499    }
46500  } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
46501    return C_Other;
46502  return TargetLowering::getConstraintType(Constraint);
46503}
46504
46505/// Examine constraint type and operand type and determine a weight value.
46506/// This object must already have been set up with the operand type
46507/// and the current alternative constraint selected.
46508TargetLowering::ConstraintWeight
46509  X86TargetLowering::getSingleConstraintMatchWeight(
46510    AsmOperandInfo &info, const char *constraint) const {
46511  ConstraintWeight weight = CW_Invalid;
46512  Value *CallOperandVal = info.CallOperandVal;
46513  // If we don't have a value, we can't do a match,
46514  // but allow it at the lowest weight.
46515  if (!CallOperandVal)
46516    return CW_Default;
46517  Type *type = CallOperandVal->getType();
46518  // Look at the constraint type.
46519  switch (*constraint) {
46520  default:
46521    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
46522    LLVM_FALLTHROUGH;
46523  case 'R':
46524  case 'q':
46525  case 'Q':
46526  case 'a':
46527  case 'b':
46528  case 'c':
46529  case 'd':
46530  case 'S':
46531  case 'D':
46532  case 'A':
46533    if (type->isIntegerTy())
46534      weight = CW_SpecificReg;
46535    break;
46536  case 'f':
46537  case 't':
46538  case 'u':
46539    if (type->isFloatingPointTy())
46540      weight = CW_SpecificReg;
46541    break;
46542  case 'y':
46543    if (type->isX86_MMXTy() && Subtarget.hasMMX())
46544      weight = CW_SpecificReg;
46545    break;
46546  case 'Y': {
46547    unsigned Size = StringRef(constraint).size();
46548    // When matching a bare 'Y', treat it as 'Yi' since 'Y' and 'Yi' are synonymous.
46549    char NextChar = Size == 2 ? constraint[1] : 'i';
46550    if (Size > 2)
46551      break;
46552    switch (NextChar) {
46553      default:
46554        return CW_Invalid;
46555      // XMM0
46556      case 'z':
46557      case '0':
46558        if ((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1())
46559          return CW_SpecificReg;
46560        return CW_Invalid;
46561      // Conditional OpMask regs (AVX512)
46562      case 'k':
46563        if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
46564          return CW_Register;
46565        return CW_Invalid;
46566      // Any MMX reg
46567      case 'm':
46568        if (type->isX86_MMXTy() && Subtarget.hasMMX())
46569          return weight;
46570        return CW_Invalid;
46571      // Any SSE reg when ISA >= SSE2, same as 'Y'
46572      case 'i':
46573      case 't':
46574      case '2':
46575        if (!Subtarget.hasSSE2())
46576          return CW_Invalid;
46577        break;
46578    }
46579    // Fall through (handle "Y" constraint).
46580    LLVM_FALLTHROUGH;
46581  }
46582  case 'v':
46583    if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
46584      weight = CW_Register;
46585    LLVM_FALLTHROUGH;
46586  case 'x':
46587    if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
46588        ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
46589      weight = CW_Register;
46590    break;
46591  case 'k':
46592    // Enable conditional vector operations using %k<#> registers.
46593    if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
46594      weight = CW_Register;
46595    break;
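  // Immediate constraints: the operand gets CW_Constant weight only when it is
  // a constant that fits the range required by the constraint letter.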
46596  case 'I':
46597    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46598      if (C->getZExtValue() <= 31)
46599        weight = CW_Constant;
46600    }
46601    break;
46602  case 'J':
46603    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46604      if (C->getZExtValue() <= 63)
46605        weight = CW_Constant;
46606    }
46607    break;
46608  case 'K':
46609    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46610      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
46611        weight = CW_Constant;
46612    }
46613    break;
46614  case 'L':
46615    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46616      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
46617        weight = CW_Constant;
46618    }
46619    break;
46620  case 'M':
46621    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46622      if (C->getZExtValue() <= 3)
46623        weight = CW_Constant;
46624    }
46625    break;
46626  case 'N':
46627    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46628      if (C->getZExtValue() <= 0xff)
46629        weight = CW_Constant;
46630    }
46631    break;
46632  case 'G':
46633  case 'C':
46634    if (isa<ConstantFP>(CallOperandVal)) {
46635      weight = CW_Constant;
46636    }
46637    break;
46638  case 'e':
46639    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46640      if ((C->getSExtValue() >= -0x80000000LL) &&
46641          (C->getSExtValue() <= 0x7fffffffLL))
46642        weight = CW_Constant;
46643    }
46644    break;
46645  case 'Z':
46646    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46647      if (C->getZExtValue() <= 0xffffffff)
46648        weight = CW_Constant;
46649    }
46650    break;
46651  }
46652  return weight;
46653}
46654
46655/// Try to replace an X constraint, which matches anything, with another that
46656/// has more specific requirements based on the type of the corresponding
46657/// operand.
46658const char *X86TargetLowering::
46659LowerXConstraint(EVT ConstraintVT) const {
46660  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
46661  // 'f' like normal targets.
46662  if (ConstraintVT.isFloatingPoint()) {
46663    if (Subtarget.hasSSE2())
46664      return "Y";
46665    if (Subtarget.hasSSE1())
46666      return "x";
46667  }
46668
46669  return TargetLowering::LowerXConstraint(ConstraintVT);
46670}
46671
46672// Lower @cc targets via setcc.
46673SDValue X86TargetLowering::LowerAsmOutputForConstraint(
46674    SDValue &Chain, SDValue &Flag, SDLoc DL, const AsmOperandInfo &OpInfo,
46675    SelectionDAG &DAG) const {
46676  X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
46677  if (Cond == X86::COND_INVALID)
46678    return SDValue();
46679  // Check that return type is valid.
46680  if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
46681      OpInfo.ConstraintVT.getSizeInBits() < 8)
46682    report_fatal_error("Flag output operand is of invalid type");
46683
46684  // Read the EFLAGS register. Only update the chain when the CopyFromReg is glued.
46685  if (Flag.getNode()) {
46686    Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Flag);
46687    Chain = Flag.getValue(1);
46688  } else
46689    Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
46690  // Extract CC code.
46691  SDValue CC = getSETCC(Cond, Flag, DL, DAG);
46692  // Zero-extend the i8 SETCC result to the constraint's type.
46693  SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, DL, OpInfo.ConstraintVT, CC);
46694
46695  return Result;
46696}
46697
46698/// Lower the specified operand into the Ops vector.
46699/// If it is invalid, don't add anything to Ops.
46700void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
46701                                                     std::string &Constraint,
46702                                                     std::vector<SDValue>&Ops,
46703                                                     SelectionDAG &DAG) const {
46704  SDValue Result;
46705
46706  // Only support length 1 constraints for now.
46707  if (Constraint.length() > 1) return;
46708
46709  char ConstraintLetter = Constraint[0];
46710  switch (ConstraintLetter) {
46711  default: break;
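  // 'I': unsigned immediate in [0, 31] (e.g. 32-bit shift counts).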
46712  case 'I':
46713    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46714      if (C->getZExtValue() <= 31) {
46715        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46716                                       Op.getValueType());
46717        break;
46718      }
46719    }
46720    return;
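  // 'J': unsigned immediate in [0, 63] (e.g. 64-bit shift counts).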
46721  case 'J':
46722    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46723      if (C->getZExtValue() <= 63) {
46724        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46725                                       Op.getValueType());
46726        break;
46727      }
46728    }
46729    return;
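  // 'K': signed 8-bit immediate.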
46730  case 'K':
46731    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46732      if (isInt<8>(C->getSExtValue())) {
46733        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46734                                       Op.getValueType());
46735        break;
46736      }
46737    }
46738    return;
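  // 'L': 0xFF, 0xFFFF, or (in 64-bit mode) 0xFFFFFFFF - masks for which an AND
  // acts as a zero extension.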
46739  case 'L':
46740    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46741      if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
46742          (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
46743        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
46744                                       Op.getValueType());
46745        break;
46746      }
46747    }
46748    return;
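  // 'M': immediate in [0, 3] (a shift amount usable as an LEA scale).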
46749  case 'M':
46750    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46751      if (C->getZExtValue() <= 3) {
46752        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46753                                       Op.getValueType());
46754        break;
46755      }
46756    }
46757    return;
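  // 'N': unsigned 8-bit immediate (e.g. for IN/OUT port numbers).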
46758  case 'N':
46759    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46760      if (C->getZExtValue() <= 255) {
46761        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46762                                       Op.getValueType());
46763        break;
46764      }
46765    }
46766    return;
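  // 'O': unsigned immediate in [0, 127].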
46767  case 'O':
46768    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46769      if (C->getZExtValue() <= 127) {
46770        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46771                                       Op.getValueType());
46772        break;
46773      }
46774    }
46775    return;
46776  case 'e': {
46777    // 32-bit signed value
46778    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46779      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
46780                                           C->getSExtValue())) {
46781        // Widen to 64 bits here to get it sign extended.
46782        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
46783        break;
46784      }
46785    // FIXME gcc accepts some relocatable values here too, but only in certain
46786    // memory models; it's complicated.
46787    }
46788    return;
46789  }
46790  case 'Z': {
46791    // 32-bit unsigned value
46792    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46793      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
46794                                           C->getZExtValue())) {
46795        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46796                                       Op.getValueType());
46797        break;
46798      }
46799    }
46800    // FIXME gcc accepts some relocatable values here too, but only in certain
46801    // memory models; it's complicated.
46802    return;
46803  }
46804  case 'i': {
46805    // Literal immediates are always ok.
46806    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
46807      bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
46808      BooleanContent BCont = getBooleanContents(MVT::i64);
46809      ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
46810                                    : ISD::SIGN_EXTEND;
46811      int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
46812                                                  : CST->getSExtValue();
46813      Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
46814      break;
46815    }
46816
46817    // In any sort of PIC mode addresses need to be computed at runtime by
46818    // adding in a register or some sort of table lookup.  These can't
46819    // be used as immediates.
46820    if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
46821      return;
46822
46823    // If we are in non-pic codegen mode, we allow the address of a global (with
46824    // an optional displacement) to be used with 'i'.
46825    if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
46826      // If we require an extra load to get this address, as in PIC mode, we
46827      // can't accept it.
46828      if (isGlobalStubReference(
46829              Subtarget.classifyGlobalReference(GA->getGlobal())))
46830        return;
46831    break;
46832  }
46833  }
46834
46835  if (Result.getNode()) {
46836    Ops.push_back(Result);
46837    return;
46838  }
46839  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
46840}
46841
46842/// Check if \p RC is a general purpose register class.
46843/// I.e., GR* or one of their variant.
46844static bool isGRClass(const TargetRegisterClass &RC) {
46845  return RC.hasSuperClassEq(&X86::GR8RegClass) ||
46846         RC.hasSuperClassEq(&X86::GR16RegClass) ||
46847         RC.hasSuperClassEq(&X86::GR32RegClass) ||
46848         RC.hasSuperClassEq(&X86::GR64RegClass) ||
46849         RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
46850}
46851
46852/// Check if \p RC is a vector register class.
46853/// I.e., FR* / VR* or one of their variant.
46854static bool isFRClass(const TargetRegisterClass &RC) {
46855  return RC.hasSuperClassEq(&X86::FR32XRegClass) ||
46856         RC.hasSuperClassEq(&X86::FR64XRegClass) ||
46857         RC.hasSuperClassEq(&X86::VR128XRegClass) ||
46858         RC.hasSuperClassEq(&X86::VR256XRegClass) ||
46859         RC.hasSuperClassEq(&X86::VR512RegClass);
46860}
46861
46862/// Check if \p RC is a mask register class.
46863/// I.e., VK* or one of their variant.
46864static bool isVKClass(const TargetRegisterClass &RC) {
46865  return RC.hasSuperClassEq(&X86::VK1RegClass) ||
46866         RC.hasSuperClassEq(&X86::VK2RegClass) ||
46867         RC.hasSuperClassEq(&X86::VK4RegClass) ||
46868         RC.hasSuperClassEq(&X86::VK8RegClass) ||
46869         RC.hasSuperClassEq(&X86::VK16RegClass) ||
46870         RC.hasSuperClassEq(&X86::VK32RegClass) ||
46871         RC.hasSuperClassEq(&X86::VK64RegClass);
46872}
46873
46874std::pair<unsigned, const TargetRegisterClass *>
46875X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
46876                                                StringRef Constraint,
46877                                                MVT VT) const {
46878  // First, see if this is a constraint that directly corresponds to an LLVM
46879  // register class.
46880  if (Constraint.size() == 1) {
46881    // GCC Constraint Letters
46882    switch (Constraint[0]) {
46883    default: break;
46884    // 'A' means [ER]AX + [ER]DX.
46885    case 'A':
46886      if (Subtarget.is64Bit())
46887        return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
46888      assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
46889             "Expecting 64, 32 or 16 bit subtarget");
46890      return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
46891
46892      // TODO: Slight differences here in allocation order and leaving
46893      // RIP in the class. Do they matter any more here than they do
46894      // in the normal allocation?
46895    case 'k':
46896      if (Subtarget.hasAVX512()) {
46897        if (VT == MVT::i1)
46898          return std::make_pair(0U, &X86::VK1RegClass);
46899        if (VT == MVT::i8)
46900          return std::make_pair(0U, &X86::VK8RegClass);
46901        if (VT == MVT::i16)
46902          return std::make_pair(0U, &X86::VK16RegClass);
46903      }
46904      if (Subtarget.hasBWI()) {
46905        if (VT == MVT::i32)
46906          return std::make_pair(0U, &X86::VK32RegClass);
46907        if (VT == MVT::i64)
46908          return std::make_pair(0U, &X86::VK64RegClass);
46909      }
46910      break;
46911    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
46912      if (Subtarget.is64Bit()) {
46913        if (VT == MVT::i32 || VT == MVT::f32)
46914          return std::make_pair(0U, &X86::GR32RegClass);
46915        if (VT == MVT::i16)
46916          return std::make_pair(0U, &X86::GR16RegClass);
46917        if (VT == MVT::i8 || VT == MVT::i1)
46918          return std::make_pair(0U, &X86::GR8RegClass);
46919        if (VT == MVT::i64 || VT == MVT::f64)
46920          return std::make_pair(0U, &X86::GR64RegClass);
46921        break;
46922      }
46923      LLVM_FALLTHROUGH;
46924      // 32-bit fallthrough
46925    case 'Q':   // Q_REGS
46926      if (VT == MVT::i32 || VT == MVT::f32)
46927        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
46928      if (VT == MVT::i16)
46929        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
46930      if (VT == MVT::i8 || VT == MVT::i1)
46931        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
46932      if (VT == MVT::i64)
46933        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
46934      break;
46935    case 'r':   // GENERAL_REGS
46936    case 'l':   // INDEX_REGS
46937      if (VT == MVT::i8 || VT == MVT::i1)
46938        return std::make_pair(0U, &X86::GR8RegClass);
46939      if (VT == MVT::i16)
46940        return std::make_pair(0U, &X86::GR16RegClass);
46941      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
46942        return std::make_pair(0U, &X86::GR32RegClass);
46943      return std::make_pair(0U, &X86::GR64RegClass);
46944    case 'R':   // LEGACY_REGS
46945      if (VT == MVT::i8 || VT == MVT::i1)
46946        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
46947      if (VT == MVT::i16)
46948        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
46949      if (VT == MVT::i32 || !Subtarget.is64Bit())
46950        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
46951      return std::make_pair(0U, &X86::GR64_NOREXRegClass);
46952    case 'f':  // FP Stack registers.
46953      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
46954      // value to the correct fpstack register class.
46955      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
46956        return std::make_pair(0U, &X86::RFP32RegClass);
46957      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
46958        return std::make_pair(0U, &X86::RFP64RegClass);
46959      return std::make_pair(0U, &X86::RFP80RegClass);
46960    case 'y':   // MMX_REGS if MMX allowed.
46961      if (!Subtarget.hasMMX()) break;
46962      return std::make_pair(0U, &X86::VR64RegClass);
46963    case 'Y':   // SSE_REGS if SSE2 allowed
46964      if (!Subtarget.hasSSE2()) break;
46965      LLVM_FALLTHROUGH;
46966    case 'v':
46967    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
46968      if (!Subtarget.hasSSE1()) break;
46969      bool VConstraint = (Constraint[0] == 'v');
46970
46971      switch (VT.SimpleTy) {
46972      default: break;
46973      // Scalar SSE types.
46974      case MVT::f32:
46975      case MVT::i32:
46976        if (VConstraint && Subtarget.hasVLX())
46977          return std::make_pair(0U, &X86::FR32XRegClass);
46978        return std::make_pair(0U, &X86::FR32RegClass);
46979      case MVT::f64:
46980      case MVT::i64:
46981        if (VConstraint && Subtarget.hasVLX())
46982          return std::make_pair(0U, &X86::FR64XRegClass);
46983        return std::make_pair(0U, &X86::FR64RegClass);
46984      // TODO: Handle i128 in FR128RegClass after it is tested well.
46985      // Vector types and fp128.
46986      case MVT::f128:
46987      case MVT::v16i8:
46988      case MVT::v8i16:
46989      case MVT::v4i32:
46990      case MVT::v2i64:
46991      case MVT::v4f32:
46992      case MVT::v2f64:
46993        if (VConstraint && Subtarget.hasVLX())
46994          return std::make_pair(0U, &X86::VR128XRegClass);
46995        return std::make_pair(0U, &X86::VR128RegClass);
46996      // AVX types.
46997      case MVT::v32i8:
46998      case MVT::v16i16:
46999      case MVT::v8i32:
47000      case MVT::v4i64:
47001      case MVT::v8f32:
47002      case MVT::v4f64:
47003        if (VConstraint && Subtarget.hasVLX())
47004          return std::make_pair(0U, &X86::VR256XRegClass);
47005        if (Subtarget.hasAVX())
47006          return std::make_pair(0U, &X86::VR256RegClass);
47007        break;
47008      case MVT::v8f64:
47009      case MVT::v16f32:
47010      case MVT::v16i32:
47011      case MVT::v8i64:
47012        if (!Subtarget.hasAVX512()) break;
47013        if (VConstraint)
47014          return std::make_pair(0U, &X86::VR512RegClass);
47015        return std::make_pair(0U, &X86::VR512_0_15RegClass);
47016      }
47017      break;
47018    }
47019  } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
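    // Two-letter 'Y<x>' constraints select more specific SSE/MMX/mask classes.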
47020    switch (Constraint[1]) {
47021    default:
47022      break;
47023    case 'i':
47024    case 't':
47025    case '2':
47026      return getRegForInlineAsmConstraint(TRI, "Y", VT);
47027    case 'm':
47028      if (!Subtarget.hasMMX()) break;
47029      return std::make_pair(0U, &X86::VR64RegClass);
47030    case 'z':
47031    case '0':
47032      if (!Subtarget.hasSSE1()) break;
47033      return std::make_pair(X86::XMM0, &X86::VR128RegClass);
47034    case 'k':
47035      // These register classes don't allocate k0, which is not usable as a write mask.
47036      if (Subtarget.hasAVX512()) {
47037        if (VT == MVT::i1)
47038          return std::make_pair(0U, &X86::VK1WMRegClass);
47039        if (VT == MVT::i8)
47040          return std::make_pair(0U, &X86::VK8WMRegClass);
47041        if (VT == MVT::i16)
47042          return std::make_pair(0U, &X86::VK16WMRegClass);
47043      }
47044      if (Subtarget.hasBWI()) {
47045        if (VT == MVT::i32)
47046          return std::make_pair(0U, &X86::VK32WMRegClass);
47047        if (VT == MVT::i64)
47048          return std::make_pair(0U, &X86::VK64WMRegClass);
47049      }
47050      break;
47051    }
47052  }
47053
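  // Flag-output ("@cc") constraints don't name a register themselves; return
  // GR32 as the operand's class - the actual value is produced from EFLAGS in
  // LowerAsmOutputForConstraint.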
47054  if (parseConstraintCode(Constraint) != X86::COND_INVALID)
47055    return std::make_pair(0U, &X86::GR32RegClass);
47056
47057  // Use the default implementation in TargetLowering to convert the register
47058  // constraint into a member of a register class.
47059  std::pair<unsigned, const TargetRegisterClass*> Res;
47060  Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
47061
47062  // Not found as a standard register?
47063  if (!Res.second) {
47064    // Map "st(0)" .. "st(7)" to the corresponding FP stack registers.
47065    if (Constraint.size() == 7 && Constraint[0] == '{' &&
47066        tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
47067        Constraint[3] == '(' &&
47068        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
47069        Constraint[5] == ')' && Constraint[6] == '}') {
47070      // st(7) is not allocatable and thus not a member of RFP80. Return the
47071      // singleton class RFP80_7 when we have a reference to it.
47072      if (Constraint[4] == '7')
47073        return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
47074      return std::make_pair(X86::FP0 + Constraint[4] - '0',
47075                            &X86::RFP80RegClass);
47076    }
47077
47078    // GCC allows "st(0)" to be called just plain "st".
47079    if (StringRef("{st}").equals_lower(Constraint))
47080      return std::make_pair(X86::FP0, &X86::RFP80RegClass);
47081
47082    // flags -> EFLAGS
47083    if (StringRef("{flags}").equals_lower(Constraint))
47084      return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);
47085
47086    // dirflag -> DF
47087    if (StringRef("{dirflag}").equals_lower(Constraint))
47088      return std::make_pair(X86::DF, &X86::DFCCRRegClass);
47089
47090    // fpsr -> FPSW
47091    if (StringRef("{fpsr}").equals_lower(Constraint))
47092      return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);
47093
47094    return Res;
47095  }
47096
47097  // Make sure it isn't a register that requires 64-bit mode.
47098  if (!Subtarget.is64Bit() &&
47099      (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
47100      TRI->getEncodingValue(Res.first) >= 8) {
47101    // Register requires REX prefix, but we're in 32-bit mode.
47102    return std::make_pair(0, nullptr);
47103  }
47104
47105  // Make sure it isn't a register that requires AVX512.
47106  if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
47107      TRI->getEncodingValue(Res.first) & 0x10) {
47108    // Register requires EVEX prefix.
47109    return std::make_pair(0, nullptr);
47110  }
47111
47112  // Otherwise, check to see if this is a register class of the wrong value
47113  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it to
47114  // turn into {ax},{dx}.
47115  // MVT::Other is used to specify clobber names.
47116  if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
47117    return Res;   // Correct type already, nothing to do.
47118
47119  // Get a matching integer of the correct size, e.g. "ax" with MVT::i32 should
47120  // return "eax". This should even work for things like getting 64bit integer
47121  // registers when given an f64 type.
47122  const TargetRegisterClass *Class = Res.second;
47123  // The generic code will match the first register class that contains the
47124  // given register. Thus, based on the ordering of the tablegened file,
47125  // the "plain" GR classes might not come first.
47126  // Therefore, use a helper method.
47127  if (isGRClass(*Class)) {
47128    unsigned Size = VT.getSizeInBits();
47129    if (Size == 1) Size = 8;
47130    unsigned DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
47131    if (DestReg > 0) {
47132      bool is64Bit = Subtarget.is64Bit();
47133      const TargetRegisterClass *RC =
47134          Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
47135        : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
47136        : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
47137        : Size == 64 ? (is64Bit ? &X86::GR64RegClass : nullptr)
47138        : nullptr;
47139      if (Size == 64 && !is64Bit) {
47140        // Model GCC's behavior here and select a fixed pair of 32-bit
47141        // registers.
47142        switch (DestReg) {
47143        case X86::RAX:
47144          return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
47145        case X86::RDX:
47146          return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
47147        case X86::RCX:
47148          return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
47149        case X86::RBX:
47150          return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
47151        case X86::RSI:
47152          return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
47153        case X86::RDI:
47154          return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
47155        case X86::RBP:
47156          return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
47157        default:
47158          return std::make_pair(0, nullptr);
47159        }
47160      }
47161      if (RC && RC->contains(DestReg))
47162        return std::make_pair(DestReg, RC);
47163      return Res;
47164    }
47165    // No register found/type mismatch.
47166    return std::make_pair(0, nullptr);
47167  } else if (isFRClass(*Class)) {
47168    // Handle references to XMM physical registers that got mapped into the
47169    // wrong class.  This can happen with constraints like {xmm0} where the
47170    // target independent register mapper will just pick the first match it can
47171    // find, ignoring the required type.
47172
47173    // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
47174    if (VT == MVT::f32 || VT == MVT::i32)
47175      Res.second = &X86::FR32XRegClass;
47176    else if (VT == MVT::f64 || VT == MVT::i64)
47177      Res.second = &X86::FR64XRegClass;
47178    else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
47179      Res.second = &X86::VR128XRegClass;
47180    else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
47181      Res.second = &X86::VR256XRegClass;
47182    else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
47183      Res.second = &X86::VR512RegClass;
47184    else {
47185      // Type mismatch and not a clobber: return an error.
47186      Res.first = 0;
47187      Res.second = nullptr;
47188    }
47189  } else if (isVKClass(*Class)) {
47190    if (VT == MVT::i1)
47191      Res.second = &X86::VK1RegClass;
47192    else if (VT == MVT::i8)
47193      Res.second = &X86::VK8RegClass;
47194    else if (VT == MVT::i16)
47195      Res.second = &X86::VK16RegClass;
47196    else if (VT == MVT::i32)
47197      Res.second = &X86::VK32RegClass;
47198    else if (VT == MVT::i64)
47199      Res.second = &X86::VK64RegClass;
47200    else {
47201      // Type mismatch and not a clobber: return an error.
47202      Res.first = 0;
47203      Res.second = nullptr;
47204    }
47205  }
47206
47207  return Res;
47208}
47209
47210int X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
47211                                            const AddrMode &AM, Type *Ty,
47212                                            unsigned AS) const {
47213  // Scaling factors are not free at all.
47214  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
47215  // will take 2 allocations in the out of order engine instead of 1
47216  // for plain addressing mode, i.e. inst (reg1).
47217  // E.g.,
47218  // vaddps (%rsi,%rdx), %ymm0, %ymm1
47219  // Requires two allocations (one for the load, one for the computation)
47220  // whereas:
47221  // vaddps (%rsi), %ymm0, %ymm1
47222  // Requires just 1 allocation, i.e., freeing allocations for other operations
47223  // and having less micro operations to execute.
47224  //
47225  // For some X86 architectures, this is even worse because for instance for
47226  // stores, the complex addressing mode forces the instruction to use the
47227  // "load" ports instead of the dedicated "store" port.
47228  // E.g., on Haswell:
47229  // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
47230  // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
47231  if (isLegalAddressingMode(DL, AM, Ty, AS))
47232    // Scale represents reg2 * scale, thus account for 1
47233    // as soon as we use a second register.
47234    return AM.Scale != 0;
47235  return -1;
47236}
47237
47238bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
47239  // Integer division on x86 is expensive. However, when aggressively optimizing
47240  // for code size, we prefer to use a div instruction, as it is usually smaller
47241  // than the alternative sequence.
47242  // The exception to this is vector division. Since x86 doesn't have vector
47243  // integer division, leaving the division as-is is a loss even in terms of
47244  // size, because it will have to be scalarized, while the alternative code
47245  // sequence can be performed in vector form.
47246  bool OptSize =
47247      Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize);
47248  return OptSize && !VT.isVector();
47249}
47250
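// Split-CSR: callee-saved registers are saved and restored via explicit copies
// that insertCopiesSplitCSR emits in the entry and exit blocks (used for the
// CXX_FAST_TLS calling convention); only supported in 64-bit mode here.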
47251void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
47252  if (!Subtarget.is64Bit())
47253    return;
47254
47255  // Update IsSplitCSR in X86MachineFunctionInfo.
47256  X86MachineFunctionInfo *AFI =
47257      Entry->getParent()->getInfo<X86MachineFunctionInfo>();
47258  AFI->setIsSplitCSR(true);
47259}
47260
47261void X86TargetLowering::insertCopiesSplitCSR(
47262    MachineBasicBlock *Entry,
47263    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
47264  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
47265  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
47266  if (!IStart)
47267    return;
47268
47269  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
47270  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
47271  MachineBasicBlock::iterator MBBI = Entry->begin();
47272  for (const MCPhysReg *I = IStart; *I; ++I) {
47273    const TargetRegisterClass *RC = nullptr;
47274    if (X86::GR64RegClass.contains(*I))
47275      RC = &X86::GR64RegClass;
47276    else
47277      llvm_unreachable("Unexpected register class in CSRsViaCopy!");
47278
47279    Register NewVR = MRI->createVirtualRegister(RC);
47280    // Create copy from CSR to a virtual register.
47281    // FIXME: this currently does not emit CFI pseudo-instructions, it works
47282    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
47283    // nounwind. If we want to generalize this later, we may need to emit
47284    // CFI pseudo-instructions.
47285    assert(
47286        Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
47287        "Function should be nounwind in insertCopiesSplitCSR!");
47288    Entry->addLiveIn(*I);
47289    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
47290        .addReg(*I);
47291
47292    // Insert the copy-back instructions right before the terminator.
47293    for (auto *Exit : Exits)
47294      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
47295              TII->get(TargetOpcode::COPY), *I)
47296          .addReg(NewVR);
47297  }
47298}
47299
47300bool X86TargetLowering::supportSwiftError() const {
47301  return Subtarget.is64Bit();
47302}
47303
47304/// Returns the name of the symbol used to emit stack probes or the empty
47305/// string if not applicable.
47306StringRef
47307X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
47308  // If the function specifically requests stack probes, emit them.
47309  if (MF.getFunction().hasFnAttribute("probe-stack"))
47310    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
47311
47312  // Generally, if we aren't on Windows, the platform ABI does not include
47313  // support for stack probes, so don't emit them.
47314  if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
47315      MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
47316    return "";
47317
47318  // We need a stack probe to conform to the Windows ABI. Choose the right
47319  // symbol.
47320  if (Subtarget.is64Bit())
47321    return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
47322  return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
47323}
47324
47325unsigned
47326X86TargetLowering::getStackProbeSize(MachineFunction &MF) const {
47327  // The default stack probe size is 4096 if the function has no
47328  // "stack-probe-size" attribute.
47329  unsigned StackProbeSize = 4096;
47330  const Function &Fn = MF.getFunction();
47331  if (Fn.hasFnAttribute("stack-probe-size"))
47332    Fn.getFnAttribute("stack-probe-size")
47333        .getValueAsString()
47334        .getAsInteger(0, StackProbeSize);
47335  return StackProbeSize;
47336}
47337